signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def make_transaction(self):
    """Create the transaction for this RecurredCost.

    May only be used to create the RecurredCost's initial transaction.

    Returns:
        Transaction: The created transaction, also assigned to
        ``self.transaction``. ``None`` if the amount is zero.
    """
    # A RecurredCost that has been saved already has its transaction;
    # creating a second one would double-bill.
    if self.pk:
        raise CannotRecreateTransactionOnRecurredCost(
            'The transaction for this recurred cost has already been created. You cannot create it again.'
        )
    amount = self.recurring_cost.get_amount(self.billing_cycle)
    # It is quite possible that there will be nothing to bill, in which
    # case we cannot create a transaction with no legs, nor can we create
    # legs with zero values. Therefore we don't create any transaction.
    if not amount:
        return None
    self.transaction = Transaction.objects.create(description='Created by recurring cost', date=self.billing_cycle.date_range.lower)
    # Use the SplitManager's custom queryset's split() method to get the
    # amount to be billed for each split
    splits = self.recurring_cost.splits.all().split(amount)
    # Create the transaction leg for the outbound funds
    # (normally to an expense account)
    self.transaction.legs.add(Leg.objects.create(
        transaction=self.transaction,
        amount=Money(amount, self.recurring_cost.currency),
        account=self.recurring_cost.to_account,
    ))
    for split, split_amount in splits:
        # Create the transaction legs for the inbound funds
        # (from housemate accounts); zero-value legs are skipped because
        # legs may not hold a zero amount.
        if split_amount:
            self.transaction.legs.add(Leg.objects.create(
                transaction=self.transaction,
                amount=Money(split_amount * -1, self.recurring_cost.currency),
                account=split.from_account,
            ))
    return self.transaction
def kuhn_munkres(G, TOLERANCE=1e-6):
    """Maximum-profit bipartite matching (Kuhn-Munkres / Hungarian method).

    :param G: weight matrix where G[u][v] is the weight of edge (u, v)
    :param TOLERANCE: values whose absolute value is below this are treated
        as zero; may be 0 when G holds integers or fractions
    :requires: the graph (U, V, E) is complete bipartite with len(U) <= len(V);
        float('-inf')/float('inf') entries are allowed, None is not
    :returns: (matching table from U to V, total value of the matching)
    :complexity: O(|U|^2 |V|)
    """
    n_left = len(G)
    n_right = len(G[0])
    assert n_left <= n_right
    left = range(n_left)
    right = range(n_right)
    match_u = [None] * n_left          # current matching, U side
    match_v = [None] * n_right         # current matching, V side
    label_u = [max(row) for row in G]  # feasible trivial labels
    label_v = [0] * n_right
    for root in left:
        # Grow an alternating tree rooted at `root`.
        covered_u = [False] * n_left   # U vertices covered by the tree
        covered_u[root] = True
        parent_v = [None] * n_right    # parent_v[v]: predecessor of v in tree
        # slack[v] = (val, u): smallest slack of constraint (*) for fixed v,
        # with u being the U vertex achieving it
        slack = [(label_u[root] + label_v[v] - G[root][v], root) for v in right]
        while True:
            ((delta, u), v) = min((slack[v], v) for v in right if parent_v[v] is None)
            assert covered_u[u]
            if delta > TOLERANCE:
                # Tree is full: improve the labels to create a tight edge.
                for u0 in left:
                    if covered_u[u0]:
                        label_u[u0] -= delta
                for v0 in right:
                    if parent_v[v0] is not None:
                        label_v[v0] += delta
                    else:
                        val, arg = slack[v0]
                        slack[v0] = (val - delta, arg)
            assert abs(label_u[u] + label_v[v] - G[u][v]) <= TOLERANCE  # equality
            parent_v[v] = u            # add (u, v) to the tree
            if match_v[v] is None:
                break                  # alternating (augmenting) path found
            u1 = match_v[v]
            assert not covered_u[u1]
            covered_u[u1] = True       # add (u1, v) to the tree
            for v1 in right:
                if parent_v[v1] is None:  # update slack margins
                    alt = (label_u[u1] + label_v[v1] - G[u1][v1], u1)
                    if slack[v1] > alt:
                        slack[v1] = alt
        # Augment the matching along the path back to the root.
        while v is not None:
            u = parent_v[v]
            previous = match_u[u]
            match_v[v] = u
            match_u[u] = v
            v = previous
    return (match_u, sum(label_u) + sum(label_v))
def register_eventclass(event_id):
    """Decorator factory: register an ``Event`` subclass for parsing.

    :param event_id: identifier under which the decorated class is stored
        in the global ``EVENT_REGISTRY``
    :raises MessageException: if the decorated class is not an ``Event``
        subclass
    """
    def register(cls):
        if not issubclass(cls, Event):
            raise MessageException('Cannot register a class that is not a subclass of Event')
        EVENT_REGISTRY[event_id] = cls
        logger.debug('######### Event registry is now: {0}'.format(EVENT_REGISTRY))
        return cls
    return register
def unquote_redirection_tokens(args: List[str]) -> None:
    """Unquote redirection tokens in a list of command-line arguments.

    Used when redirection tokens have to be passed on to another command;
    the list is modified in place.

    :param args: the command line args
    """
    for index, token in enumerate(args):
        bare = strip_quotes(token)
        if bare in constants.REDIRECTION_TOKENS:
            args[index] = bare
def check_arguments(cls, conf):
    """Sanity-check plugin option values.

    Validates that a TCP check interval and port are present and within
    their allowed ranges.

    :param conf: mapping of option name to value
    :raises ArgsError: if an option is missing or out of range
    """
    # Validate the health-check interval.
    interval = conf['tcp_check_interval']
    if not interval:
        raise ArgsError("A TCP health-test interval needs to be "
                        "specified (--tcp_check_interval).")
    if not 1 <= interval <= 3600:
        raise ArgsError("Specified TCP health-test interval must be "
                        "between 1 and 3600 seconds")
    # Validate the health-check port.
    port = conf['tcp_check_port']
    if not port:
        raise ArgsError("A port for the TCP health-test needs to be "
                        "specified (--tcp_check_port).")
    if not 1 <= port <= 65535:
        raise ArgsError("Specified port for TCP health-test must be "
                        "between 1 and 65535")
def getbalance(self, account=None, minconf=6):
    """Retrieve the wallet balance.

    If ``account`` is given, return the balance of that account (with at
    least ``minconf`` confirmations); otherwise return the total balance.
    """
    if account is None or not account:
        return self.req("getbalance")
    return self.req("getbalance", [account, minconf])
def load_metascenario(self, scenario_list):
    """Load one or more scenarios from a list.

    Each entry in ``scenario_list`` is a dict containing at least a
    ``name`` key, plus optional ``tile`` and ``args`` keys.  When ``tile``
    is present and not None, the scenario is loaded into that tile only;
    otherwise it is loaded into the whole device.  ``args`` is forwarded
    to ``load_scenario`` as keyword arguments.

    Args:
        scenario_list (list): one dict per scenario to load

    Raises:
        DataError: if a scenario has no name or names an unknown tile
    """
    for spec in scenario_list:
        scenario_name = spec.get('name')
        if scenario_name is None:
            raise DataError("Scenario in scenario list is missing a name parameter", scenario=spec)
        # Default target is the device itself; a tile address narrows it.
        target = self
        address = spec.get('tile')
        if address is not None:
            target = self._tiles.get(address)
            if target is None:
                raise DataError("Attempted to load a scenario into a tile address that does not exist",
                                address=address, valid_addresses=list(self._tiles))
        target.load_scenario(scenario_name, **spec.get('args', {}))
def _collate_metadata(self):
    """Turn the objects associated with each classification result into a
    DataFrame of metadata.

    Returns None; the resulting DataFrame, indexed by
    ``classification_id``, is stored in ``self._cached["metadata"]``.
    """
    import pandas as pd

    # Field list is discovered lazily from the first sample's schema.
    DEFAULT_FIELDS = None
    metadata = []
    for c in self._classifications:
        m = c.sample.metadata
        if DEFAULT_FIELDS is None:
            # Use the resource schema to enumerate metadata properties,
            # dropping the internal resource URI and sample back-reference.
            DEFAULT_FIELDS = list(m._resource._schema["properties"].keys())
            DEFAULT_FIELDS.remove("$uri")
            DEFAULT_FIELDS.remove("sample")
        metadatum = {f: getattr(m, f) for f in DEFAULT_FIELDS}
        metadatum["classification_id"] = c.id
        metadatum["sample_id"] = m.sample.id
        metadatum["metadata_id"] = m.id
        metadatum["created_at"] = m.sample.created_at
        metadatum["filename"] = c.sample.filename
        # Custom (user-defined) metadata is merged last, so it can shadow
        # the default fields on name collision.
        metadatum.update(m.custom)
        metadata.append(metadatum)
    if metadata:
        metadata = pd.DataFrame(metadata).set_index("classification_id")
    else:
        # No classifications: still return a frame with the core columns so
        # downstream code can rely on their presence.
        metadata = pd.DataFrame(columns=["classification_id", "sample_id", "metadata_id", "created_at"])
    self._cached["metadata"] = metadata
def add_key(self, key, first=False):
    """Add the given key to this row.

    :param key: key to be added to this row.
    :param first: boolean flag; when True the key is prepended, otherwise
        it is appended at the end.
    """
    if first:
        self.keys = [key] + self.keys
    else:
        self.keys.append(key)
    # Remember the (single) space key of the row for later layout queries.
    if isinstance(key, VSpaceKey):
        self.space = key
def validate_fixed(datum, schema, **kwargs):
    """Check that the value is fixed-width bytes matching ``schema['size']``
    exactly, or a ``decimal.Decimal``.

    Parameters
    ----------
    datum : Any
        Data being validated
    schema : dict
        Schema
    kwargs : Any
        Unused kwargs
    """
    if isinstance(datum, decimal.Decimal):
        return True
    return isinstance(datum, bytes) and len(datum) == schema['size']
def read_population_file(file_name):
    """Read the population file.

    :param file_name: the name of the population file.
    :type file_name: str

    :returns: a dict mapping ``(family_id, individual_id)`` tuples to the
        sample's population.

    The population file should contain three tab-separated columns:

    1. The family ID.
    2. The individual ID.
    3. The population of the sample (one of ``CEU``, ``YRI``, ``JPT-CHB``
       or ``SOURCE``).

    The outliers are from the ``SOURCE`` population, when compared to one
    of the three reference populations (``CEU``, ``YRI`` or ``JPT-CHB``).

    :raises ProgramError: if a sample has an unknown population.
    """
    pops = {}
    required_pops = {"CEU", "YRI", "JPT-CHB", "SOURCE"}
    # BUG FIX: the file was previously opened in binary mode ('rb'), which
    # yields bytes and makes rstrip("\r\n")/split("\t") fail with a
    # TypeError on Python 3. Open in text mode instead.
    with open(file_name, 'r') as input_file:
        for line in input_file:
            row = line.rstrip("\r\n").split("\t")

            # The data
            sample_id = tuple(row[:2])
            pop = row[-1]

            # Checking the pop
            if pop not in required_pops:
                msg = ("{}: sample {}: unknown population "
                       "{}".format(file_name, " ".join(sample_id), pop))
                raise ProgramError(msg)

            # Saving the population of the sample
            pops[sample_id] = pop

    return pops
def render_search(self, ctx, data):
    """Render some UI for performing searches, if we know about a search
    aggregator; otherwise render nothing.
    """
    # Anonymous viewers get no search UI.
    if self.username is None:
        return ''
    translator = self._getViewerPrivateApplication()
    aggregator = translator.getPageComponents().searchAggregator
    if aggregator is None or not aggregator.providers():
        return ''
    return ctx.tag.fillSlots('form-action', translator.linkTo(aggregator.storeID))
def _format_msg ( text , width , indent = 0 , prefix = "" ) : r"""Format exception message . Replace newline characters \ n with ` ` \ n ` ` , ` with \ ` and then wrap text as needed"""
text = repr ( text ) . replace ( "`" , "\\`" ) . replace ( "\\n" , " ``\\n`` " ) sindent = " " * indent if not prefix else prefix wrapped_text = textwrap . wrap ( text , width , subsequent_indent = sindent ) # [1 : - 1 ] eliminates quotes generated by repr in first line return ( "\n" . join ( wrapped_text ) ) [ 1 : - 1 ] . rstrip ( )
def download_data(url: str, fname: PathOrStr = None, data: bool = True, ext: str = '.tgz') -> Path:
    "Download `url` to destination `fname`, creating parent dirs; skip if present."
    dest = Path(ifnone(fname, _url2tgz(url, data, ext=ext)))
    os.makedirs(dest.parent, exist_ok=True)
    if not dest.exists():
        print(f'Downloading {url}')
        download_url(f'{url}{ext}', dest)
    return dest
def get_hosts(self, path, start, length):
    """Get hostnames where a particular block (determined by pos and
    blocksize) of a file is stored. Due to replication, a single block
    could be present on multiple hosts.

    :type path: str
    :param path: the path of the file
    :type start: int
    :param start: the start of the block
    :type length: int
    :param length: the length of the block
    :rtype: list
    :return: list of hosts that store the block
    """
    # Refuse to operate on a closed filesystem handle.
    _complain_ifclosed(self.closed)
    # Delegate to the underlying filesystem implementation.
    return self.fs.get_hosts(path, start, length)
def init_layout(self):
    """Set the checked state after all children have been populated."""
    super(AndroidRadioGroup, self).init_layout()
    d = self.declaration
    w = self.widget
    if d.checked:
        # A checked button was set declaratively; apply it directly.
        self.set_checked(d.checked)
    else:
        #: Check if any of the children have "checked = True"
        for c in d.children:
            if c.checked:
                d.checked = c
        # NOTE(review): passing w.getId() to setOnCheckedChangeListener looks
        # like a bridge-API convention (registering by widget id rather than
        # a callback) -- confirm against the enaml-native Android bridge.
        w.setOnCheckedChangeListener(w.getId())
        w.onCheckedChanged.connect(self.on_checked_changed)
def format_git_describe(git_str, pep440=False):
    """Format the result of calling 'git describe' as a python version.

    ``git describe`` output of the form ``version-N-ghash`` is converted to
    ``version.postN+githash`` (or just ``version.postN`` when ``pep440`` is
    True, since PEP 440 does not allow a trailing hash).
    """
    if git_str is None:
        return None
    # Exactly at a tag: already a clean version string.
    if "-" not in git_str:
        return git_str
    # version-N-ghash  ->  version.postN-ghash
    post_form = git_str.replace("-", ".post", 1)
    if pep440:
        # PEP 440 does not allow the git hash suffix.
        return post_form.split("-")[0]
    return post_form.replace("-g", "+git")
def yn_choice(msg, indent=4, fg_color='cyan', separator=''):
    """Prompt the user with ``msg`` and return True/False.

    Renders an optional separator header/footer around the indented,
    green-colored message, then asks for a yes/no confirmation
    (defaulting to yes).

    :param msg: the message to display
    :type msg: str
    :param indent: indentation width for the message block
    :type indent: int
    :param fg_color: color name used for the separator lines
    :type fg_color: str
    :param separator: separator specification passed to gen_separator()
    :return: True if the user confirmed, False otherwise
    :rtype: bool
    """
    _header, _footer = gen_separator(separator=separator)
    if _header:
        textui.puts(getattr(textui.colored, fg_color)(_header))
    with textui.indent(indent, quote=' {}'.format(' ')):
        textui.puts(textui.colored.green(msg))
    if _footer:
        textui.puts(getattr(textui.colored, fg_color)(_footer))
    # Pressing Enter accepts the default (yes).
    c = click.confirm(
        click.style('Your Choice?[yn] (q-quit/b-back)?', fg='cyan'),
        default=True,
    )
    return c
def soft_break(self, el, text):
    """Apply a soft break: append a newline for paragraph elements of
    Word (docx) and PowerPoint (pptx) documents.
    """
    # Word documents break on 'w'-namespace paragraph elements.
    if self.type == 'docx':
        if el.namespace == self.namespaces['w'] and el.name == 'p':
            text.append('\n')
    # Slides break on 'a'-namespace paragraph elements.
    elif self.type == 'pptx':
        if el.namespace == self.namespaces['a'] and el.name == 'p':
            text.append('\n')
def mapping(self):
    """Get a mapping class for this model.

    This method will return a Mapping class for your model, generating it
    using settings from a `Mapping` class on your model (if one exists).
    The generated class is cached on the manager.
    """
    if not hasattr(self, "_mapping"):
        if hasattr(self.model, "Mapping"):
            # Mix the model's inline Mapping settings into the
            # Django-aware base mapping via a dynamically created class.
            mapping_klass = type("Mapping", (DjangoMapping, self.model.Mapping), {})
        else:
            # No inline Mapping: look one up elsewhere, falling back to
            # the plain DjangoMapping.
            mapping_klass = get_first_mapping(self.model)
            if mapping_klass is None:
                mapping_klass = DjangoMapping
        self._mapping = mapping_klass(self.model)
    return self._mapping
def init_from_str(self, entries):
    """Initialize the structured and textual data based on a string
    representing the entries. For detailed information about the format
    of this string, refer to the
    :func:`~taxi.timesheet.parser.parse_text` function.
    """
    self.lines = self.parser.parse_text(entries)
    # NOTE(review): assumes parse_text always yields a DateLine before the
    # first Entry; otherwise `current_date` would be unbound below.
    for line in self.lines:
        if isinstance(line, DateLine):
            current_date = line.date
            self[current_date] = self.default_factory(self, line.date)
        elif isinstance(line, Entry):
            # Maintain a doubly-linked chain between consecutive entries
            # of the same date.
            if len(self[current_date]) > 0:
                line.previous_entry = self[current_date][-1]
                self[current_date][-1].next_entry = line
            self[current_date].append(line)
def _respond ( self , resp ) : """Respond to the person waiting"""
response_queue = self . _response_queues . get ( timeout = 0.1 ) response_queue . put ( resp ) self . _completed_response_lines = [ ] self . _is_multiline = None
def write_input(self, output_dir, make_dir_if_not_present=True, include_cif=False):
    """Writes a set of VASP input to a directory.

    Args:
        output_dir (str): Directory to output the VASP input files
        make_dir_if_not_present (bool): Set to True if you want the
            directory (and the whole path) to be created if it is not
            present.
        include_cif (bool): Whether to write a CIF file in the output
            directory for easier opening by VESTA.
    """
    vinput = self.get_vasp_input()
    vinput.write_input(output_dir, make_dir_if_not_present=make_dir_if_not_present)
    if include_cif:
        s = vinput["POSCAR"].structure
        # CIF filename is the structure formula with whitespace removed
        # (e.g. "Fe2 O3" -> "Fe2O3.cif").
        fname = Path(output_dir) / ("%s.cif" % re.sub(r'\s', "", s.formula))
        s.to(filename=fname)
def get_pipeline_newsfeeds(self, pipeline_key, detail_level=None):
    '''Get the newsfeed for a pipeline.

    Args:
        pipeline_key: key of the pipeline
        detail_level: optional detail argument, one of ['ALL', 'CONDENSED']

    Returns:
        list of feed dicts; parse at your convenience
    '''
    uri = '/'.join([
        self.api_uri,
        self.pipelines_suffix,
        pipeline_key,
        self.newsfeed_suffix,
    ])
    return self._get_newsfeeds(uri, detail_level)
def _rfc3339_to_datetime(dt_str):
    """Convert a microsecond-precision RFC 3339 timestamp to a native datetime.

    :type dt_str: str
    :param dt_str: The string to convert.

    :rtype: :class:`datetime.datetime`
    :returns: UTC-aware datetime created from the string.
    """
    naive = datetime.datetime.strptime(dt_str, _RFC3339_MICROS)
    return naive.replace(tzinfo=UTC)
def args_from_config(func):
    """Decorator that injects parameters from the configuration.

    For each parameter of ``func`` that the caller did not supply
    (positionally or by keyword), the value is looked up by name in the
    global configuration and passed as a keyword argument.
    """
    func_args = signature(func).parameters

    @wraps(func)
    def wrapper(*args, **kwargs):
        config = get_config()
        for i, argname in enumerate(func_args):
            if len(args) > i or argname in kwargs:
                # Caller already supplied this argument; leave it alone.
                continue
            elif argname in config:
                kwargs[argname] = config[argname]
        try:
            # Validate the final call signature early so missing arguments
            # produce an error that points the user at the configuration.
            getcallargs(func, *args, **kwargs)
        except TypeError as exc:
            msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
            exc.args = (msg,)
            raise exc
        return func(*args, **kwargs)

    wrapper.__wrapped__ = func
    return wrapper
def _refresh_oath_token(self):
    """Refresh Monzo OAuth 2 token.

    Official docs:
        https://monzo.com/docs/#refreshing-access

    :raises CantRefreshTokenError: when the token couldn't be refreshed

    NOTE(review): the method name says "oath" (typo for "oauth"); renaming
    would break callers, so it is kept. The original docstring claimed
    ``UnableToRefreshTokenException`` but the code raises
    ``CantRefreshTokenError``.
    """
    url = urljoin(self.api_url, '/oauth2/token')
    data = {
        'grant_type': 'refresh_token',
        'client_id': self._client_id,
        'client_secret': self._client_secret,
        'refresh_token': self._token['refresh_token'],
    }
    token_response = requests.post(url, data=data)
    token = token_response.json()
    # Not ideal, but that's how Monzo API returns errors
    if 'error' in token:
        raise CantRefreshTokenError(
            "Unable to refresh the token: {}".format(token)
        )
    self._token = token
    self._save_token_on_disk()
def recommended_overlap(name, nfft=None):
    """Returns the recommended fractional overlap for the given window.

    If ``nfft`` is given, the return is in samples.

    Parameters
    ----------
    name : `str`
        the name of the window you are using
    nfft : `int`, optional
        the length of the window

    Returns
    -------
    rov : `float`, `int`
        the recommended overlap (ROV) for the given window, in samples if
        ``nfft`` is given (`int`), otherwise fractional (`float`)

    Examples
    --------
    >>> recommended_overlap('hann')
    0.5
    >>> recommended_overlap('blackmanharris', nfft=128)
    85
    """
    try:
        name = canonical_name(name)
    except KeyError as exc:
        raise ValueError(str(exc))
    try:
        fraction = ROV[name]
    except KeyError:
        raise ValueError("no recommended overlap for %r window" % name)
    # Convert the fractional overlap to a whole number of samples.
    if nfft:
        return int(ceil(nfft * fraction))
    return fraction
def restore_descriptor(self, converted_descriptor):
    """Restore a Table Schema descriptor from a BigQuery schema."""
    restored_fields = []
    for bq_field in converted_descriptor['fields']:
        descriptor_field = {
            'name': bq_field['name'],
            'type': self.restore_type(bq_field['type']),
        }
        # Any non-NULLABLE mode (REQUIRED/REPEATED) maps to a required
        # constraint on the descriptor side.
        if bq_field.get('mode', 'NULLABLE') != 'NULLABLE':
            descriptor_field['constraints'] = {'required': True}
        restored_fields.append(descriptor_field)
    return {'fields': restored_fields}
def get_additional_actions(self, reftrack):
    """Return a list of additional actions to provide in the menu of the
    given reftrack, e.g. an entry that selects the entity in your program.

    Delegates to :meth:`ReftypeInterface.get_additional_actions` of the
    interface registered for the reftrack's type (base implementation
    returns an empty list).

    :param reftrack: the reftrack to return the actions for
    :type reftrack: :class:`Reftrack`
    :returns: a list of :class:`ReftrackAction`
    :rtype: list
    :raises: None
    """
    typ = reftrack.get_typ()
    interface = self.get_typ_interface(typ)
    return interface.get_additional_actions(reftrack)
def remove_child_gradebook(self, gradebook_id, child_id):
    """Removes a child from a gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of a gradebook
    arg:    child_id (osid.id.Id): the ``Id`` of the child to remove
    raise:  NotFound - ``gradebook_id`` not a parent of ``child_id``
    raise:  NullArgument - ``gradebook_id`` or ``child_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_child_bin_template
    catalog_session = self._catalog_session
    if catalog_session is not None:
        return catalog_session.remove_child_catalog(catalog_id=gradebook_id, child_id=child_id)
    return self._hierarchy_session.remove_child(id_=gradebook_id, child_id=child_id)
def get_permission_requests(parser, token):
    """Retrieves all permissions requests associated with the given obj and
    user and assigns the result to a context variable.

    Syntax::

        {% get_permission_requests obj %}
        {% for perm in permissions %}
            {{ perm }}
        {% endfor %}

        {% get_permission_requests obj as "my_permissions" %}
        {% get_permission_requests obj for request.user as "my_permissions" %}
    """
    # approved=False selects pending permission *requests* rather than
    # granted permissions; parsing is delegated to the shared node handler.
    return PermissionsForObjectNode.handle_token(parser, token, approved=False, name='"permission_requests"')
def on(device):
    '''
    Turns on the quota system

    CLI Example:

    .. code-block:: bash

        salt '*' quota.on
    '''
    # quotaon prints nothing useful on success; always report True.
    __salt__['cmd.run']('quotaon {0}'.format(device), python_shell=False)
    return True
def _nest_at_rules ( self , rule , scope , block ) : """Implements @ - blocks"""
# TODO handle @ charset , probably ? # Interpolate the current block # TODO this seems like it should be done in the block header . and more # generally ? calculator = self . _make_calculator ( rule . namespace ) if block . header . argument : # TODO is this correct ? do ALL at - rules ALWAYS allow both vars and # interpolation ? node = calculator . parse_vars_and_interpolations ( block . header . argument ) block . header . argument = node . evaluate ( calculator ) . render ( ) # TODO merge into RuleAncestry new_ancestry = list ( rule . ancestry . headers ) if block . directive == '@media' and new_ancestry : for i , header in reversed ( list ( enumerate ( new_ancestry ) ) ) : if header . is_selector : continue elif header . directive == '@media' : new_ancestry [ i ] = BlockAtRuleHeader ( '@media' , "%s and %s" % ( header . argument , block . argument ) ) break else : new_ancestry . insert ( i , block . header ) else : new_ancestry . insert ( 0 , block . header ) else : new_ancestry . append ( block . header ) rule . descendants += 1 new_rule = SassRule ( source_file = rule . source_file , import_key = rule . import_key , lineno = block . lineno , num_header_lines = block . header . num_lines , unparsed_contents = block . unparsed_contents , legacy_compiler_options = rule . legacy_compiler_options , options = rule . options . copy ( ) , # properties # extends _ selectors ancestry = RuleAncestry ( new_ancestry ) , namespace = rule . namespace . derive ( ) , nested = rule . nested + 1 , ) self . rules . append ( new_rule ) rule . namespace . use_import ( rule . source_file ) self . manage_children ( new_rule , scope ) self . _warn_unused_imports ( new_rule )
def apply_kwargs(func, **kwargs):
    """Call *func* with kwargs, but only those kwargs that it accepts."""
    accepted = signature(func).parameters
    filtered = {name: kwargs[name] for name in accepted if name in kwargs}
    return func(**filtered)
def refresh_index(meta, index) -> None:
    """Recalculate the projection for the given index.

    :param meta: model.Meta to find columns by name
    :param index: the index to refresh
    """
    # All projections include the model keys plus the index keys.
    key_columns = set.union(meta.keys, index.keys)
    projection = index.projection
    mode = projection["mode"]

    if mode == "keys":
        projection["included"] = key_columns
    elif mode == "all":
        projection["included"] = meta.columns
    elif mode == "include":  # pragma: no branch
        # "include" may be given as column names or as column objects.
        if all(isinstance(entry, str) for entry in projection["included"]):
            projection["included"] = {meta.columns_by_name[n] for n in projection["included"]}
        else:
            projection["included"] = set(projection["included"])
        projection["included"].update(key_columns)

    # Strict projections expose only what was projected; non-strict ones
    # make every model column available.
    if projection["strict"]:
        projection["available"] = projection["included"]
    else:
        projection["available"] = meta.columns
def validate_version_pragma(version_str: str, start: ParserPosition) -> None:
    """Validate a version pragma directive against the current compiler
    version; only major.minor must match.

    :raises VersionException: when the file's version is incompatible
    """
    from vyper import (
        __version__,
    )

    # Everything after the '@version' marker is the declared file version.
    file_version = version_str.split('@version')[1].strip()
    file_major, file_minor, file_patch = _parse_version_str(file_version, start)
    compiler_major, compiler_minor, compiler_patch = _parse_version_str(__version__, start)

    # Patch-level differences are tolerated; major/minor must agree.
    if (file_major, file_minor) != (compiler_major, compiler_minor):
        raise VersionException(
            f'File version "{file_version}" is not compatible '
            f'with compiler version "{__version__}"',
            start,
        )
def merge_cameras(self):
    """Merge the camera dicts of every sync module into a single
    case-insensitive dict.
    """
    combined = CaseInsensitiveDict({})
    for sync_name in self.sync:
        combined = merge_dicts(combined, self.sync[sync_name].cameras)
    return combined
def run_cmd(cmd, show_output=True, raise_errs=True, **kwargs):
    """Run a console command.

    When show_output=True, prints output and returns exit code, otherwise
    returns output. When raise_errs=True, raises a
    subprocess.CalledProcessError if the command fails.
    """
    internal_assert(cmd and isinstance(cmd, list), "console commands must be passed as non-empty lists")
    try:
        from shutil import which
    except ImportError:
        # Python 2 has no shutil.which; fall back to the raw command name.
        pass
    else:
        # Resolve the executable to an absolute path when possible.
        cmd[0] = which(cmd[0]) or cmd[0]
    logger.log_cmd(cmd)
    try:
        if show_output and raise_errs:
            return subprocess.check_call(cmd, **kwargs)
        elif show_output:
            return subprocess.call(cmd, **kwargs)
        else:
            # Capture output ourselves so it can be returned and attached
            # to any raised CalledProcessError.
            stdout, stderr, retcode = call_output(cmd, **kwargs)
            output = "".join(stdout + stderr)
            if retcode and raise_errs:
                raise subprocess.CalledProcessError(retcode, cmd, output=output)
            return output
    except OSError:
        # The command could not be launched at all; log and synthesize a
        # return code consistent with the requested reporting mode.
        logger.log_exc()
        if raise_errs:
            raise subprocess.CalledProcessError(oserror_retcode, cmd)
        elif show_output:
            return oserror_retcode
        else:
            return ""
def name_build(self, name, is_policy=False, prefix=True):
    """Build a role/policy name from the configured prefix and base name.

    :param name: Name of the role/policy
    :param is_policy: True if the "-policy" suffix should be appended
    :param prefix: True if the configured role-name prefix should be
        prepended
    :return: Joined name
    """
    # Fix: the original used `str` as a local variable, shadowing the
    # builtin; use a descriptive local name instead.
    result = name
    # Add prefix
    if prefix:
        result = self.__role_name_prefix + result
    # Add policy suffix
    if is_policy:
        result = result + "-policy"
    return result
def printTemporalMemory(tm, outFile):
    """Given an instance of TemporalMemory, print out the relevant parameters.

    NOTE(review): this uses the Python 2 ``print >> file`` statement and
    ``str.encode`` on the table text, so this function is Python 2 only.
    """
    # Collect each TM parameter into a two-column pretty-printed table.
    table = PrettyTable(["Parameter name", "Value", ])
    table.add_row(["columnDimensions", tm.getColumnDimensions()])
    table.add_row(["cellsPerColumn", tm.getCellsPerColumn()])
    table.add_row(["activationThreshold", tm.getActivationThreshold()])
    table.add_row(["minThreshold", tm.getMinThreshold()])
    table.add_row(["maxNewSynapseCount", tm.getMaxNewSynapseCount()])
    table.add_row(["permanenceIncrement", tm.getPermanenceIncrement()])
    table.add_row(["permanenceDecrement", tm.getPermanenceDecrement()])
    table.add_row(["initialPermanence", tm.getInitialPermanence()])
    table.add_row(["connectedPermanence", tm.getConnectedPermanence()])
    table.add_row(["predictedSegmentDecrement", tm.getPredictedSegmentDecrement()])
    print >> outFile, table.get_string().encode("utf-8")
def want_service_notification(self, timeperiods, timestamp, state, n_type, business_impact, cmd=None):
    # pylint: disable=too-many-return-statements
    """Check if notification options match the state of the service.

    Notification is NOT wanted in ONE of the following cases::

    * service notifications are disabled
    * cmd is not in service_notification_commands
    * business_impact < self.min_business_impact
    * service_notification_period is not valid
    * state does not match service_notification_options for problem,
      recovery and flapping
    * state does not match host_notification_options for downtime

    :param timestamp: time we want to notify the contact (usually now)
    :type timestamp: int
    :param state: host or service state ("WARNING", "CRITICAL" ..)
    :type state: str
    :param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
    :type n_type: str
    :param business_impact: impact of this service
    :type business_impact: int
    :param cmd: command launched to notify the contact
    :type cmd: str
    :return: True if no exclusion condition is matched, otherwise False
    :rtype: bool
    TODO: Simplify function
    """
    if not self.service_notifications_enabled:
        return False

    # Maybe the command we ask for are not for us, but for another notification ways
    # on the same contact. If so, bail out
    if cmd and cmd not in self.service_notification_commands:
        return False

    # If the business_impact is not high enough, we bail out
    if business_impact < self.min_business_impact:
        return False

    notif_period = timeperiods[self.service_notification_period]
    in_notification_period = notif_period.is_time_valid(timestamp)
    # 'n' in the options means: never notify this contact for services.
    if 'n' in self.service_notification_options:
        return False
    if in_notification_period:
        short_states = {
            u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c',
            u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's'
        }
        if n_type == u'PROBLEM' and state in short_states:
            return short_states[state] in self.service_notification_options
        # NOTE(review): this branch tests n_type (u'RECOVERY' is itself a
        # key of short_states, mapping to 'r') rather than `state` -- the
        # net effect is "is 'r' in the options". Confirm intent upstream.
        if n_type == u'RECOVERY' and n_type in short_states:
            return short_states[n_type] in self.service_notification_options
        if n_type == u'ACKNOWLEDGEMENT':
            return in_notification_period
        if n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'):
            return 'f' in self.service_notification_options
        if n_type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'):
            # No notification when a downtime was cancelled. Is that true??
            # According to the documentation we need to look at _host_ options
            return 's' in self.host_notification_options

    return False
def stop_timer(self, request_len, reply_len, server_time=None, exception=False):
    """Complete the measurement of a CIM-XML operation.

    This low-level method is called by pywbem at the end of an operation.
    It captures the elapsed time and updates the statistics, if statistics
    are enabled for the connection.

    Parameters:

      request_len (:term:`integer`)
        Size of the HTTP body of the CIM-XML request message, in Bytes.

      reply_len (:term:`integer`)
        Size of the HTTP body of the CIM-XML response message, in Bytes.

      server_time (:term:`number`)
        Time in seconds the server optionally reports it spent processing
        the request. If `None` (or 0), no server time is recorded.

      exception (:class:`py:bool`)
        Boolean that specifies whether an exception was raised while
        processing the operation.

    Returns:

      float: The elapsed time for the operation that just ended, or
      `None` if the statistics container holding this object is not
      enabled.

    Raises:

      RuntimeError: stop_timer() called without a preceding start_timer().
    """
    if not self.container.enabled:
        return None

    # Stop the timer started by start_timer().
    if self._start_time is None:
        raise RuntimeError('stop_timer() called without preceding '
                           'start_timer()')
    dt = time.time() - self._start_time
    self._start_time = None
    self._count += 1
    self._time_sum += dt
    self._request_len_sum += request_len
    self._reply_len_sum += reply_len
    if exception:
        self._exception_count += 1

    if dt > self._time_max:
        self._time_max = dt
    if dt < self._time_min:
        self._time_min = dt

    if server_time:
        self._server_time_stored = True
        self._server_time_sum += server_time
        # Bug fix: the min/max must track server_time itself; comparing
        # the client-side elapsed time dt here corrupted the statistics.
        if server_time > self._server_time_max:
            self._server_time_max = server_time
        if server_time < self._server_time_min:
            self._server_time_min = server_time

    if request_len > self._request_len_max:
        self._request_len_max = request_len
    if request_len < self._request_len_min:
        self._request_len_min = request_len

    if reply_len > self._reply_len_max:
        self._reply_len_max = reply_len
    if reply_len < self._reply_len_min:
        self._reply_len_min = reply_len

    return dt
def validate_vertex_field_directive_in_context(parent_location, vertex_field_name,
                                               directives, context):
    """Ensure the specified vertex field directives are legal in the current context.

    Raises GraphQLCompilationError if a directive conflicts with the enclosing
    @fold / @optional / @output_source traversal state recorded in `context`.
    """
    fold_d = directives.get('fold', None)
    optional_d = directives.get('optional', None)
    recurse_d = directives.get('recurse', None)
    output_source_d = directives.get('output_source', None)

    in_fold = 'fold' in context
    in_optional = 'optional' in context
    in_output_source = 'output_source' in context

    # All violation messages share the same location suffix.
    suffix = u'Parent location: {}, vertex field name: {}'.format(
        parent_location, vertex_field_name)

    # (violation condition, message prefix) pairs, checked in order.
    checks = (
        (fold_d and in_fold,
         u'@fold is not allowed within a @fold traversal! '),
        (optional_d and in_fold,
         u'@optional is not allowed within a @fold traversal! '),
        (output_source_d and in_fold,
         u'@output_source is not allowed within a @fold traversal! '),
        (recurse_d and in_fold,
         u'@recurse is not allowed within a @fold traversal! '),
        (in_output_source and not fold_d,
         u'Found non-fold vertex field after the vertex marked output source! '),
        (in_optional and fold_d,
         u'@fold is not allowed within a @optional traversal! '),
        (in_optional and output_source_d,
         u'@output_source is not allowed within a @optional traversal! '),
    )
    for is_violation, prefix in checks:
        if is_violation:
            raise GraphQLCompilationError(prefix + suffix)
def _get_server(vm_, volumes, nics):
    '''
    Construct a Server instance from the cloud profile config.
    '''
    # Size with any component overrides from the profile applied.
    size = _override_size(vm_)

    def _profile_value(key):
        # Optional per-profile setting; None when unset.
        return config.get_cloud_config_value(
            key, vm_, __opts__, default=None, search_global=False
        )

    return Server(
        name=vm_['name'],
        ram=size['ram'],
        availability_zone=_profile_value('availability_zone'),
        cores=size['cores'],
        cpu_family=_profile_value('cpu_family'),
        create_volumes=volumes,
        nics=nics
    )
def permute_data(arrays, random_state=None):
    """Permute multiple numpy arrays with the same (random) order.

    Parameters
    ----------
    arrays : sequence of indexable arrays
        All arrays must have the same length.
    random_state : numpy random state, optional
        Source of randomness; defaults to the global ``np.random`` module.

    Returns
    -------
    list
        The input arrays, each reindexed by one shared random permutation.

    Raises
    ------
    ValueError
        If the arrays do not all have the same length.
    """
    if any(len(a) != len(arrays[0]) for a in arrays):
        raise ValueError('All arrays must be the same length.')
    # `is None` rather than truthiness: a caller-supplied random state must
    # not be silently discarded just because it happens to evaluate falsy.
    if random_state is None:
        random_state = np.random
    order = random_state.permutation(len(arrays[0]))
    return [a[order] for a in arrays]
def check_newline_after_last_paragraph(self, definition, docstring):
    """D209: Put multi-line docstring closing quotes on a separate line.

    Unless the entire docstring fits on one line, the closing quotes
    should be placed on a line by themselves.
    """
    if not docstring:
        return
    content_lines = [line
                     for line in ast.literal_eval(docstring).split('\n')
                     if not is_blank(line)]
    if len(content_lines) <= 1:
        # Single-line docstring: closing quotes may stay on the same line.
        return
    last_raw_line = docstring.split("\n")[-1].strip()
    if last_raw_line not in ('"""', "'''"):
        return violations.D209()
def ashrae_revised_clear_sky(altitudes, tb, td, use_2017_model=False):
    """Calculate solar flux for an ASHRAE Revised Clear Sky ("Tau Model").

    By default, returns clear sky values following the methods originally
    published in the ASHRAE 2009 HOF.

    Args:
        altitudes: A list of solar altitudes in degrees.
        tb: Beam optical depth of the sky.
        td: Diffuse optical depth of the sky.
        use_2017_model: Set to True to use the coefficients of the Tau model
            revision from the 2013/2017 HOF. Correct use of the new version
            requires matching updated tb and td values; .stat files from the
            US DoE still ship the 2009 values, hence the False default.

    Returns:
        dir_norm_rad: Direct normal radiation per altitude, in W/m2.
        dif_horiz_rad: Diffuse horizontal radiation per altitude, in W/m2.
    """
    # Air-mass exponents; the 2013/2017 revision uses different coefficients.
    if use_2017_model is True:
        ab = 1.454 - (0.406 * tb) - (0.268 * td) - (0.021 * tb * td)
        ad = 0.507 + (0.205 * tb) - (0.080 * td) - (0.190 * tb * td)
    else:
        ab = 1.219 - (0.043 * tb) - (0.151 * td) - (0.204 * tb * td)
        ad = 0.202 + (0.852 * tb) - (0.007 * td) - (0.357 * tb * td)

    dir_norm_rad = []
    dif_horiz_rad = []
    for altitude in altitudes:
        if altitude <= 0:
            # Sun at or below the horizon: no radiation.
            dir_norm_rad.append(0)
            dif_horiz_rad.append(0)
        else:
            # Relative air mass between the top of the atmosphere and earth.
            air_mass = get_relative_airmass(altitude)
            dir_norm_rad.append(1415 * math.exp(-tb * math.pow(air_mass, ab)))
            dif_horiz_rad.append(1415 * math.exp(-td * math.pow(air_mass, ad)))
    return dir_norm_rad, dif_horiz_rad
def _to_mongo_query(query):
    """Translate a Sacred Web API query into a MongoDB query.

    The input is a dict of the form
    ``{"type": "and"|"or", "filters": [...]}`` where each filter is either a
    simple ``{"field", "operator", "value"}`` clause or a nested subquery of
    the same shape.

    :param query: A query in the Sacred Web API format.
    :return: The equivalent MongoDB query dict.
    :raises ValueError: If the query type is neither "and" nor "or"
        (and there is at least one filter).
    """
    translated = []
    for clause in query["filters"]:
        if clause.get("type") is None:
            # Leaf clause: field/operator/value.
            translated.append(MongoRunDAO._simple_clause_to_query(clause))
        else:
            # Nested and/or subquery: recurse.
            translated.append(MongoRunDAO._to_mongo_query(clause))
    # An empty filter list matches everything, regardless of query type.
    if not translated:
        return {}
    operator = query["type"]
    if operator == "and":
        return {"$and": translated}
    if operator == "or":
        return {"$or": translated}
    raise ValueError("Unexpected query type %s" % query.get("type"))
def insert(self, key, value, ttl=0, format=None, persist_to=0, replicate_to=0):
    """Store an object in Couchbase only if the key does not exist yet.

    Follows the same conventions as :meth:`upsert`, but the value is stored
    only when it does not already exist; otherwise nothing is written.
    There is no `cas` parameter: `insert` can only succeed when the key is
    absent from the server, so no CAS value can apply.

    :raise: :exc:`.KeyExistsError` if the key already exists

    .. seealso:: :meth:`upsert`, :meth:`insert_multi`
    """
    durability = dict(persist_to=persist_to, replicate_to=replicate_to)
    return _Base.insert(self, key, value, ttl=ttl, format=format, **durability)
def _to_add_with_category(self, catid):
    '''
    Render the "add" page for info2, pre-filled for a category.

    :param catid: the uid of the category
    '''
    cat_info = MCategory.get_by_uid(catid)
    page_kwd = {
        'uid': self._gen_uid(),
        'userid': self.userinfo.user_name if self.userinfo else '',
        'gcat0': catid,
        'parentname': MCategory.get_by_uid(cat_info.pid).name,
        'catname': MCategory.get_by_uid(catid).name,
    }
    self.render('autogen/add/add_{0}.html'.format(catid),
                userinfo=self.userinfo,
                kwd=page_kwd)
def fit(self, X, y, sample_weight=None):
    """Fit the Genetic Program according to X, y.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    sample_weight : array-like, shape = [n_samples], optional
        Weights applied to individual samples.

    Returns
    -------
    self : object
        Returns self.
    """
    random_state = check_random_state(self.random_state)

    # Check arrays
    if isinstance(self, ClassifierMixin):
        # Classification: encode class labels as integer indices and make
        # sure exactly two classes remain after weight trimming.
        X, y = check_X_y(X, y, y_numeric=False)
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        n_trim_classes = np.count_nonzero(np.bincount(y, sample_weight))
        if n_trim_classes != 2:
            raise ValueError("y contains %d class after sample_weight "
                             "trimmed classes with zero weights, while 2 "
                             "classes are required."
                             % n_trim_classes)
        self.n_classes_ = len(self.classes_)
    else:
        X, y = check_X_y(X, y, y_numeric=True)
    if sample_weight is not None:
        sample_weight = check_array(sample_weight, ensure_2d=False)

    _, self.n_features_ = X.shape

    # Validate hall_of_fame / n_components sizes relative to population.
    hall_of_fame = self.hall_of_fame
    if hall_of_fame is None:
        hall_of_fame = self.population_size
    if hall_of_fame > self.population_size or hall_of_fame < 1:
        raise ValueError('hall_of_fame (%d) must be less than or equal to '
                         'population_size (%d).' % (self.hall_of_fame,
                                                    self.population_size))
    n_components = self.n_components
    if n_components is None:
        n_components = hall_of_fame
    if n_components > hall_of_fame or n_components < 1:
        raise ValueError('n_components (%d) must be less than or equal to '
                         'hall_of_fame (%d).' % (self.n_components,
                                                 self.hall_of_fame))

    # Resolve function names / _Function objects into a concrete set.
    self._function_set = []
    for function in self.function_set:
        if isinstance(function, str):
            if function not in _function_map:
                raise ValueError('invalid function name %s found in '
                                 '`function_set`.' % function)
            self._function_set.append(_function_map[function])
        elif isinstance(function, _Function):
            self._function_set.append(function)
        else:
            raise ValueError('invalid type %s found in `function_set`.'
                             % type(function))
    if not self._function_set:
        raise ValueError('No valid functions found in `function_set`.')

    # For point-mutation to find a compatible replacement node
    self._arities = {}
    for function in self._function_set:
        arity = function.arity
        self._arities[arity] = self._arities.get(arity, [])
        self._arities[arity].append(function)

    # Resolve the fitness metric, constrained by the estimator type.
    if isinstance(self.metric, _Fitness):
        self._metric = self.metric
    elif isinstance(self, RegressorMixin):
        if self.metric not in ('mean absolute error', 'mse', 'rmse',
                               'pearson', 'spearman'):
            raise ValueError('Unsupported metric: %s' % self.metric)
        self._metric = _fitness_map[self.metric]
    elif isinstance(self, ClassifierMixin):
        if self.metric != 'log loss':
            raise ValueError('Unsupported metric: %s' % self.metric)
        self._metric = _fitness_map[self.metric]
    elif isinstance(self, TransformerMixin):
        if self.metric not in ('pearson', 'spearman'):
            raise ValueError('Unsupported metric: %s' % self.metric)
        self._metric = _fitness_map[self.metric]

    # Cumulative probabilities of the genetic operations; the remainder
    # (up to 1.0) is plain reproduction.
    self._method_probs = np.array([self.p_crossover,
                                   self.p_subtree_mutation,
                                   self.p_hoist_mutation,
                                   self.p_point_mutation])
    self._method_probs = np.cumsum(self._method_probs)
    if self._method_probs[-1] > 1:
        raise ValueError('The sum of p_crossover, p_subtree_mutation, '
                         'p_hoist_mutation and p_point_mutation should '
                         'total to 1.0 or less.')

    if self.init_method not in ('half and half', 'grow', 'full'):
        raise ValueError('Valid program initializations methods include '
                         '"grow", "full" and "half and half". Given %s.'
                         % self.init_method)

    if not((isinstance(self.const_range, tuple) and
            len(self.const_range) == 2) or self.const_range is None):
        raise ValueError('const_range should be a tuple with length two, '
                         'or None.')

    if (not isinstance(self.init_depth, tuple) or
            len(self.init_depth) != 2):
        raise ValueError('init_depth should be a tuple with length two.')
    if self.init_depth[0] > self.init_depth[1]:
        raise ValueError('init_depth should be in increasing numerical '
                         'order: (min_depth, max_depth).')

    if self.feature_names is not None:
        if self.n_features_ != len(self.feature_names):
            raise ValueError('The supplied `feature_names` has different '
                             'length to n_features. Expected %d, got %d.'
                             % (self.n_features_, len(self.feature_names)))
        for feature_name in self.feature_names:
            if not isinstance(feature_name, str):
                raise ValueError('invalid type %s found in '
                                 '`feature_names`.' % type(feature_name))

    if self.transformer is not None:
        if isinstance(self.transformer, _Function):
            self._transformer = self.transformer
        elif self.transformer == 'sigmoid':
            self._transformer = sigmoid
        else:
            raise ValueError('Invalid `transformer`. Expected either '
                             '"sigmoid" or _Function object, got %s' %
                             type(self.transformer))
        if self._transformer.arity != 1:
            raise ValueError('Invalid arity for `transformer`. Expected 1, '
                             'got %d.' % (self._transformer.arity))

    # Bundle the resolved settings to be shipped to the parallel workers.
    params = self.get_params()
    params['_metric'] = self._metric
    if hasattr(self, '_transformer'):
        params['_transformer'] = self._transformer
    else:
        params['_transformer'] = None
    params['function_set'] = self._function_set
    params['arities'] = self._arities
    params['method_probs'] = self._method_probs

    if not self.warm_start or not hasattr(self, '_programs'):
        # Free allocated memory, if any
        self._programs = []
        self.run_details_ = {'generation': [],
                             'average_length': [],
                             'average_fitness': [],
                             'best_length': [],
                             'best_fitness': [],
                             'best_oob_fitness': [],
                             'generation_time': []}

    prior_generations = len(self._programs)
    n_more_generations = self.generations - prior_generations

    if n_more_generations < 0:
        raise ValueError('generations=%d must be larger or equal to '
                         'len(_programs)=%d when warm_start==True'
                         % (self.generations, len(self._programs)))
    elif n_more_generations == 0:
        fitness = [program.raw_fitness_ for program in self._programs[-1]]
        warn('Warm-start fitting without increasing n_estimators does not '
             'fit new programs.')

    if self.warm_start:
        # Generate and discard seeds that would have been produced on the
        # initial fit call.
        for i in range(len(self._programs)):
            _ = random_state.randint(MAX_INT, size=self.population_size)

    if self.verbose:
        # Print header fields
        self._verbose_reporter()

    # Main evolution loop: one iteration per generation.
    for gen in range(prior_generations, self.generations):

        start_time = time()

        if gen == 0:
            parents = None
        else:
            parents = self._programs[gen - 1]

        # Parallel loop
        n_jobs, n_programs, starts = _partition_estimators(
            self.population_size, self.n_jobs)
        seeds = random_state.randint(MAX_INT, size=self.population_size)

        population = Parallel(n_jobs=n_jobs,
                              verbose=int(self.verbose > 1))(
            delayed(_parallel_evolve)(n_programs[i], parents, X, y,
                                      sample_weight,
                                      seeds[starts[i]:starts[i + 1]],
                                      params)
            for i in range(n_jobs))

        # Reduce, maintaining order across different n_jobs
        population = list(itertools.chain.from_iterable(population))

        fitness = [program.raw_fitness_ for program in population]
        length = [program.length_ for program in population]

        # Penalized fitness: optionally auto-derive the parsimony
        # coefficient from the covariance of length and fitness.
        parsimony_coefficient = None
        if self.parsimony_coefficient == 'auto':
            parsimony_coefficient = (np.cov(length, fitness)[1, 0] /
                                     np.var(length))
        for program in population:
            program.fitness_ = program.fitness(parsimony_coefficient)

        self._programs.append(population)

        # Remove old programs that didn't make it into the new population.
        if not self.low_memory:
            for old_gen in np.arange(gen, 0, -1):
                indices = []
                for program in self._programs[old_gen]:
                    if program is not None:
                        for idx in program.parents:
                            if 'idx' in idx:
                                indices.append(program.parents[idx])
                indices = set(indices)
                for idx in range(self.population_size):
                    if idx not in indices:
                        self._programs[old_gen - 1][idx] = None
        elif gen > 0:
            # Remove old generations
            self._programs[gen - 1] = None

        # Record run details
        if self._metric.greater_is_better:
            best_program = population[np.argmax(fitness)]
        else:
            best_program = population[np.argmin(fitness)]

        self.run_details_['generation'].append(gen)
        self.run_details_['average_length'].append(np.mean(length))
        self.run_details_['average_fitness'].append(np.mean(fitness))
        self.run_details_['best_length'].append(best_program.length_)
        self.run_details_['best_fitness'].append(best_program.raw_fitness_)
        oob_fitness = np.nan
        if self.max_samples < 1.0:
            oob_fitness = best_program.oob_fitness_
        self.run_details_['best_oob_fitness'].append(oob_fitness)
        generation_time = time() - start_time
        self.run_details_['generation_time'].append(generation_time)

        if self.verbose:
            self._verbose_reporter(self.run_details_)

        # Check for early stopping
        if self._metric.greater_is_better:
            best_fitness = fitness[np.argmax(fitness)]
            if best_fitness >= self.stopping_criteria:
                break
        else:
            best_fitness = fitness[np.argmin(fitness)]
            if best_fitness <= self.stopping_criteria:
                break

    if isinstance(self, TransformerMixin):
        # Find the best individuals in the final generation
        fitness = np.array(fitness)
        if self._metric.greater_is_better:
            hall_of_fame = fitness.argsort()[::-1][:self.hall_of_fame]
        else:
            hall_of_fame = fitness.argsort()[:self.hall_of_fame]
        evaluation = np.array([gp.execute(X) for gp in
                               [self._programs[-1][i] for
                                i in hall_of_fame]])
        if self.metric == 'spearman':
            evaluation = np.apply_along_axis(rankdata, 1, evaluation)

        # Pairwise correlation of the hall-of-fame programs' outputs;
        # the diagonal is zeroed so a program is never "correlated with
        # itself" when picking the most correlated pair below.
        with np.errstate(divide='ignore', invalid='ignore'):
            correlations = np.abs(np.corrcoef(evaluation))
        np.fill_diagonal(correlations, 0.)
        components = list(range(self.hall_of_fame))
        indices = list(range(self.hall_of_fame))
        # Iteratively remove least fit individual of most correlated pair
        while len(components) > self.n_components:
            most_correlated = np.unravel_index(np.argmax(correlations),
                                               correlations.shape)
            # The correlation matrix is sorted by fitness, so identifying
            # the least fit of the pair is simply getting the higher index
            worst = max(most_correlated)
            components.pop(worst)
            indices.remove(worst)
            correlations = correlations[:, indices][indices, :]
            indices = list(range(len(components)))
        self._best_programs = [self._programs[-1][i] for i in
                               hall_of_fame[components]]

    else:
        # Find the best individual in the final generation
        if self._metric.greater_is_better:
            self._program = self._programs[-1][np.argmax(fitness)]
        else:
            self._program = self._programs[-1][np.argmin(fitness)]

    return self
def get_index(self, key: Hashable) -> pd.Index:
    """Return the index for dimension ``key``, defaulting to a RangeIndex.

    Raises KeyError if ``key`` is not one of this object's dimensions.
    """
    if key not in self.dims:
        raise KeyError(key)
    try:
        index = self.indexes[key]
    except KeyError:
        # No explicit index for this dimension: build a default integer
        # index. dtype is pinned to int64 so an empty range behaves the
        # same on Python 2.
        index = pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)
    return index
def __expand_meta_datas(meta_datas, meta_datas_expanded):
    """Flatten arbitrarily nested lists of meta_data dicts into one level.

    Args:
        meta_datas (dict/list): a dict, or a (possibly nested) list of dicts.
        meta_datas_expanded (list): output accumulator, extended in place.

    Examples:
        >>> out = []
        >>> __expand_meta_datas([dict1, [dict2, dict3]], out)  # doctest: +SKIP
        >>> # out is now [dict1, dict2, dict3]
    """
    if isinstance(meta_datas, dict):
        meta_datas_expanded.append(meta_datas)
        return
    if isinstance(meta_datas, list):
        # Recurse into each element so any nesting depth is flattened.
        for item in meta_datas:
            __expand_meta_datas(item, meta_datas_expanded)
def mask_by_ind(self, inds):
    """Return a copy of this image with everything outside ``inds`` zeroed.

    Parameters
    ----------
    inds : :obj:`numpy.ndarray` of int
        Array of (row, col) index pairs. Pixels at these indices keep
        their values; all other pixels are set to zero.

    Returns
    -------
    :obj:`Image`
        A new image of the same type and frame.
    """
    masked = np.zeros(self.shape)
    # Copy over only the requested pixels.
    for pair in inds:
        row, col = pair[0], pair[1]
        masked[row, col] = self.data[row, col]
    return type(self)(masked.astype(self.data.dtype), self.frame)
def toggle(self, rows):
    'Toggle selection of given `rows`.'
    for row in Progress(rows, 'toggling', total=len(self.rows)):
        # unselectRow returns falsy when the row was not selected;
        # in that case select it instead.
        if not self.unselectRow(row):
            self.selectRow(row)
def accounts_frontiers(self, accounts):
    """Return the head (frontier) block hash for each account in **accounts**.

    :param accounts: Accounts to return frontier blocks for
    :type accounts: list of str

    :raises: :py:exc:`nano.rpc.RPCException`

    :return: mapping of account id to frontier block hash (empty dict when
        the node returns no frontiers)
    :rtype: dict
    """
    processed = self._process_value(accounts, 'list')
    response = self.call('accounts_frontiers', {"accounts": processed})
    frontiers = response.get('frontiers')
    # The node may omit or null the field; normalize to an empty dict.
    return frontiers if frontiers else {}
def logger(self):
    """Lazily build and cache the application-wide ("global") logger."""
    if self._logger:
        return self._logger

    builder = p_logging.ProsperLogger(
        self.PROGNAME,
        self.config.get_option('LOGGING', 'log_path'),
        config_obj=self.config)
    if self.verbose:
        # Verbose mode: debug-level logging only.
        builder.configure_debug_logger()
    else:
        # Tag webhook messages with host + version for traceability.
        id_string = '({platform}--{version})'.format(
            platform=platform.node(),
            version=self.VERSION)
        # Enable each webhook handler whose option is configured.
        webhook_handlers = (
            ('discord_webhook', builder.configure_discord_logger),
            ('slack_webhook', builder.configure_slack_logger),
            ('hipchat_webhook', builder.configure_hipchat_logger),
        )
        for option_name, configure in webhook_handlers:
            if self.config.get_option('LOGGING', option_name):
                configure(custom_args=id_string)
    self._logger = builder.get_logger()
    return self._logger
def get_literals(self, c, i, depth):
    """Get a string literal.

    Gather all the literal chars up to an opening curly brace or, when
    expanding within a group, a closing brace or comma. Also gathers chars
    between braces and commas within a group (is_expanding).

    `c` is the current character, `i` is the pattern iterator (supports
    rewind), and `depth` is the current brace-nesting depth. Returns a
    generator over the gathered literal expansions, or None when the end of
    the string is reached while expanding a group.
    """
    result = ['']
    is_dollar = False
    try:
        while c:
            # A '{' immediately after '$' is treated as literal, not a group.
            ignore_brace = is_dollar
            is_dollar = False

            if c == '$':
                is_dollar = True
            elif c == '\\':
                c = [self.get_escape(c, i)]
            elif not ignore_brace and c == '{':
                # Try and get the group
                index = i.index
                try:
                    seq = self.get_sequence(next(i), i, depth + 1)
                    if seq:
                        c = seq
                except StopIteration:
                    # Searched to end of string
                    # and still didn't find it.
                    i.rewind(i.index - index)
            elif self.is_expanding() and c in (',', '}'):
                # We are Expanding within a group and found a group delimiter
                # Return what we gathered before the group delimiters.
                i.rewind(1)
                return (x for x in result)

            # Squash the current set of literals.
            result = self.squash(result, [c] if isinstance(c, str) else c)

            c = next(i)
    except StopIteration:
        # Ran off the end of the pattern: within a group this means the
        # group was never closed.
        if self.is_expanding():
            return None

    return (x for x in result)
def remove_all_servers(self):
    """Unregister every WBEM server from this subscription manager.

    This also unregisters listeners from these servers and removes all
    owned indication subscriptions, owned indication filters, and owned
    listener destinations.

    Raises:

        Exceptions raised by :class:`~pywbem.WBEMConnection`.
    """
    # Snapshot the ids first: remove_server() mutates self._servers.
    server_ids = list(self._servers.keys())
    for server_id in server_ids:
        self.remove_server(server_id)
def update(self, gradient, step):
    """Update the search direction given the latest gradient and step.

    First call: steepest descent (status "SD") with the inverse Hessian
    initialized to the identity. Subsequent calls: quasi-Newton direction
    (status "QN") with a BFGS update of the inverse Hessian estimate.

    Note the statement order is significant: the previous gradient must be
    saved before it is overwritten, since the BFGS update needs both.
    """
    self.old_gradient = self.gradient
    self.gradient = gradient
    N = len(self.gradient)
    if self.inv_hessian is None:
        # update the direction
        self.direction = -self.gradient
        self.status = "SD"
        # new guess of the inverse hessian
        self.inv_hessian = np.identity(N, float)
    else:
        # update the direction
        self.direction = -np.dot(self.inv_hessian, self.gradient)
        self.status = "QN"
        # new guess of the inverse hessian (BFGS)
        # y = gradient difference, s = step; sy is regularized (+1e-5)
        # to avoid division by zero when the curvature term vanishes.
        y = self.gradient - self.old_gradient
        s = step
        sy = abs(np.dot(s, y)) + 1e-5
        A = np.outer(-y / sy, s)
        # Add 1 to the diagonal in place: A becomes (I - y s^T / sy).
        A.ravel()[::N + 1] += 1
        self.inv_hessian = (
            np.dot(np.dot(A.transpose(), self.inv_hessian), A) +
            np.outer(s / sy, s))
def kde(data, npoints=_npoints):
    """Locate the peak of a 1-d sample using a Gaussian kernel density estimate.

    Parameters:
        data    : The 1d data sample
        npoints : The number of kde points to evaluate

    Returns the peak location and the KDE value evaluated at that peak.
    """
    # Clip severe outliers (beyond 5 MADs from the median) so the KDE
    # samples concentrate on the parameter range of interest.
    med = np.median(data)
    mad = np.median(np.fabs(med - data))
    keep = (data > med - 5. * mad) & (data < med + 5. * mad)
    sample = data[keep]

    estimator = scipy.stats.gaussian_kde(sample)

    # A finer evaluation grid costs only computation time, nothing else.
    grid = np.linspace(np.min(sample), np.max(sample), npoints)
    density = estimator.evaluate(grid)
    peak = grid[np.argmax(density)]
    return peak, estimator.evaluate(peak)
def best(self):
    """Return the element with the highest count (ties favor the larger key)."""
    # Seed with -inf so any real count wins; key stays None for empty counts.
    best_count, best_key = -1e999999, None
    for key, count in iteritems(self.counts):
        # Tuple comparison: higher count wins, then larger key on ties.
        if (count, key) > (best_count, best_key):
            best_count, best_key = count, key
    return best_key
def query(number, domains, resolver=None):
    """Look for NAPTR RRs for the specified number in the specified domains.

    e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])

    The first domain that yields an answer wins; NXDOMAIN is raised only
    after every domain has failed.
    """
    if resolver is None:
        resolver = dns.resolver.get_default_resolver()
    for domain in domains:
        # Accept both text domains and dns.name.Name instances.
        if isinstance(domain, (str, unicode)):
            domain = dns.name.from_text(domain)
        qname = dns.e164.from_e164(number, domain)
        try:
            return resolver.query(qname, 'NAPTR')
        except dns.resolver.NXDOMAIN:
            # Not in this domain; try the next one in the search list.
            continue
    raise dns.resolver.NXDOMAIN
def load_more_data(self, value, rows=False, columns=False):
    """Fetch more rows/columns to display when a scrollbar hits its maximum."""
    try:
        at_bottom = rows and value == self.verticalScrollBar().maximum()
        if at_bottom:
            self.model().fetch_more(rows=rows)
            self.sig_fetch_more_rows.emit()
        at_right = columns and value == self.horizontalScrollBar().maximum()
        if at_right:
            self.model().fetch_more(columns=columns)
            self.sig_fetch_more_columns.emit()
    except NameError:
        # Fetching data while the editor is closing can raise a NameError;
        # ignore it (see issue 7880).
        pass
def ip_addrs(interface=None, include_loopback=False, cidr=None, type=None):
    '''
    Returns a list of IPv4 addresses assigned to the host.

    interface
        Only IP addresses from that interface will be returned.

    include_loopback : False
        Include the loopback 127.0.0.1 IPv4 address.

    cidr
        Subnet in CIDR notation; only IPv4 addresses belonging to this
        subnet will be returned.

        .. versionchanged:: 2019.2.0

    type
        If set to 'public' only public addresses will be returned.
        Ditto for 'private'. Ignored when ``cidr`` is given.

        .. versionchanged:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' network.ip_addrs
        salt '*' network.ip_addrs cidr=10.0.0.0/8
        salt '*' network.ip_addrs cidr=192.168.0.0/16 type=private
    '''
    addrs = salt.utils.network.ip_addrs(interface=interface,
                                        include_loopback=include_loopback)
    if cidr:
        # Subnet filter takes precedence over the public/private filter.
        return [addr for addr in addrs
                if salt.utils.network.in_subnet(cidr, [addr])]
    if type == 'public':
        return [addr for addr in addrs if not is_private(addr)]
    if type == 'private':
        return [addr for addr in addrs if is_private(addr)]
    return addrs
def limit(self, limit):
    """Apply a LIMIT to the query and return the newly resulting Query."""
    # Work on a copy so the original query object stays unmodified.
    limited = self._copy()
    limited._limit = limit
    return limited
def tick(self):
    """Emulate a timer without using a real one.

    Fire as many "ticks" as the wall-clock time elapsed since the last
    call warrants, then remember the current time.
    """
    now = time.time()
    since_last = now - self.latest_tick
    if since_last > self.tick_interval:
        # Whole number of intervals that have passed since the last tick.
        self.tick_all(int(since_last / self.tick_interval))
        self.latest_tick = now
def _make(c):
    """Create html from template, adding figure, annotation and sequence counts.

    Returns the list of valid databases and, for each annotated feature that
    is valid, a comma-joined string of its unique annotations.
    """
    annotations = defaultdict(list)
    for position in c['ann']:
        for database in position:
            annotations[database].extend(list(position[database]))
    logger.debug(annotations)
    valid = [name for name in c['valid']]
    summary = []
    for feature in annotations:
        if feature in valid:
            # De-duplicate before joining for display.
            summary.append(", ".join(list(set(annotations[feature]))))
    return valid, summary
def create_request(version, method, url, headers):
    """Serialize a HTTP request line plus headers into bytes."""
    # According to my measurements using b''.join is faster than
    # constructing a bytearray.
    parts = ['{} {} HTTP/{}\r\n'.format(method, url, version)]
    for name, value in headers:
        parts.extend((name, ': ', value, '\r\n'))
    parts.append('\r\n')
    return s2b(''.join(parts))
def java_potential_term(mesh, instructions):
    '''
    java_potential_term(mesh, instructions) yields a Java object that
    implements the potential field described in the given list of
    instructions. Generally, this should not be invoked directly and should
    only be called by mesh_register. Note: this expects a single term's
    description, not a series of descriptions.
    '''
    # Convert mesh data into the Java-native array types the field expects.
    faces = to_java_ints(mesh.indexed_faces)
    edges = to_java_ints(mesh.indexed_edges)
    coords = to_java_doubles(mesh.coordinates)
    # _parse_field_arguments takes a *list* of term descriptions, hence the
    # single-element wrapper around `instructions`.
    return _parse_field_arguments([instructions], faces, edges, coords)
def Conditions(cls, artifact=None, os_name=None, cpe=None, labels=None):
    """Yield every distinct (artifact, os_name, cpe, label) condition tuple.

    Each argument may hold one value or several; ``None`` is treated as the
    single value ``None``, meaning that the condition does not apply.

    Args:
        artifact: Names of artifacts that should trigger an action.
        os_name: Names of OS' that should trigger an action.
        cpe: CPE strings that should trigger an action.
        labels: Host labels that should trigger an action.

    Yields:
        a permuted series of (artifact, os_name, cpe, label) tuples.
    """
    # Normalise every argument to a list so the cartesian product covers
    # all combinations, with None standing in for "not applicable".
    axes = (cls._AsList(artifact), cls._AsList(os_name),
            cls._AsList(cpe), cls._AsList(labels))
    for combination in itertools.product(*axes):
        yield combination
def list_containers(**kwargs):
    '''
    Returns a list of containers by name. This is different from
    :py:func:`docker.ps <salt.modules.dockermod.ps_>` in that
    :py:func:`docker.ps <salt.modules.dockermod.ps_>` returns its results
    organized by container ID.

    all : False
        If ``True``, stopped containers will be included in return data

    CLI Example:

    .. code-block:: bash

        salt myminion docker.list_containers
    '''
    # Use a set so a container appearing under several names is deduplicated.
    ret = set()
    for item in six.itervalues(ps_(all=kwargs.get('all', False))):
        names = item.get('Names')
        if not names:
            continue
        # Docker prefixes names with '/', strip it for user-facing output.
        for c_name in [x.lstrip('/') for x in names or []]:
            ret.add(c_name)
    return sorted(ret)
def open(self, host, port=23):
    """Open a telnet session to the attenuator and read its model info.

    Args:
        host: A valid hostname (IP address or DNS-resolvable name) to an
            MC-DAT attenuator instrument.
        port: An optional port number (defaults to telnet default 23).
    """
    self._telnet_client.open(host, port)
    # The instrument answers the model-number query, optionally prefixed.
    response = self._telnet_client.cmd("MN?")
    prefix = "MN="
    if response.startswith(prefix):
        response = response[len(prefix):]
    keys = ['model', 'max_freq', 'max_atten']
    self.properties = dict(zip(keys, response.split("-", 2)))
    self.max_atten = float(self.properties['max_atten'])
def transformer_nat_big():
    """Set of hyperparameters: the big variant of transformer_nat."""
    hparams = transformer_nat_small()
    # Scale the small config up: wider layers, more of them, more heads,
    # and a stronger dropout to match.
    overrides = {
        'batch_size': 2048,
        'hidden_size': 1024,
        'filter_size': 4096,
        'num_hidden_layers': 6,
        'num_heads': 16,
        'layer_prepostprocess_dropout': 0.3,
    }
    for attr, value in overrides.items():
        setattr(hparams, attr, value)
    return hparams
def plotallanvar(data, dt, tmax=10, ax=None, **kwargs):
    """Plot the Allan variance of ``data`` on a log-log scale.

    Args:
        data (np.ndarray): Input data.
        dt (float): Time between each data.
        tmax (float): Maximum time.
        ax (matplotlib.axes): Axis the figure is plotted on; defaults to
            the current pyplot axes.
        kwargs (optional): Plot options passed to ``ax.loglog()``.
    """
    # Fall back to pyplot's current axes when none is supplied.
    ax = plt.gca() if ax is None else ax
    tk, avar = allan_variance(data, dt, tmax)
    ax.loglog(tk, avar, **kwargs)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('Allan Variance')
    ax.legend()
def _bind_method(self, name, unconditionally=False):
    """Generate a Matlab function and bind it to the instance.

    When an unknown attribute of the Matlab class is requested, it is
    assumed to be a call to a Matlab function, and a binding is generated
    and attached to the instance. This works because getattr() falls back
    to __getattr__ only if no attribute of the requested name is found
    through the normal routes.

    Parameters
    ----------
    name : str
        The name of the Matlab function to call, e.g. 'sqrt', 'svd'.
    unconditionally : bool, optional
        Bind the method without performing the existence check. Used to
        bootstrap methods that are required and known to exist.

    Returns
    -------
    MatlabFunction
        A reference to the newly bound MatlabFunction instance.

    Raises
    ------
    AttributeError
        If the requested name is not a callable Matlab function.
    """
    # TODO: This does not work if the function is a mex function inside a
    # folder of the same name exists.
    # Matlab's exist() codes: 2 = file, 3 = mex, 5 = builtin -> callable.
    exists = self.run_func('exist', name)['result'] in [2, 3, 5]
    if not unconditionally and not exists:
        raise AttributeError("'Matlab' object has no attribute '%s'" % name)
    # Create a new method instance holding only a weak reference to this
    # session, so the binding does not create a reference cycle.
    method_instance = MatlabFunction(weakref.ref(self), name)
    method_instance.__name__ = name
    # BUGFIX: sys.version.startswith('3') misclassifies any other major
    # version string; compare sys.version_info instead.
    if sys.version_info[0] >= 3:
        method = types.MethodType(method_instance, weakref.ref(self))
    else:
        # Python 2 method objects additionally need the owning class.
        method = types.MethodType(method_instance, weakref.ref(self), _Session)
    setattr(self, name, method)
    return getattr(self, name)
def html_to_fc(html=None, clean_html=None, clean_visible=None, encoding=None, url=None, timestamp=None, other_features=None):
    '''
    Build a FeatureCollection out of a raw HTML document.

    `html` is expected to be a raw string received over the wire from a
    remote webserver, and `encoding`, if provided, is used to decode it.
    Typically, encoding comes from the Content-Type header field. The
    :func:`~streamcorpus_pipeline._clean_html.make_clean_html` function
    handles character encodings.

    Returns the populated FeatureCollection, or None when the HTML cannot
    be cleaned.

    NOTE(review): this is Python-2-only code (`unicode`, `str.decode`,
    `dict.iteritems`) — confirm target runtime before porting.
    '''
    def add_feature(name, xs):
        # Accumulate counts into fc[name], creating the counter on demand.
        if name not in fc:
            fc[name] = StringCounter()
        fc[name] += StringCounter(xs)
    # Default the timestamp to "now" in milliseconds.
    timestamp = timestamp or int(time.time() * 1000)
    other_features = other_features or {}
    # Derive clean_html from the raw HTML unless the caller supplied it.
    if clean_html is None:
        if html is not None:
            try:
                clean_html_utf8 = make_clean_html(html, encoding=encoding)
            except:
                # Cleaning failed: the document is dropped entirely.
                logger.warn('dropping doc because:', exc_info=True)
                return
            clean_html = clean_html_utf8.decode('utf-8')
        else:
            clean_html_utf8 = u''
            clean_html = u''
    else:
        # Caller provided clean_html directly; no utf-8 byte form available.
        clean_html_utf8 = u''
    # Derive clean_visible from clean_html when missing or empty.
    if clean_visible is None or len(clean_visible) == 0:
        clean_visible = make_clean_visible(clean_html_utf8).decode('utf-8')
    elif isinstance(clean_visible, str):
        clean_visible = clean_visible.decode('utf-8')
    fc = FeatureCollection()
    # Metadata features: raw bytes, cleaned forms, timestamp and URL.
    fc[u'meta_raw'] = html and uni(html, encoding) or u''
    fc[u'meta_clean_html'] = clean_html
    fc[u'meta_clean_visible'] = clean_visible
    fc[u'meta_timestamp'] = unicode(timestamp)
    url = url or u''
    fc[u'meta_url'] = uni(url)
    # Contact-detail extractors run over the visible text.
    add_feature(u'icq', features.ICQs(clean_visible))
    add_feature(u'skype', features.skypes(clean_visible))
    add_feature(u'phone', features.phones(clean_visible))
    add_feature(u'email', features.emails(clean_visible))
    # Noun phrases, both normalized and in their raw surface forms.
    bowNP, normalizations = features.noun_phrases(cleanse(clean_visible), included_unnormalized=True)
    add_feature(u'bowNP', bowNP)
    bowNP_unnorm = chain(*normalizations.values())
    add_feature(u'bowNP_unnorm', bowNP_unnorm)
    # URL extractors run over the cleaned HTML (need markup, not text).
    add_feature(u'image_url', features.image_urls(clean_html))
    add_feature(u'a_url', features.a_urls(clean_html))
    ## get parsed versions, extract usernames
    fc[u'img_url_path_dirs'] = features.path_dirs(fc[u'image_url'])
    fc[u'img_url_hostnames'] = features.host_names(fc[u'image_url'])
    fc[u'usernames'] = features.usernames(fc[u'image_url'])
    fc[u'a_url_path_dirs'] = features.path_dirs(fc[u'a_url'])
    fc[u'a_url_hostnames'] = features.host_names(fc[u'a_url'])
    fc[u'usernames'] += features.usernames(fc[u'a_url'])
    # fc[u'usernames'] += features.usernames2(
    #     fc[u'meta_clean_visible'])
    # beginning of treating this as a pipeline...
    xform = features.entity_names()
    fc = xform.process(fc)
    # Finally merge in any caller-supplied extra features.
    for feat_name, feat_val in other_features.iteritems():
        fc[feat_name] += StringCounter(feat_val)
    return fc
def get_changed_files(self, first_sha, second_sha, exclude_paths=None):
    """List file paths changed between two commits.

    :param first_sha: SHA of the base commit.
    :param second_sha: SHA of the commit to compare against.
    :param exclude_paths: iterable of path prefixes to leave out.
    :return: list of changed file paths (each diff's ``a_path``).
    """
    # str.startswith accepts a tuple of prefixes; an empty tuple never matches.
    prefixes = tuple(exclude_paths or ())
    base = self.repo.commit(first_sha)
    other = self.repo.commit(second_sha)
    return [
        d.a_path
        for d in base.diff(other)
        if not d.a_path.startswith(prefixes)
    ]
def whoami(anchore_config):
    """Show user data for current user if available

    :param anchore_config: anchore configuration object (unused here; the
        auth context is read from the global ``contexts`` table)
    :return: never returns normally — exits the process with 0 on success,
        1 on failure
    """
    ecode = 0
    try:
        aa = contexts['anchore_auth']
        # Only a context that carries credentials is considered logged in.
        if aa and 'username' in aa and 'password' in aa:
            info = {'Current user': aa['user_info'] if aa['user_info'] else 'anonymous'}
            anchore_print(info, do_formatting=True)
        else:
            anchore_print_err('No anchore auth context found. Cannot get user info. Try logging in first')
            ecode = 1
    except Exception as err:
        # NOTE(review): `err` is swallowed; only a generic message is shown.
        anchore_print_err('Cannot get user info')
        ecode = 1
    sys.exit(ecode)
def find_recurring(number, min_repeat=5):
    """Attempts to find repeating digits in the fractional component of a number.

    Args:
        number (tuple): the number to process in the form:
            (int, int, int, ... ".", ..., int int int)
        min_repeat (int): the minimum number of times a pattern must occur
            to be defined as recurring. A min_repeat of n would mean a
            pattern must occur at least n + 1 times, so as to be repeated
            n times.

    Returns:
        The original number with repeating digits (if found) enclosed by
        "[" and "]" (tuple).

    Examples:
        >>> find_recurring((3, 2, 1, '.', 1, 2, 3, 1, 2, 3), min_repeat=1)
        (3, 2, 1, '.', '[', 1, 2, 3, ']')
    """
    # Return number if it has no fractional part, or min_repeat is invalid.
    if "." not in number or min_repeat < 1:
        return number
    # Separate the number into integer and fractional parts.
    integer_part, fractional_part = integer_fractional_parts(number)
    # Reverse fractional part to get a sequence (search starts at the end).
    sequence = fractional_part[::-1]
    # The 'period' is the number of digits in a candidate pattern.
    period = 0
    # Track the best pattern found so far.
    best = 0
    best_period = 0
    best_repeat = 0
    # Try every period length and count how often its pattern repeats.
    while period < len(sequence):
        period += 1
        pattern = sequence[:period]
        repeat = 0
        digit = period
        pattern_match = True
        # BUGFIX: require the whole pattern window to fit inside the
        # sequence (digit + period <= len). The previous bound
        # (digit < len) let sequence[digit + i] index past the end and
        # raise IndexError on inputs whose tail is a partial pattern.
        while pattern_match and digit + period <= len(sequence):
            for i, pattern_digit in enumerate(pattern):
                if sequence[digit + i] != pattern_digit:
                    pattern_match = False
                    break
            else:
                repeat += 1
                digit += period
        # Rank each pattern by covered digits and keep the best.
        rank = period * repeat
        if rank > best:
            best_period = period
            best_repeat = repeat
            best = rank
    # If the pattern does not repeat often enough, return the original number.
    if best_repeat < min_repeat:
        return number
    # Use the best pattern found.
    pattern = sequence[:best_period]
    # Remove the repeated occurrences of the pattern from the number.
    number = integer_part + fractional_part[:-(best + best_period)]
    # Rotate the pattern so the bracket starts at the pattern boundary.
    pattern_temp = pattern
    for i, digit in enumerate(pattern):
        if number[-1] == digit:
            number = number[:-1]
            pattern_temp = pattern_temp[1:] + (pattern_temp[0],)
    pattern = pattern_temp
    # Return the number with the recurring pattern enclosed by '[' and ']'.
    return number + ("[",) + pattern[::-1] + ("]",)
def draw_separators(self):
    """Draw the horizontal lines separating the categories on the Canvas."""
    # Top border of the first row.
    y = 1
    self._timeline.create_line((0, y, self.pixel_width, y))
    for category, label in self._category_labels.items():
        row_height = label.winfo_reqheight()
        # Remember the vertical span occupied by this category.
        self._rows[category] = (y, y + row_height)
        y += row_height
        # Bottom border of this category's row.
        self._timeline.create_line((0, y, self.pixel_width, y))
    # Resize the canvas so it exactly fits all rows.
    self._timeline.config(height=y)
def prepare_weighted_spans(targets,  # type: List[TargetExplanation]
                           preserve_density=None,  # type: Optional[bool]
                           ):
    # type: (...) -> List[Optional[List[PreparedWeightedSpans]]]
    """Return weighted spans prepared for rendering.

    Calculate a separate weight range for each different weighted span
    (for each different index): each target has the same number of
    weighted spans, so weight ranges are computed per span index across
    all targets, keeping colors comparable between targets.
    """
    # Per-target list of per-document char weight arrays (None when the
    # target carries no weighted spans at all).
    targets_char_weights = [
        [get_char_weights(ws, preserve_density=preserve_density)
         for ws in t.weighted_spans.docs_weighted_spans]
        if t.weighted_spans else None
        for t in targets]  # type: List[Optional[List[np.ndarray]]]
    # Number of weighted spans per target (0 when none have any).
    max_idx = max_or_0(len(ch_w or []) for ch_w in targets_char_weights)
    targets_char_weights_not_None = [
        cw for cw in targets_char_weights
        if cw is not None]  # type: List[List[np.ndarray]]
    # One weight range per span index, shared across all targets.
    spans_weight_ranges = [
        max_or_0(abs(x) for char_weights in targets_char_weights_not_None
                 for x in char_weights[idx])
        for idx in range(max_idx)]
    # Pair each target's spans with its char weights and the shared range.
    return [
        [PreparedWeightedSpans(ws, char_weights, weight_range)
         for ws, char_weights, weight_range in zip(
             t.weighted_spans.docs_weighted_spans,  # type: ignore
             t_char_weights, spans_weight_ranges)]
        if t_char_weights is not None else None
        for t, t_char_weights in zip(targets, targets_char_weights)]
def get(self, column, default_value=None):
    """Fetch one or more column values, falling back to ``default_value``.

    Args:
        column: Tuple of column names, or a (str) column name, or
            positional column number, 0-indexed.
        default_value: The value to use if the key is not found.

    Returns:
        A list or string with column value(s) or default_value if not found.
    """
    # A sequence of columns resolves each entry independently.
    if isinstance(column, (list, tuple)):
        return [self.get(col, default_value) for col in column]
    # Direct lookup first; the key may instead be a range spec such as
    # '1', ':-1' or '1:', which fails here and is retried via self[...].
    try:
        return self._values[column]
    except (IndexError, TypeError):
        pass
    try:
        return self[column]
    except IndexError:
        return default_value
def select_window(pymux, variables):
    """Select a window by index, e.g.: select-window -t :3"""
    window_id = variables['<target-window>']

    def invalid_window():
        raise CommandException('Invalid window: %s' % window_id)

    # Only the ":<number>" form is accepted.
    if not window_id.startswith(':'):
        invalid_window()
    try:
        index = int(window_id[1:])
    except ValueError:
        invalid_window()
    window = pymux.arrangement.get_window_by_index(index)
    if window:
        pymux.arrangement.set_active_window(window)
    else:
        invalid_window()
def getReceivers(sender=Any, signal=Any):
    """Return the raw receiver list registered for (sender, signal).

    Looks the pair up in the global connections table. There is no
    guarantee this is the actual list stored in the table, so treat the
    result as a simple iterable / truth value rather than a list to
    append to. Normally you would wrap it in liveReceivers(...) to get
    the actual receiver objects.
    """
    # EAFP: a missing sender or signal entry simply means "no receivers".
    try:
        per_sender = connections[id(sender)]
        return per_sender[signal]
    except KeyError:
        return []
def _user_perm_cache ( self ) : """cached _ permissions will generate the cache in a lazy fashion ."""
# Check to see if the cache has been primed . if not self . user : return { } cache_filled = getattr ( self . user , '_authority_perm_cache_filled' , False , ) if cache_filled : # Don ' t really like the name for this , but this matches how Django # does it . return self . user . _authority_perm_cache # Prime the cache . self . _prime_user_perm_caches ( ) return self . user . _authority_perm_cache
def _fetch_datatype(self, transport, bucket, key, r=None, pr=None, basic_quorum=None, notfound_ok=None, timeout=None, include_context=None):
    """Fetch the value of a Riak Datatype as raw data.

    Used internally to update already-reified Datatype objects; use the
    public version to fetch a reified type.

    .. note:: This request is automatically retried :attr:`retries`
       times if it fails due to network error.

    :param transport: the transport to issue the request on
    :param bucket: the bucket of the datatype (must belong to a
        :class:`~riak.BucketType`)
    :param key: the key of the datatype
    :param r: the read quorum
    :param pr: the primary read quorum
    :param basic_quorum: whether to use the "basic quorum" policy for
        not-founds
    :param notfound_ok: whether to treat not-found responses as successful
    :param timeout: a timeout value in milliseconds
    :param include_context: whether to return the opaque context as well
        as the value (useful for removal operations on sets and maps)
    :rtype: tuple of type, value and context
    """
    _validate_timeout(timeout)
    # Forward all tunables to the transport layer unchanged.
    options = dict(r=r, pr=pr, basic_quorum=basic_quorum,
                   notfound_ok=notfound_ok, timeout=timeout,
                   include_context=include_context)
    return transport.fetch_datatype(bucket, key, **options)
def json_obj(self, method, params=None, auth=True):
    """Build the JSON-RPC 2.0 request string expected by the Zabbix API."""
    payload = {
        'jsonrpc': '2.0',
        'method': method,
        # Default to an empty parameter object when none is given.
        'params': {} if params is None else params,
        # Unauthenticated calls (e.g. apiinfo.version) send a null token.
        'auth': self.__auth if auth else None,
        'id': self.id,
    }
    return json.dumps(payload)
def run_all(self, delay_seconds=0):
    """Run every job now, regardless of whether it is scheduled to run.

    A pause of ``delay_seconds`` after each job helps distribute the
    system load generated by the jobs more evenly over time.

    :param delay_seconds: A delay added between every executed job
    """
    logger.info('Running *all* %i jobs with %is delay inbetween', len(self.jobs), delay_seconds)
    # Iterate over a snapshot: running a job may mutate self.jobs.
    for job in list(self.jobs):
        self._run_job(job)
        time.sleep(delay_seconds)
def LessThan(self, value):
    """Finish the WHERE clause as a "less than" comparison.

    Args:
        value: The value to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(value, '<')
    self._awql = condition
    return self._query_builder
def find_ident(self, name):
    """Resolve ``name`` to a documentation object.

    Searches this module's exported identifiers and recurses into **all**
    sub-modules (unexported sub-modules included). A bare identifier
    (without ``.`` separators) is only looked up in this module. When no
    match is found, an ``External`` instance populated with the given
    identifier is returned instead.
    """
    # A direct hit in this module's reference table wins outright.
    if name in self.refdoc:
        return self.refdoc[name]
    # Otherwise, take the first real (non-External) match from any sub-module.
    for submodule in self.submodules():
        found = submodule.find_ident(name)
        if not isinstance(found, External):
            return found
    return External(name)
def pop_viewport(self):
    """Pop a viewport from the stack and re-activate the one beneath it."""
    popped = self._vp_stack.pop()
    if self._vp_stack:
        # Restore the viewport now on top of the stack.
        self.context.set_viewport(*self._vp_stack[-1])
    else:
        # Stack empty: fall back to the full physical canvas size.
        self.context.set_viewport(0, 0, *self.physical_size)
    self._update_transforms()
    return popped
def list_images(self, identifier, offset=-1, limit=-1):
    """List images in an image group.

    Parameters
    ----------
    identifier : string
        Unique image group identifier
    offset : int
        Index of the first image to return; values < 0 mean "from the
        start" (order as defined by the object store)
    limit : int
        Maximum number of images to return; values <= 0 mean "no limit"

    Returns
    -------
    ObjectListing
        Listing of group images or None if image group does not exist
    """
    # Get image group to ensure that it exists. The object contains the
    # full list of group images.
    img_grp = self.get_object(identifier)
    if img_grp is None:
        return None
    total_count = len(img_grp.images)
    # BUGFIX: the default offset of -1 previously produced range(-1, end),
    # which prepended the *last* image (index -1) to the result. Clamp to
    # 0 so a negative offset simply means "start at the beginning".
    start = max(offset, 0)
    if limit > 0:
        end = min(start + limit, total_count)
    else:
        end = total_count
    items = img_grp.images[start:end]
    # Return object listing (reporting the caller's original offset/limit).
    return datastore.ObjectListing(items, offset, limit, total_count)
def is_bsd(name=None):
    """Return True if this is a BSD-like operating system.

    ``name`` defaults to ``sys.platform`` when not given (or falsy).
    """
    platform_name = name or sys.platform
    # Darwin (macOS) and FreeBSD are the BSD family members recognised here.
    return Platform.is_darwin(platform_name) or Platform.is_freebsd(platform_name)
def idfdiffs(idf1, idf2):
    """Return the diffs between the two idfs.

    Keys of the returned dict are either (object key, object name) for
    objects present in only one idf, or (object key, object name, field
    name) for fields that differ; values are the corresponding pair of
    values from (idf1, idf2).
    """
    # For any object type, it is sorted by name.
    thediffs = {}
    keys = idf1.model.dtls  # undocumented variable
    for akey in keys:
        idfobjs1 = idf1.idfobjects[akey]
        idfobjs2 = idf2.idfobjects[akey]
        names = set([getobjname(i) for i in idfobjs1] + [getobjname(i) for i in idfobjs2])
        names = sorted(names)
        idfobjs1 = sorted(idfobjs1, key=lambda idfobj: idfobj['obj'])
        idfobjs2 = sorted(idfobjs2, key=lambda idfobj: idfobj['obj'])
        for name in names:
            n_idfobjs1 = [item for item in idfobjs1 if getobjname(item) == name]
            n_idfobjs2 = [item for item in idfobjs2 if getobjname(item) == name]
            # zip_longest pads the shorter side with None, marking objects
            # that exist in only one of the two idfs.
            for idfobj1, idfobj2 in zip_longest(n_idfobjs1, n_idfobjs2):
                # BUGFIX: compare the zip_longest fill value with `is None`
                # instead of `== None`, so idf objects that override __eq__
                # cannot break missing-object detection.
                if idfobj1 is None:
                    # Object only exists in idf2.
                    thediffs[(idfobj2.key.upper(), getobjname(idfobj2))] = (None, idf2.idfname)
                    break
                if idfobj2 is None:
                    # Object only exists in idf1.
                    thediffs[(idfobj1.key.upper(), getobjname(idfobj1))] = (idf1.idfname, None)
                    break
                for i, (f1, f2) in enumerate(zip(idfobj1.obj, idfobj2.obj)):
                    if i == 0:
                        # Field 0 is the object type; compare case-insensitively.
                        f1, f2 = f1.upper(), f2.upper()
                    if f1 != f2:
                        thediffs[(akey, getobjname(idfobj1), idfobj1.objidd[i]['field'][0])] = (f1, f2)
    return thediffs
def _get_roles_for_request(request, application):
    """Return the role set of the request's user for ``application``.

    Site administrators additionally receive the implicit ``is_admin``
    and ``is_authorised`` roles.
    """
    roles = application.get_roles_for_person(request.user)
    if common.is_admin(request):
        for extra in ("is_admin", "is_authorised"):
            roles.add(extra)
    return roles
def get_brandings(self):
    """Fetch every branding configured for the account.

    @return List of brandings
    """
    conn = Connection(self.token)
    conn.set_url(self.production, self.BRANDINGS_URL)
    return conn.get_request()
def is_connected(self):
    r"""Check if the graph is connected (cached).

    A graph is connected if and only if there exists a (directed) path
    between any two vertices.

    Returns
    -------
    connected : bool
        True if the graph is connected, False otherwise.

    Notes
    -----
    For undirected graphs, starting at a vertex and trying to visit all
    the others is enough. For directed graphs, one needs to check that a
    vertex can both be visited by all the others and visit all the others.
    """
    # Return the cached answer when connectivity was already computed.
    if self._connected is not None:
        return self._connected
    adjacencies = [self.W]
    if self.is_directed():
        # Also traverse reversed edges to prove reachability both ways.
        adjacencies.append(self.W.T)
    for adjacency in adjacencies:
        # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin bool is the documented replacement.
        visited = np.zeros(self.n_vertices, dtype=bool)
        stack = set([0])
        # Iterative traversal from vertex 0.
        while stack:
            vertex = stack.pop()
            if visited[vertex]:
                continue
            visited[vertex] = True
            neighbors = adjacency[vertex].nonzero()[1]
            stack.update(neighbors)
        if not np.all(visited):
            self._connected = False
            return self._connected
    self._connected = True
    return self._connected
def get_dialect(self):
    """Return the SQLAlchemy database dialect class corresponding to this
    URL's driver name.
    """
    # "driver+dbapi" names are registered under a dotted entry point name.
    if "+" in self.drivername:
        entry_name = self.drivername.replace("+", ".")
    else:
        entry_name = self.drivername
    cls = registry.load(entry_name)
    # Legacy dialects may be modules exposing the real class as `.dialect`.
    legacy = (hasattr(cls, "dialect")
              and isinstance(cls.dialect, type)
              and issubclass(cls.dialect, Dialect))
    return cls.dialect if legacy else cls