def derivative(self, t, n=1):
    """Returns the nth derivative of the segment at t.

    Note: Bezier curves can have points where their derivative vanishes.
    If you are interested in the tangent direction, use the unit_tangent()
    method instead.
    """
    p = self.bpoints()
    if n == 1:
        return 2 * ((p[1] - p[0]) * (1 - t) + (p[2] - p[1]) * t)
    elif n == 2:
        return 2 * (p[2] - 2 * p[1] + p[0])
    elif n > 2:
        return 0
    else:
        raise ValueError("n should be a positive integer.")
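A minimal standalone check of the same quadratic-Bezier derivative formula. The control points are complex numbers (a common 2-D point representation in SVG path libraries); the particular values below are purely illustrative and not taken from the source.

# Quadratic Bezier: B'(t) = 2*((1-t)*(p1-p0) + t*(p2-p1)), B''(t) = 2*(p2 - 2*p1 + p0)
p0, p1, p2 = 0 + 0j, 1 + 2j, 3 + 0j

def quad_derivative(t, n=1):
    if n == 1:
        return 2 * ((p1 - p0) * (1 - t) + (p2 - p1) * t)
    elif n == 2:
        return 2 * (p2 - 2 * p1 + p0)
    return 0

print(quad_derivative(0.0))       # 2*(p1-p0) = (2+4j): tangent at the start point
print(quad_derivative(1.0))       # 2*(p2-p1) = (4-4j): tangent at the end point
print(quad_derivative(0.5, n=2))  # constant second derivative: (2-8j)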
def copy(self):
    """Make deep copy of this KeyBundle.

    :return: The copy
    """
    kb = KeyBundle()
    kb._keys = self._keys[:]
    kb.cache_time = self.cache_time
    kb.verify_ssl = self.verify_ssl
    if self.source:
        kb.source = self.source
        kb.fileformat = self.fileformat
        kb.keytype = self.keytype
        kb.keyusage = self.keyusage
        kb.remote = self.remote
    return kb
def get_account_entitlement_for_user(self, user_id, determine_rights=None, create_if_not_exists=None):
    """GetAccountEntitlementForUser.

    [Preview API] Get the entitlements for a user.
    :param str user_id: The id of the user
    :param bool determine_rights:
    :param bool create_if_not_exists:
    :rtype: :class:`<AccountEntitlement> <azure.devops.v5_1.licensing.models.AccountEntitlement>`
    """
    route_values = {}
    if user_id is not None:
        route_values['userId'] = self._serialize.url('user_id', user_id, 'str')
    query_parameters = {}
    if determine_rights is not None:
        query_parameters['determineRights'] = self._serialize.query('determine_rights', determine_rights, 'bool')
    if create_if_not_exists is not None:
        query_parameters['createIfNotExists'] = self._serialize.query('create_if_not_exists', create_if_not_exists, 'bool')
    response = self._send(http_method='GET',
                          location_id='6490e566-b299-49a7-a4e4-28749752581f',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('AccountEntitlement', response)
def _wait_for_file(cls, filename, timeout=FAIL_WAIT_SEC, want_content=True):
    """Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
    def file_waiter():
        return os.path.exists(filename) and (not want_content or os.path.getsize(filename))

    action_msg = 'file {} to appear'.format(filename)
    return cls._deadline_until(file_waiter, action_msg, timeout=timeout)
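The helpers _deadline_until and FAIL_WAIT_SEC come from the surrounding class and are not shown here. A rough standalone equivalent of the polling loop they imply might look like the sketch below; the 0.1-second poll interval and the use of TimeoutError instead of the project's own Timeout exception are assumptions.

import os
import time

def wait_for_file(filename, timeout=30.0, want_content=True, poll=0.1):
    # Poll until the file exists (and, optionally, is non-empty) or give up.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if os.path.exists(filename) and (not want_content or os.path.getsize(filename)):
            return True
        time.sleep(poll)
    raise TimeoutError('timed out waiting for file {} to appear'.format(filename))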
def split_by_length_of_utterances(self, proportions={}, separate_issuers=False):
    """Split the corpus into subsets where the total duration of subsets are proportional to the given proportions.

    The corpus gets split into len(proportions) parts, so the number of utterances
    are distributed according to the proportions.

    Args:
        proportions (dict): A dictionary containing the relative size of the target subsets.
                            The key is an identifier for the subset.
        separate_issuers (bool): If True it makes sure that all utterances of an issuer are in the same subset.

    Returns:
        (dict): A dictionary containing the subsets with the identifier from the input as key.

    Example::

        >>> spl = Splitter(corpus)
        >>> corpus.num_utterances
        100
        >>> subsets = spl.split_by_length_of_utterances(proportions={
        >>>     "train": 0.6,
        >>>     "dev": 0.2,
        >>>     "test": 0.2
        >>> })
        >>> print(subsets)
        {'dev': <audiomate.corpus.subview.Subview at 0x104ce7400>,
         'test': <audiomate.corpus.subview.Subview at 0x104ce74e0>,
         'train': <audiomate.corpus.subview.Subview at 0x104ce7438>}
        >>> subsets['train'].num_utterances
        60
        >>> subsets['test'].num_utterances
        20
    """
    utterance_to_duration = {}

    if separate_issuers:
        # Count total length of utterances per issuer
        issuer_utts_total_duration = collections.defaultdict(float)
        issuer_utts = collections.defaultdict(list)

        for utterance in self.corpus.utterances.values():
            issuer_utts_total_duration[utterance.issuer.idx] += utterance.duration
            issuer_utts[utterance.issuer.idx].append(utterance.idx)

        issuer_utts_total_duration = {k: {'duration': int(v)}
                                      for k, v in issuer_utts_total_duration.items()}

        # Split with total utt duration per issuer as weight
        issuer_splits = utils.get_identifiers_splitted_by_weights(
            issuer_utts_total_duration, proportions=proportions)

        # Collect utterances of all issuers per split
        splits = collections.defaultdict(list)

        for split_idx, issuer_ids in issuer_splits.items():
            for issuer_idx in issuer_ids:
                splits[split_idx].extend(issuer_utts[issuer_idx])
    else:
        for utterance in self.corpus.utterances.values():
            utterance_to_duration[utterance.idx] = {'length': int(utterance.duration * 100)}

        splits = utils.get_identifiers_splitted_by_weights(
            utterance_to_duration, proportions=proportions)

    return self._subviews_from_utterance_splits(splits)
def compute_tls13_traffic_secrets(self):
    """Ciphers key and IV are updated accordingly for Application data.

    self.handshake_messages should be ClientHello...ServerFinished.
    """
    hkdf = self.prcs.hkdf
    self.tls13_master_secret = hkdf.extract(self.tls13_handshake_secret, None)

    cts0 = hkdf.derive_secret(self.tls13_master_secret,
                              b"client application traffic secret",
                              b"".join(self.handshake_messages))
    self.tls13_derived_secrets["client_traffic_secrets"] = [cts0]

    sts0 = hkdf.derive_secret(self.tls13_master_secret,
                              b"server application traffic secret",
                              b"".join(self.handshake_messages))
    self.tls13_derived_secrets["server_traffic_secrets"] = [sts0]

    es = hkdf.derive_secret(self.tls13_master_secret,
                            b"exporter master secret",
                            b"".join(self.handshake_messages))
    self.tls13_derived_secrets["exporter_secret"] = es

    if self.connection_end == "server":
        # self.prcs.tls13_derive_keys(cts0)
        self.pwcs.tls13_derive_keys(sts0)
    elif self.connection_end == "client":
        # self.pwcs.tls13_derive_keys(cts0)
        self.prcs.tls13_derive_keys(sts0)
def _add_fold_decoration(self, block, region):
    """Add fold decorations (boxes around a folded block in the editor widget)."""
    deco = TextDecoration(block)
    deco.signals.clicked.connect(self._on_fold_deco_clicked)
    deco.tooltip = region.text(max_lines=25)
    deco.draw_order = 1
    deco.block = block
    deco.select_line()
    deco.set_outline(drift_color(self._get_scope_highlight_color(), 110))
    deco.set_background(self._get_scope_highlight_color())
    deco.set_foreground(QtGui.QColor('#808080'))
    self._block_decos.append(deco)
    self.editor.decorations.append(deco)
def uniformVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
    """Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the uniform distribution U(0.0, 1.0).

    :param sc: SparkContext used to create the RDD.
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD.
    :param seed: Seed for the RNG that generates the seed for the generator in each partition.
    :return: RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`.

    >>> import numpy as np
    >>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect())
    >>> mat.shape
    (10, 10)
    >>> mat.max() <= 1.0 and mat.min() >= 0.0
    True
    >>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions()
    4
    """
    return callMLlibFunc("uniformVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
def _is_valid_script_engine(zap, engine):
    """Check if given script engine is valid."""
    engine_names = zap.script.list_engines
    short_names = [e.split(' : ')[1] for e in engine_names]
    return engine in engine_names or engine in short_names
def transform_annotation(self, ann, duration):
    '''Apply the chord transformation.

    Parameters
    ----------
    ann : jams.Annotation
        The chord annotation
    duration : number > 0
        The target duration

    Returns
    -------
    data : dict
        data['pitch'] : np.ndarray, shape=(n, 12)
        data['root'] : np.ndarray, shape=(n, 13) or (n, 1)
        data['bass'] : np.ndarray, shape=(n, 13) or (n, 1)

        `pitch` is a binary matrix indicating pitch class activation at each frame.

        `root` is a one-hot matrix indicating the chord root's pitch class at each frame.

        `bass` is a one-hot matrix indicating the chord bass (lowest note) pitch class
        at each frame.

        If sparsely encoded, `root` and `bass` are integers in the range [0, 12]
        where 12 indicates no chord.

        If densely encoded, `root` and `bass` have an extra final dimension which
        is active when there is no chord sounding.
    '''
    # Construct a blank annotation with mask = 0
    intervals, chords = ann.to_interval_values()

    # Get the dtype for root/bass
    if self.sparse:
        dtype = np.int
    else:
        dtype = np.bool

    # If we don't have any labeled intervals, fill in a no-chord
    if not chords:
        intervals = np.asarray([[0, duration]])
        chords = ['N']

    # Suppress all intervals not in the encoder
    pitches = []
    roots = []
    basses = []

    # default value when data is missing
    if self.sparse:
        fill = 12
    else:
        fill = False

    for chord in chords:
        # Encode the pitches
        root, semi, bass = mir_eval.chord.encode(chord)
        pitches.append(np.roll(semi, root))

        if self.sparse:
            if root in self._classes:
                roots.append([root])
                basses.append([(root + bass) % 12])
            else:
                roots.append([fill])
                basses.append([fill])
        else:
            if root in self._classes:
                roots.extend(self.encoder.transform([[root]]))
                basses.extend(self.encoder.transform([[(root + bass) % 12]]))
            else:
                roots.extend(self.encoder.transform([[]]))
                basses.extend(self.encoder.transform([[]]))

    pitches = np.asarray(pitches, dtype=np.bool)
    roots = np.asarray(roots, dtype=dtype)
    basses = np.asarray(basses, dtype=dtype)

    target_pitch = self.encode_intervals(duration, intervals, pitches)
    target_root = self.encode_intervals(duration, intervals, roots,
                                        multi=False, dtype=dtype, fill=fill)
    target_bass = self.encode_intervals(duration, intervals, basses,
                                        multi=False, dtype=dtype, fill=fill)

    if not self.sparse:
        target_root = _pad_nochord(target_root)
        target_bass = _pad_nochord(target_bass)

    return {'pitch': target_pitch,
            'root': target_root,
            'bass': target_bass}
def namebase(self):
    """The same as :meth:`name`, but with one file extension stripped off.

    For example, ``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``,
    but          ``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``.
    """
    base, ext = self.module.splitext(self.name)
    return base
def next_future_job_delta(self) -> Optional[float]:
    """Give the amount of seconds before the next future job is due."""
    job = self._get_next_future_job()
    if not job:
        return None
    return (job.at - datetime.now(timezone.utc)).total_seconds()
def AsJsonString(self):
    """A JSON string representation of this User instance.

    Returns:
      A JSON string representation of this User instance
    """
    return json.dumps(self.AsDict(dt=False), sort_keys=True)
def shared_like(param, suffix, init=0):
    '''Create a Theano shared variable like an existing parameter.

    Parameters
    ----------
    param : Theano variable
        Theano variable to use for shape information.
    suffix : str
        Suffix to append to the parameter's name for the new variable.
    init : float or ndarray, optional
        Initial value of the shared variable. Defaults to 0.

    Returns
    -------
    shared : Theano shared variable
        A new shared variable with the same shape and data type as ``param``.
    '''
    return theano.shared(np.zeros_like(param.get_value()) + init,
                         name='{}_{}'.format(param.name, suffix),
                         broadcastable=param.broadcastable)
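A rough usage sketch, assuming Theano and NumPy are installed and shared_like above is in scope. The parameter name, its shape, and the momentum-buffer use case are illustrative choices, not taken from the source.

import numpy as np
import theano

w = theano.shared(np.random.randn(64, 32).astype('float64'), name='w')

# One zero-initialised buffer per parameter, same shape and dtype as the parameter.
velocity = shared_like(w, 'velocity')
print(velocity.name)               # 'w_velocity'
print(velocity.get_value().shape)  # (64, 32)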
def readpar(par_file, root):
    """Read StagYY par file.

    The namelist is populated in chronological order with:

    - :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
    - :data:`PAR_DFLT_FILE`, the global configuration par file;
    - ``par_name_defaultparameters`` if it is defined in ``par_file``;
    - ``par_file`` itself;
    - ``parameters.dat`` if it can be found in the StagYY output directories.

    Args:
        par_file (:class:`pathlib.Path`): path of par file.
        root (:class:`pathlib.Path`): path on which other paths are rooted.
            This is usually par.parent.

    Returns:
        :class:`f90nml.namelist.Namelist`: case insensitive dict of dict of values
        with first key being the namelist and second key the variables' name.
    """
    par_nml = deepcopy(PAR_DEFAULT)

    if PAR_DFLT_FILE.is_file():
        _enrich_with_par(par_nml, PAR_DFLT_FILE)
    else:
        PAR_DFLT_FILE.parent.mkdir(exist_ok=True)
        f90nml.write(par_nml, str(PAR_DFLT_FILE))

    if not par_file.is_file():
        raise NoParFileError(par_file)

    par_main = f90nml.read(str(par_file))
    if 'default_parameters_parfile' in par_main:
        par_dflt = par_main['default_parameters_parfile'].get(
            'par_name_defaultparameters', 'par_defaults')
        par_dflt = root / par_dflt
        if not par_dflt.is_file():
            raise NoParFileError(par_dflt)
        _enrich_with_par(par_nml, par_dflt)

    _enrich_with_par(par_nml, par_file)

    par_out = root / par_nml['ioin']['output_file_stem'] / '_parameters.dat'
    if par_out.is_file():
        _enrich_with_par(par_nml, par_out)
    par_out = root / par_nml['ioin']['hdf5_output_folder'] / 'parameters.dat'
    if par_out.is_file():
        _enrich_with_par(par_nml, par_out)

    return par_nml
def mapping(self):
    """Return the constructed mappings.

    Invert these to map internal indices to external ids.

    Returns:
        (user id map, user feature map, item id map, item feature map): tuple of dictionaries
    """
    return (
        self._user_id_mapping,
        self._user_feature_mapping,
        self._item_id_mapping,
        self._item_feature_mapping,
    )
def Add(self, service, method, request, global_params=None):
    """Add a request to the batch.

    Args:
      service: A class inheriting base_api.BaseApiService.
      method: A string indicating the desired method from the service. See
          the example in the class docstring.
      request: An input message appropriate for the specified service.method.
      global_params: Optional additional parameters to pass into
          method.PrepareHttpRequest.

    Returns:
      None
    """
    # Retrieve the configs for the desired method and service.
    method_config = service.GetMethodConfig(method)
    upload_config = service.GetUploadConfig(method)

    # Prepare the HTTP Request.
    http_request = service.PrepareHttpRequest(
        method_config, request, global_params=global_params,
        upload_config=upload_config)

    # Create the request and add it to our master list.
    api_request = self.ApiCall(
        http_request, self.retryable_codes, service, method_config)
    self.api_requests.append(api_request)
def add_rect(self, width, height, rid=None):
    """Add rectangle of width x height dimensions.

    Arguments:
        width (int, float): Rectangle width
        height (int, float): Rectangle height
        rid: Optional rectangle user id

    Returns:
        Rectangle: Rectangle with placement coordinates
        None: If the rectangle couldn't be placed.
    """
    assert width > 0 and height > 0

    # Obtain the best section to place the rectangle.
    section, rotated = self._select_fittest_section(width, height)
    if not section:
        return None

    if rotated:
        width, height = height, width

    # Remove section, split and store results
    self._sections.remove(section)
    self._split(section, width, height)

    # Store rectangle in the selected position
    rect = Rectangle(section.x, section.y, width, height, rid)
    self.rectangles.append(rect)
    return rect
def query_for_observations(mjd, observable, runid_list):
    """Do a QUERY on the TAP service for all observations that are part of runid,
    were taken after mjd and have calibration level 'observable'.

    Schema is at: http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/tables

    mjd: float
    observable: str (2 or 1)
    runid: tuple eg. ('13AP05', '13AP06')
    """
    data = {"QUERY": ("SELECT Observation.target_name as TargetName, "
                      "COORD1(CENTROID(Plane.position_bounds)) AS RA,"
                      "COORD2(CENTROID(Plane.position_bounds)) AS DEC, "
                      "Plane.time_bounds_lower AS StartDate, "
                      "Plane.time_exposure AS ExposureTime, "
                      "Observation.instrument_name AS Instrument, "
                      "Plane.energy_bandpassName AS Filter, "
                      "Observation.observationID AS dataset_name, "
                      "Observation.proposal_id AS ProposalID, "
                      "Observation.proposal_pi AS PI "
                      "FROM caom2.Observation AS Observation "
                      "JOIN caom2.Plane AS Plane ON "
                      "Observation.obsID = Plane.obsID "
                      "WHERE ( Observation.collection = 'CFHT' ) "
                      "AND Plane.time_bounds_lower > %d "
                      "AND Plane.calibrationLevel=%s "
                      "AND Observation.proposal_id IN %s ") %
                     (mjd, observable, str(runid_list)),
            "REQUEST": "doQuery",
            "LANG": "ADQL",
            "FORMAT": "votable"}

    result = requests.get(storage.TAP_WEB_SERVICE, params=data, verify=False)
    assert isinstance(result, requests.Response)
    logging.debug("Doing TAP Query using url: %s" % (str(result.url)))

    temp_file = tempfile.NamedTemporaryFile()
    with open(temp_file.name, 'w') as outfile:
        outfile.write(result.text)
    try:
        vot = parse(temp_file.name).get_first_table()
    except Exception as ex:
        logging.error(str(ex))
        logging.error(result.text)
        raise ex

    vot.array.sort(order='StartDate')
    t = vot.array
    temp_file.close()

    logging.debug("Got {} lines from tap query".format(len(t)))

    return t
def backup_list(self, query, detail):
    """Lists base backups and basic information about them"""
    import csv
    from wal_e.storage.base import BackupInfo

    bl = self._backup_list(detail)

    # If there is no query, return an exhaustive list, otherwise
    # find a backup instead.
    if query is None:
        bl_iter = bl
    else:
        bl_iter = bl.find_all(query)

    # TODO: support switchable formats for difference needs.
    w_csv = csv.writer(sys.stdout, dialect='excel-tab')
    w_csv.writerow(BackupInfo._fields)

    for bi in bl_iter:
        w_csv.writerow([getattr(bi, k) for k in BackupInfo._fields])

    sys.stdout.flush()
def has_segment_tables(xmldoc, name=None):
    """Return True if the document contains a complete set of segment tables.
    Returns False otherwise. If name is given and not None then the return
    value is True only if the document's segment tables, if present, contain
    a segment list by that name.
    """
    try:
        names = lsctables.SegmentDefTable.get_table(xmldoc).getColumnByName("name")
        lsctables.SegmentTable.get_table(xmldoc)
        lsctables.SegmentSumTable.get_table(xmldoc)
    except (ValueError, KeyError):
        return False
    return name is None or name in names
def accepts(**schemas):
    """Create a decorator for validating function parameters.

    Example::

        @accepts(a="number", body={"+field_ids": [int], "is_ok": bool})
        def f(a, body):
            print(a, body["field_ids"], body.get("is_ok"))

    :param schemas: The schema for validating a given parameter.
    """
    validate = parse(schemas).validate

    @decorator
    def validating(func, *args, **kwargs):
        validate(inspect.getcallargs(func, *args, **kwargs), adapt=False)
        return func(*args, **kwargs)

    return validating
def create_ap(self, args):
    """Request an access point.

    Requests an access point resource with the specified configuration.

    Args:
        - args: request parameters (json), see http://kirk-docs.qiniu.com/apidocs/

    Returns:
        A tuple (<result>, <ResponseInfo>):
        - result: on success, the information of the allocated access point;
          on failure, {"error": "<errMsg string>"}
        - ResponseInfo: the Response information of the request
    """
    url = '{0}/v3/aps'.format(self.host)
    return self.__post(url, args)
def is_enabled(self, name):
    """Check if given service name is enabled"""
    if self.services and name in self.services:
        return self.services[name]['config'] == 'enabled'
    return False
def set_connection(connection=defaults.sqlalchemy_connection_string_default):
    """Set the connection string for SQLAlchemy.

    :param str connection: SQLAlchemy connection string
    """
    cfp = defaults.config_file_path
    config = RawConfigParser()

    if not os.path.exists(cfp):
        with open(cfp, 'w') as config_file:
            config['database'] = {'sqlalchemy_connection_string': connection}
            config.write(config_file)
            log.info('create configuration file %s', cfp)
    else:
        config.read(cfp)
        config.set('database', 'sqlalchemy_connection_string', connection)
        with open(cfp, 'w') as configfile:
            config.write(configfile)
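For reference, a standalone sketch of the INI section the code above writes; the connection string and the use of an in-memory buffer instead of the real config file path are illustrative.

from configparser import RawConfigParser
import io

config = RawConfigParser()
config['database'] = {'sqlalchemy_connection_string': 'sqlite:///example.db'}

buf = io.StringIO()
config.write(buf)
print(buf.getvalue())
# [database]
# sqlalchemy_connection_string = sqlite:///example.db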
def fetch(self, obj, include_meta=False, chunk_size=None, size=None, extra_info=None):
    """Fetches the object from storage.

    If 'include_meta' is False, only the bytes representing the stored object
    are returned.

    Note: if 'chunk_size' is defined, you must fully read the object's contents
    before making another request.

    If 'size' is specified, only the first 'size' bytes of the object will be
    returned. If the object is smaller than 'size', the entire object is returned.

    When 'include_meta' is True, what is returned from this method is a 2-tuple:
        Element 0: a dictionary containing metadata about the file.
        Element 1: a stream of bytes representing the object's contents.

    The 'extra_info' parameter is included for backwards compatibility. It is no
    longer used at all, and will not be modified with swiftclient info, since
    swiftclient is not used any more.
    """
    return self.object_manager.fetch(obj, include_meta=include_meta,
                                     chunk_size=chunk_size, size=size)
def backing_type_for(value):
    """Returns the DynamoDB backing type for a given python value's type.

    4 -> 'N'
    ['x', 3] -> 'L'
    {2, 4} -> 'NS'
    """
    if isinstance(value, str):
        vtype = "S"
    elif isinstance(value, bytes):
        vtype = "B"
    # NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number)
    elif isinstance(value, bool):
        vtype = "BOOL"
    elif isinstance(value, numbers.Number):
        vtype = "N"
    elif isinstance(value, dict):
        vtype = "M"
    elif isinstance(value, list):
        vtype = "L"
    elif isinstance(value, set):
        if not value:
            vtype = "SS"  # doesn't matter, Set(x) should dump an empty set the same for all x
        else:
            inner = next(iter(value))
            if isinstance(inner, str):
                vtype = "SS"
            elif isinstance(inner, bytes):
                vtype = "BS"
            elif isinstance(inner, numbers.Number):
                vtype = "NS"
            else:
                raise ValueError(f"Unknown set type for inner value {inner!r}")
    else:
        raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}")
    return vtype
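A quick usage check of the mapping above, assuming backing_type_for is in scope; the sample values are arbitrary.

assert backing_type_for("hi") == "S"
assert backing_type_for(b"\x00") == "B"
assert backing_type_for(True) == "BOOL"     # bool is checked before Number on purpose
assert backing_type_for(4) == "N"
assert backing_type_for({"k": 1}) == "M"
assert backing_type_for(["x", 3]) == "L"
assert backing_type_for({"a", "b"}) == "SS"
assert backing_type_for({2, 4}) == "NS"
assert backing_type_for({b"a"}) == "BS"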
def compare_notebooks(notebook_expected, notebook_actual, fmt=None, allow_expected_differences=True,
                      raise_on_first_difference=True, compare_outputs=False):
    """Compare the two notebooks, and raise with a meaningful message that
    explains the differences, if any"""
    fmt = long_form_one_format(fmt)
    format_name = fmt.get('format_name')

    # Expected differences
    allow_filtered_cell_metadata = allow_expected_differences
    allow_missing_code_cell_metadata = allow_expected_differences and format_name == 'sphinx'
    allow_missing_markdown_cell_metadata = allow_expected_differences and format_name in ['sphinx', 'spin']
    allow_removed_final_blank_line = allow_expected_differences

    cell_metadata_filter = notebook_actual.get('jupytext', {}).get('cell_metadata_filter')

    if format_name == 'sphinx' and notebook_actual.cells and notebook_actual.cells[0].source == '%matplotlib inline':
        notebook_actual.cells = notebook_actual.cells[1:]

    # Compare cells type and content
    test_cell_iter = iter(notebook_actual.cells)
    modified_cells = set()
    modified_cell_metadata = set()
    for i, ref_cell in enumerate(notebook_expected.cells, 1):
        try:
            test_cell = next(test_cell_iter)
        except StopIteration:
            if raise_on_first_difference:
                raise NotebookDifference('No cell corresponding to {} cell #{}:\n{}'
                                         .format(ref_cell.cell_type, i, ref_cell.source))
            modified_cells.update(range(i, len(notebook_expected.cells) + 1))
            break

        ref_lines = [line for line in ref_cell.source.splitlines() if not _BLANK_LINE.match(line)]
        test_lines = []

        # 1. test cell type
        if ref_cell.cell_type != test_cell.cell_type:
            if raise_on_first_difference:
                raise NotebookDifference("Unexpected cell type '{}' for {} cell #{}:\n{}"
                                         .format(test_cell.cell_type, ref_cell.cell_type, i, ref_cell.source))
            modified_cells.add(i)

        # 2. test cell metadata
        if (ref_cell.cell_type == 'code' and not allow_missing_code_cell_metadata) or \
                (ref_cell.cell_type != 'code' and not allow_missing_markdown_cell_metadata):
            if allow_filtered_cell_metadata:
                ref_cell.metadata = {key: ref_cell.metadata[key] for key in ref_cell.metadata
                                     if key not in _IGNORE_CELL_METADATA}
                test_cell.metadata = {key: test_cell.metadata[key] for key in test_cell.metadata
                                      if key not in _IGNORE_CELL_METADATA}

            if ref_cell.metadata != test_cell.metadata:
                if raise_on_first_difference:
                    try:
                        compare(ref_cell.metadata, test_cell.metadata)
                    except AssertionError as error:
                        raise NotebookDifference("Metadata differ on {} cell #{}: {}\nCell content:\n{}"
                                                 .format(test_cell.cell_type, i, str(error), ref_cell.source))
                else:
                    modified_cell_metadata.update(set(test_cell.metadata).difference(ref_cell.metadata))
                    modified_cell_metadata.update(set(ref_cell.metadata).difference(test_cell.metadata))
                    for key in set(ref_cell.metadata).intersection(test_cell.metadata):
                        if ref_cell.metadata[key] != test_cell.metadata[key]:
                            modified_cell_metadata.add(key)

        test_lines.extend([line for line in test_cell.source.splitlines() if not _BLANK_LINE.match(line)])

        # 3. test cell content
        if ref_lines != test_lines:
            if raise_on_first_difference:
                try:
                    compare('\n'.join(ref_lines), '\n'.join(test_lines))
                except AssertionError as error:
                    raise NotebookDifference("Cell content differ on {} cell #{}: {}"
                                             .format(test_cell.cell_type, i, str(error)))
            else:
                modified_cells.add(i)

        # 3.bis test entire cell content
        if not same_content(ref_cell.source, test_cell.source, allow_removed_final_blank_line):
            try:
                compare(ref_cell.source, test_cell.source)
            except AssertionError as error:
                if raise_on_first_difference:
                    raise NotebookDifference("Cell content differ on {} cell #{}: {}"
                                             .format(test_cell.cell_type, i, str(error)))
                modified_cells.add(i)

        if not compare_outputs:
            continue

        if ref_cell.cell_type != 'code':
            continue

        ref_cell = filtered_cell(ref_cell, preserve_outputs=compare_outputs,
                                 cell_metadata_filter=cell_metadata_filter)
        test_cell = filtered_cell(test_cell, preserve_outputs=compare_outputs,
                                  cell_metadata_filter=cell_metadata_filter)

        try:
            compare(ref_cell, test_cell)
        except AssertionError as error:
            if raise_on_first_difference:
                raise NotebookDifference("Cell outputs differ on {} cell #{}: {}"
                                         .format(test_cell['cell_type'], i, str(error)))
            modified_cells.add(i)

    # More cells in the actual notebook?
    remaining_cell_count = 0
    while True:
        try:
            test_cell = next(test_cell_iter)
            if raise_on_first_difference:
                raise NotebookDifference('Additional {} cell: {}'.format(test_cell.cell_type, test_cell.source))
            remaining_cell_count += 1
        except StopIteration:
            break

    if remaining_cell_count and not raise_on_first_difference:
        modified_cells.update(range(len(notebook_expected.cells) + 1,
                                    len(notebook_expected.cells) + 1 + remaining_cell_count))

    # Compare notebook metadata
    modified_metadata = False
    try:
        compare(filtered_notebook_metadata(notebook_expected),
                filtered_notebook_metadata(notebook_actual))
    except AssertionError as error:
        if raise_on_first_difference:
            raise NotebookDifference("Notebook metadata differ: {}".format(str(error)))
        modified_metadata = True

    error = []
    if modified_cells:
        error.append('Cells {} differ ({}/{})'.format(','.join([str(i) for i in modified_cells]),
                                                      len(modified_cells), len(notebook_expected.cells)))
    if modified_cell_metadata:
        error.append("Cell metadata '{}' differ".format("', '".join([str(i) for i in modified_cell_metadata])))
    if modified_metadata:
        error.append('Notebook metadata differ')

    if error:
        raise NotebookDifference(' | '.join(error))
def changed(self, src, path, dest):
    """If `path` does not have any parents, it is built. Otherwise, it will
    attempt to build every parent of `path` (or their parents). Output file
    modification times are taken into account to prevent unnecessary builds.
    """
    modified = {path: self.parents[path].updated}
    while True:
        for path in modified:
            if self.parents[path]:
                mtime = modified.pop(path)
                for parent in self.parents[path]:
                    modified[parent] = max(mtime, self.parents[parent].updated)
                break
        else:
            break
    for path in modified:
        self._build(src, path, dest, modified[path])
def ensure_v8_src():
    """Ensure that the v8 sources are present and up-to-date"""
    path = local_path('v8')
    if not os.path.isdir(path):
        fetch_v8(path)
    else:
        update_v8(path)
    checkout_v8_version(local_path("v8/v8"), V8_VERSION)
    dependencies_sync(path)
def check_gap(xpub, api_key):
    """Call the 'v2/receive/checkgap' endpoint and return the current address
    gap for a given xpub.

    :param str xpub: extended public key
    :param str api_key: Blockchain.info API V2 key
    :return: an int
    """
    params = {'key': api_key, 'xpub': xpub}
    resource = 'v2/receive/checkgap?' + util.urlencode(params)
    resp = util.call_api(resource, base_url='https://api.blockchain.info/')
    json_resp = json.loads(resp)
    return json_resp['gap']
def seek_missing_num(sorted_array, num_elements):
    """Determine the missing number in a sorted array.

    The function takes a sorted input array and its length as parameters,
    and finds the missing element.

    Args:
        sorted_array (list): The sorted array of integers.
        num_elements (int): The actual number of elements in the array.

    Returns:
        int: Missing number from sorted array.

    Examples:
        >>> seek_missing_num([1, 2, 3, 5], 4)
        4
        >>> seek_missing_num([1, 3, 4, 5], 4)
        2
        >>> seek_missing_num([1, 2, 3, 5, 6, 7], 5)
        4
    """
    start_element = 0
    end_element = num_elements - 1
    while start_element <= end_element:
        middle_element = int((start_element + end_element) / 2)
        if sorted_array[middle_element] != middle_element + 1 and \
                sorted_array[middle_element - 1] == middle_element:
            return middle_element + 1
        elif sorted_array[middle_element] != middle_element + 1:
            end_element = middle_element - 1
        else:
            start_element = middle_element + 1
    return -1
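A short standalone trace of the same binary search. Element i of a 1-based sequence should equal i + 1; the first index where that breaks, with the previous element still in place, marks the missing number. The input array is chosen for illustration.

arr = [1, 3, 4, 5]
lo, hi = 0, len(arr) - 1
while lo <= hi:
    mid = (lo + hi) // 2
    print('probe index', mid, 'value', arr[mid], 'expected', mid + 1)
    if arr[mid] != mid + 1 and arr[mid - 1] == mid:
        print('missing number:', mid + 1)
        break
    elif arr[mid] != mid + 1:
        hi = mid - 1
    else:
        lo = mid + 1
# probe index 1 value 3 expected 2
# missing number: 2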
def list_algorithms(self, page_size=None):
    """Lists the algorithms visible to this client.

    Algorithms are returned in lexicographical order.

    :rtype: :class:`.Algorithm` iterator
    """
    params = {}
    if page_size is not None:
        params['limit'] = page_size

    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/algorithms'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListAlgorithmsResponse,
        items_key='algorithm',
        item_mapper=Algorithm,
    )
def get_module_path(modname):
    """Return module *modname* base path"""
    return osp.abspath(osp.dirname(sys.modules[modname].__file__))
def class_for_type(self, object_type):
    """Given an object_type return the class associated with it."""
    if object_type not in self.class_mapping:
        raise ZenpyException("Unknown object_type: " + str(object_type))
    else:
        return self.class_mapping[object_type]
def peek(self, default=_marker):
    """Return the item that will be next returned from ``next()``.

    Return ``default`` if there are no items left. If ``default`` is not
    provided, raise ``StopIteration``.
    """
    if not hasattr(self, '_peek'):
        try:
            self._peek = next(self._it)
        except StopIteration:
            if default is _marker:
                raise
            return default
    return self._peek
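A minimal standalone sketch of the same one-item caching idea; the class name and the _MARKER sentinel are made up for illustration (more_itertools.peekable is a full-featured implementation of this pattern).

_MARKER = object()

class Peekable:
    def __init__(self, iterable):
        self._it = iter(iterable)

    def peek(self, default=_MARKER):
        # Cache one item ahead so __next__ can hand it back later.
        if not hasattr(self, '_peek'):
            try:
                self._peek = next(self._it)
            except StopIteration:
                if default is _MARKER:
                    raise
                return default
        return self._peek

    def __iter__(self):
        return self

    def __next__(self):
        if hasattr(self, '_peek'):
            value = self._peek
            del self._peek
            return value
        return next(self._it)

p = Peekable([1, 2, 3])
print(p.peek())                 # 1 (not consumed)
print(next(p))                  # 1
print(next(p))                  # 2
print(next(p))                  # 3
print(p.peek(default='done'))   # 'done' -- the iterator is exhausted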
async def generate_wallet_key(config: Optional[str]) -> str:
    """Generate wallet master key.

    The returned key is compatible with the "RAW" key derivation method.
    It allows one to avoid expensive key derivation for use cases where
    wallet keys can be stored in a secure enclave.

    :param config: (optional) key configuration json.
        "seed": string, (optional) Seed that allows deterministic key creation
            (if not set a random one will be created). Can be UTF-8, base64 or hex string.
    :return: The generated wallet key.
    """
    logger = logging.getLogger(__name__)
    logger.debug("generate_wallet_key: >>> config: %r", config)

    if not hasattr(generate_wallet_key, "cb"):
        logger.debug("generate_wallet_key: Creating callback")
        generate_wallet_key.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    c_config = c_char_p(config.encode('utf-8')) if config is not None else None

    key = await do_call('indy_generate_wallet_key',
                        c_config,
                        generate_wallet_key.cb)

    res = key.decode()

    logger.debug("generate_wallet_key: <<< res: %r", res)
    return res
def _validate_dependencies(self, dependencies, field, value):
    """{'type': ('dict', 'hashable', 'list'), 'check_with': 'dependencies'}"""
    if isinstance(dependencies, _str_type) or not isinstance(dependencies, (Iterable, Mapping)):
        dependencies = (dependencies,)

    if isinstance(dependencies, Sequence):
        self.__validate_dependencies_sequence(dependencies, field)
    elif isinstance(dependencies, Mapping):
        self.__validate_dependencies_mapping(dependencies, field)

    if (self.document_error_tree.fetch_node_from(self.schema_path + (field, 'dependencies'))
            is not None):
        return True
def verify(token, key, algorithms, verify=True):
    """Verifies a JWS string's signature.

    Args:
        token (str): A signed JWS to be verified.
        key (str or dict): A key to attempt to verify the payload with. Can be
            individual JWK or JWK set.
        algorithms (str or list): Valid algorithms that should be used to verify the JWS.

    Returns:
        str: The str representation of the payload, assuming the signature is valid.

    Raises:
        JWSError: If there is an exception verifying a token.

    Examples:
        >>> token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
        >>> jws.verify(token, 'secret', algorithms='HS256')
    """
    header, payload, signing_input, signature = _load(token)

    if verify:
        _verify_signature(signing_input, header, signature, key, algorithms)

    return payload
def print_gce_info(zone, project, instance_name, data):
    """Outputs information about our GCE instance"""
    try:
        instance_info = _get_gce_compute().instances().get(
            project=project,
            zone=zone,
            instance=instance_name).execute()
        log_yellow(pformat(instance_info))
        log_green("Instance state: %s" % instance_info['status'])
        log_green("Ip address: %s" % data['ip_address'])
    except HttpError as e:
        if e.resp.status != 404:
            raise e
        log_yellow("Instance state: DOWN")
    log_green("project: %s" % project)
    log_green("zone: %s" % zone)
    log_green("disk_name: %s" % instance_name)
    log_green("user: %s" % data['username'])
    log_green("ssh -i %s %s@%s" % (env.key_filename, data['username'], data['ip_address']))
def node_dependencies(context: Context):
    """Updates node.js dependencies"""
    args = ['--loglevel', {0: 'silent', 1: 'warn', 2: 'info'}[context.verbosity]]
    if not context.use_colour:
        args.append('--color false')
    args.append('install')
    return context.shell('npm', *args)
def _run_validators(self, value):
    """Perform validation on ``value``. Raise a :exc:`ValidationError` if
    validation does not succeed.
    """
    if value in self.empty_values:
        return

    errors = []
    for validator in self.validators:
        try:
            validator(value)
        except exceptions.ValidationError as err:
            if isinstance(err.messages, dict):
                errors.append(err.messages)
            else:
                errors.extend(err.messages)

    if errors:
        raise exceptions.ValidationError(errors)
def register_by_twine(self):
    """Register via the twine method.

    :return:
    """
    check_call_no_output([
        '{}'.format(self.python),
        'setup.py',
        'bdist_wheel',
    ])
    # at this point there should be only one file in the 'dist' folder
    filename = self.get_package_filename()
    check_call_no_output([
        'twine',
        'register',
        filename,
    ])
def _handle_signals(self, signum, frame):
    """Handler for all signals.

    This method must be used to handle all signals for the process. It is
    responsible for running the appropriate signal handlers registered with
    the 'handle' method unless they are shutdown signals. Shutdown signals
    must trigger the 'shutdown' method.
    """
    if signum in self.kill_signals:
        return self.shutdown(signum)

    for handler in self._handlers[signum]:
        handler()
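A standalone sketch of the same dispatch pattern using the stdlib signal module. The registry layout, the choice of SIGINT/SIGTERM as shutdown signals, and the SIGUSR1 handler are illustrative (SIGUSR1 is POSIX-only), not part of the original class.

import signal
from collections import defaultdict

KILL_SIGNALS = {signal.SIGINT, signal.SIGTERM}   # illustrative choice
HANDLERS = defaultdict(list)                     # signum -> [callables]

def shutdown(signum):
    print('shutting down on signal', signum)

def handle_signals(signum, frame):
    # Shutdown signals short-circuit; everything else runs its registered handlers.
    if signum in KILL_SIGNALS:
        return shutdown(signum)
    for handler in HANDLERS[signum]:
        handler()

HANDLERS[signal.SIGUSR1].append(lambda: print('reloading configuration'))
for signum in (signal.SIGINT, signal.SIGTERM, signal.SIGUSR1):
    signal.signal(signum, handle_signals)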
def filter(self, filters):
    '''Apply filters to the pileup elements, and return a new Pileup with the
    filtered elements removed.

    Parameters
    ----------
    filters : list of PileupElement -> bool callables
        A PileupElement is retained if all filters return True when called on it.
    '''
    new_elements = [e for e in self.elements
                    if all(function(e) for function in filters)]
    return Pileup(self.locus, new_elements)
def __verify_minion_publish(self, load):
    '''Verify that the passed information authorized a minion to execute'''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if re.match('publish.*', load['fun']):
        return False
    # Check the permissions for this minion
    perms = []
    for match in self.opts['peer']:
        if re.match(match, load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in load['fun']:
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        load['fun'] = load['fun'].split(',')
        arg_ = []
        for arg in load['arg']:
            arg_.append(arg.split())
        load['arg'] = arg_
    return self.ckminions.auth_check(
        perms,
        load['fun'],
        load['arg'],
        load['tgt'],
        load.get('tgt_type', 'glob'),
        publish_validate=True)
def list_functions(mod_name):
    """Lists all functions declared in a module.

    http://stackoverflow.com/a/1107150/3004221

    Args:
        mod_name: the module name

    Returns:
        A list of functions declared in that module.
    """
    mod = sys.modules[mod_name]
    return [func.__name__ for func in mod.__dict__.values()
            if is_mod_function(mod, func)]
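The is_mod_function helper is defined elsewhere in that module. A self-contained variant of the same idea using only the standard library might look like this; the json module is just a convenient demo target.

import inspect
import json

def list_functions_inspect(module):
    # Keep callables that are functions and were actually defined in this module,
    # which is roughly what an is_mod_function helper checks.
    return [name for name, obj in inspect.getmembers(module, inspect.isfunction)
            if inspect.getmodule(obj) is module]

print(list_functions_inspect(json))  # e.g. ['dump', 'dumps', 'load', 'loads', ...]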
def from_soup(self, tag_prof_header, tag_prof_nav):
    """Returns the scraped user data from a twitter user page.

    :param tag_prof_header: captures the left hand part of user info
    :param tag_prof_nav: captures the upper part of user info
    :return: Returns a User object with captured data via beautifulsoup
    """
    self.user = tag_prof_header.find(
        'a', {'class': 'ProfileHeaderCard-nameLink u-textInheritColor js-nav'})['href'].strip("/")
    self.full_name = tag_prof_header.find(
        'a', {'class': 'ProfileHeaderCard-nameLink u-textInheritColor js-nav'}).text

    location = tag_prof_header.find('span', {'class': 'ProfileHeaderCard-locationText u-dir'})
    if location is None:
        self.location = "None"
    else:
        self.location = location.text.strip()

    blog = tag_prof_header.find('span', {'class': "ProfileHeaderCard-urlText u-dir"})
    if blog is None:
        self.blog = "None"  # note: the original assigned the local name 'blog' here
    else:
        self.blog = blog.text.strip()

    date_joined = tag_prof_header.find('div', {'class': "ProfileHeaderCard-joinDate"}).find(
        'span', {'class': 'ProfileHeaderCard-joinDateText js-tooltip u-dir'})['title']
    if date_joined is None:
        self.date_joined = "Unknown"  # note: the original had the typo 'self.data_joined'
    else:
        self.date_joined = date_joined.strip()

    self.id = tag_prof_nav.find('div', {'class': 'ProfileNav'})['data-user-id']

    tweets = tag_prof_nav.find('span', {'class': "ProfileNav-value"})['data-count']
    if tweets is None:
        self.tweets = 0
    else:
        self.tweets = int(tweets)

    following = tag_prof_nav.find('li', {'class': "ProfileNav-item ProfileNav-item--following"}).find(
        'span', {'class': "ProfileNav-value"})['data-count']
    if following is None:
        self.following = 0  # note: the original assigned the local name 'following' here
    else:
        self.following = int(following)

    followers = tag_prof_nav.find('li', {'class': "ProfileNav-item ProfileNav-item--followers"}).find(
        'span', {'class': "ProfileNav-value"})['data-count']
    if followers is None:
        self.followers = 0
    else:
        self.followers = int(followers)

    likes = tag_prof_nav.find('li', {'class': "ProfileNav-item ProfileNav-item--favorites"}).find(
        'span', {'class': "ProfileNav-value"})['data-count']
    if likes is None:
        self.likes = 0
    else:
        self.likes = int(likes)

    lists = tag_prof_nav.find('li', {'class': "ProfileNav-item ProfileNav-item--lists"})
    if lists is None:
        self.lists = 0
    elif lists.find('span', {'class': "ProfileNav-value"}) is None:
        self.lists = 0
    else:
        lists = lists.find('span', {'class': "ProfileNav-value"}).text
        self.lists = int(lists)

    return self
def main():
    """Runs test imaging pipeline using MPI."""
    # Check command line arguments.
    if len(sys.argv) < 2:
        raise RuntimeError('Usage: mpiexec -n <np> '
                           'python mpi_imager_test.py <settings_file> <dir>')

    # Get the MPI communicator and initialise broadcast variables.
    comm = MPI.COMM_WORLD
    settings = None
    inputs = None
    grid_weights = None

    # Create log.
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    if len(log.handlers) == 0:
        log.addHandler(logging.StreamHandler(sys.stdout))

    if comm.Get_rank() == 0:
        # Load pipeline settings.
        with open(sys.argv[1]) as f:
            settings = json.load(f)

        # Get a list of input Measurement Sets to process.
        data_dir = str(sys.argv[2])
        inputs = glob(join(data_dir, '*.ms')) + glob(join(data_dir, '*.MS'))
        inputs = filter(None, inputs)
        log.info('Found input Measurement Sets: %s', ', '.join(inputs))

        # Distribute the list of Measurement Sets among processors.
        inputs = chunks(inputs, comm.Get_size())

    # Broadcast settings and scatter list of input files.
    comm.barrier()
    settings = comm.bcast(settings)
    inputs = comm.scatter(inputs)

    # Record which file(s) this node is working on.
    log.debug('Rank %d, processing [%s]', comm.Get_rank(), ', '.join(inputs))

    # Create an imager and configure it.
    precision = settings['precision']
    imager = oskar.Imager(precision)
    for key, value in settings['imager'].items():
        setattr(imager, key, value)

    # Allocate a local visibility grid.
    grid_norm = 0.
    grid_dim = [imager.plane_size, imager.plane_size]
    grid_data = numpy.zeros(grid_dim, dtype='c8' if precision == 'single' else 'c16')

    # Process data according to mode.
    if settings['combine']:
        if imager.weighting == 'Uniform' or imager.algorithm == 'W-projection':
            # If necessary, generate a local weights grid.
            local_weights = None
            if imager.weighting == 'Uniform':
                grid_weights = numpy.zeros(grid_dim, dtype=precision)
                local_weights = numpy.zeros(grid_dim, dtype=precision)

            # Do a first pass for uniform weighting or W-projection.
            imager.coords_only = True
            for f in inputs:
                log.info('Reading coordinates from %s', f)
                process_input_data(f, imager, None, 0.0, local_weights)
            imager.coords_only = False

            # Get maximum number of W-projection planes required.
            num_w_planes = imager.num_w_planes
            num_w_planes = comm.allreduce(num_w_planes, op=MPI.MAX)
            imager.num_w_planes = num_w_planes

            # Combine (reduce) weights grids, and broadcast the result.
            if local_weights is not None:
                comm.Allreduce(local_weights, grid_weights, op=MPI.SUM)

        # Populate the local visibility grid.
        for f in inputs:
            log.info('Reading visibilities from %s', f)
            grid_norm = process_input_data(f, imager, grid_data, grid_norm, grid_weights)

        # Combine (reduce) visibility grids.
        grid = numpy.zeros_like(grid_data)
        comm.Reduce(grid_data, grid, op=MPI.SUM)
        grid_norm = comm.reduce(grid_norm, op=MPI.SUM)

        # Finalise grid and save image.
        if comm.Get_rank() == 0:
            save_image(imager, grid, grid_norm, settings['output_file'])
            log.info('Finished. Output file is %s', settings['output_file'])
    else:
        for f in inputs:
            # Clear the grid.
            grid_norm = 0.
            grid_data.fill(0)
            if imager.weighting == 'Uniform':
                grid_weights = numpy.zeros(grid_dim, dtype=precision)

            # Do a first pass for uniform weighting or W-projection.
            if imager.weighting == 'Uniform' or imager.algorithm == 'W-projection':
                imager.coords_only = True
                log.info('Reading coordinates from %s', f)
                process_input_data(f, imager, None, 0.0, grid_weights)
                imager.coords_only = False

            # Populate the local visibility grid.
            log.info('Reading visibilities from %s', f)
            grid_norm = process_input_data(f, imager, grid_data, grid_norm, grid_weights)

            # Save image by finalising grid.
            output_file = splitext(f)[0] + '.fits'
            save_image(imager, grid_data, grid_norm, output_file)
            log.info('Finished. Output file is %s', output_file)
def new_type(type_name: str, prefix: str or None = None) -> str:
    """Creates a resource type with optionally a prefix.

    Using the rules of JSON-LD, we use prefixes to disambiguate between different
    types with the same name: one can Accept a device or a project. In eReuse.org
    there are different events with the same names; in linked-data terms they have
    different URI. In eReuse.org, we solve this with the following:

        "@type": "devices:Accept"   // the URI for these events is 'devices/events/accept'
        "@type": "projects:Accept"  // the URI for these events is 'projects/events/accept'

    Type is only used in events, when there are ambiguities. The rest of cases
    are equivalent:

        "@type": "devices:Accept"
        "@type": "Accept"

    But these are not:

        "@type": "projects:Accept"  // it is an event from a project
        "@type": "Accept"           // it is an event from a device
    """
    if Naming.TYPE_PREFIX in type_name:
        raise TypeError('Cannot create new type: type {} is already prefixed.'.format(type_name))
    prefix = (prefix + Naming.TYPE_PREFIX) if prefix is not None else ''
    return prefix + type_name
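A standalone sketch of the prefixing rule. The ':' separator mirrors the "devices:Accept" examples above, but the real value of Naming.TYPE_PREFIX comes from the eReuse.org codebase and is assumed here.

TYPE_PREFIX = ':'  # assumed separator, matching the "devices:Accept" examples

def new_type_sketch(type_name, prefix=None):
    if TYPE_PREFIX in type_name:
        raise TypeError('Cannot create new type: type {} is already prefixed.'.format(type_name))
    return ((prefix + TYPE_PREFIX) if prefix is not None else '') + type_name

print(new_type_sketch('Accept'))              # 'Accept' (unambiguous event)
print(new_type_sketch('Accept', 'projects'))  # 'projects:Accept'
# new_type_sketch('projects:Accept')          # raises TypeError: already prefixed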
def groupby2(records, kfield, vfield):
    """
    :param records: a sequence of records with positional or named fields
    :param kfield: the index/name/tuple specifying the field to use as a key
    :param vfield: the index/name/tuple specifying the field to use as a value
    :returns: a list of pairs of the form (key, [value, ...]).

    >>> groupby2(['A1', 'A2', 'B1', 'B2', 'B3'], 0, 1)
    [('A', ['1', '2']), ('B', ['1', '2', '3'])]

    Here is an example where the keyfield is a tuple of integers:

    >>> groupby2(['A11', 'A12', 'B11', 'B21'], (0, 1), 2)
    [(('A', '1'), ['1', '2']), (('B', '1'), ['1']), (('B', '2'), ['1'])]
    """
    if isinstance(kfield, tuple):
        kgetter = operator.itemgetter(*kfield)
    else:
        kgetter = operator.itemgetter(kfield)
    if isinstance(vfield, tuple):
        vgetter = operator.itemgetter(*vfield)
    else:
        vgetter = operator.itemgetter(vfield)
    dic = groupby(records, kgetter, lambda rows: [vgetter(r) for r in rows])
    return list(dic.items())
def network_interface_present(name, ip_configurations, subnet, virtual_network, resource_group, tags=None,
                              virtual_machine=None, network_security_group=None, dns_settings=None,
                              mac_address=None, primary=None, enable_accelerated_networking=None,
                              enable_ip_forwarding=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface exists.

    :param name: Name of the network interface.

    :param ip_configurations: A list of dictionaries representing valid
        NetworkInterfaceIPConfiguration objects. The 'name' key is required at minimum.
        At least one IP Configuration must be present.

    :param subnet: Name of the existing subnet assigned to the network interface.

    :param virtual_network: Name of the existing virtual network containing the subnet.

    :param resource_group: The resource group assigned to the virtual network.

    :param tags: A dictionary of strings can be passed as tag metadata to the network
        interface object.

    :param network_security_group: The name of the existing network security group to
        assign to the network interface.

    :param virtual_machine: The name of the existing virtual machine to assign to the
        network interface.

    :param dns_settings: An optional dictionary representing a valid
        NetworkInterfaceDnsSettings object. Valid parameters are:

        - ``dns_servers``: List of DNS server IP addresses. Use 'AzureProvidedDNS' to
          switch to Azure provided DNS resolution. 'AzureProvidedDNS' value cannot be
          combined with other IPs, it must be the only value in dns_servers collection.
        - ``internal_dns_name_label``: Relative DNS name for this NIC used for internal
          communications between VMs in the same virtual network.
        - ``internal_fqdn``: Fully qualified DNS name supporting internal communications
          between VMs in the same virtual network.
        - ``internal_domain_name_suffix``: Even if internal_dns_name_label is not
          specified, a DNS entry is created for the primary NIC of the VM. This DNS name
          can be constructed by concatenating the VM name with the value of
          internal_domain_name_suffix.

    :param mac_address: Optional string containing the MAC address of the network interface.

    :param primary: Optional boolean allowing the interface to be set as the primary network
        interface on a virtual machine with multiple interfaces attached.

    :param enable_accelerated_networking: Optional boolean indicating whether accelerated
        networking should be enabled for the interface.

    :param enable_ip_forwarding: Optional boolean indicating whether IP forwarding should be
        enabled for the interface.

    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network interface exists:
            azurearm_network.network_interface_present:
                - name: iface1
                - subnet: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - ip_configurations:
                  - name: iface1_ipc1
                    public_ip_address: pub_ip2
                - dns_settings:
                    internal_dns_name_label: decisionlab-int-test-label
                - primary: True
                - enable_accelerated_networking: True
                - enable_ip_forwarding: False
                - network_security_group: nsg1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure subnet exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure another public IP exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in iface:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](iface.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # mac_address changes
        if mac_address and (mac_address != iface.get('mac_address')):
            ret['changes']['mac_address'] = {
                'old': iface.get('mac_address'),
                'new': mac_address
            }

        # primary changes
        if primary is not None:
            if primary != iface.get('primary', True):
                ret['changes']['primary'] = {
                    'old': iface.get('primary'),
                    'new': primary
                }

        # enable_accelerated_networking changes
        if enable_accelerated_networking is not None:
            if enable_accelerated_networking != iface.get('enable_accelerated_networking'):
                ret['changes']['enable_accelerated_networking'] = {
                    'old': iface.get('enable_accelerated_networking'),
                    'new': enable_accelerated_networking
                }

        # enable_ip_forwarding changes
        if enable_ip_forwarding is not None:
            if enable_ip_forwarding != iface.get('enable_ip_forwarding'):
                ret['changes']['enable_ip_forwarding'] = {
                    'old': iface.get('enable_ip_forwarding'),
                    'new': enable_ip_forwarding
                }

        # network_security_group changes
        nsg_name = None
        if iface.get('network_security_group'):
            nsg_name = iface['network_security_group']['id'].split('/')[-1]

        if network_security_group and (network_security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': network_security_group
            }

        # virtual_machine changes
        vm_name = None
        if iface.get('virtual_machine'):
            vm_name = iface['virtual_machine']['id'].split('/')[-1]

        if virtual_machine and (virtual_machine != vm_name):
            ret['changes']['virtual_machine'] = {
                'old': vm_name,
                'new': virtual_machine
            }

        # dns_settings changes
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret

            for key in dns_settings:
                if dns_settings[key].lower() != iface.get('dns_settings', {}).get(key, '').lower():
                    ret['changes']['dns_settings'] = {
                        'old': iface.get('dns_settings'),
                        'new': dns_settings
                    }
                    break

        # ip_configurations changes
        comp_ret = __utils__['azurearm.compare_list_of_dicts'](
            iface.get('ip_configurations', []),
            ip_configurations,
            ['public_ip_address', 'subnet']
        )

        if comp_ret.get('comment'):
            ret['comment'] = '"ip_configurations" {0}'.format(comp_ret['comment'])
            return ret

        if comp_ret.get('changes'):
            ret['changes']['ip_configurations'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network interface {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network interface {0} would be updated.'.format(name)
            return ret

    else:
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'ip_configurations': ip_configurations,
                'dns_settings': dns_settings,
                'network_security_group': network_security_group,
                'virtual_machine': virtual_machine,
                'enable_accelerated_networking': enable_accelerated_networking,
                'enable_ip_forwarding': enable_ip_forwarding,
                'mac_address': mac_address,
                'primary': primary,
                'tags': tags,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Network interface {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    iface_kwargs = kwargs.copy()
    iface_kwargs.update(connection_auth)

    iface = __salt__['azurearm_network.network_interface_create_or_update'](
        name=name,
        subnet=subnet,
        virtual_network=virtual_network,
        resource_group=resource_group,
        ip_configurations=ip_configurations,
        dns_settings=dns_settings,
        enable_accelerated_networking=enable_accelerated_networking,
        enable_ip_forwarding=enable_ip_forwarding,
        mac_address=mac_address,
        primary=primary,
        network_security_group=network_security_group,
        virtual_machine=virtual_machine,
        tags=tags,
        **iface_kwargs
    )

    if 'error' not in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network interface {0}! ({1})'.format(name, iface.get('error'))
    return ret
def compact_allele_name ( raw_allele ) : """Turn HLA - A * 02:01 into A0201 or H - 2 - D - b into H - 2Db or HLA - DPA1*01:05 - DPB1*100:01 into DPA10105 - DPB110001"""
parsed_alleles = parse_classi_or_classii_allele_name ( raw_allele ) normalized_list = [ ] if len ( parsed_alleles ) == 2 : alpha , beta = parsed_alleles # by convention the alpha allele is omitted since it ' s assumed # to be DRA1*01:01 if alpha == _DRA1_0101 : parsed_alleles = [ beta ] for parsed_allele in parsed_alleles : if len ( parsed_allele . allele_family ) > 0 : normalized_list . append ( "%s%s%s" % ( parsed_allele . gene , parsed_allele . allele_family , parsed_allele . allele_code ) ) else : # mice don ' t have allele families normalized_list . append ( "%s%s" % ( parsed_allele . gene , parsed_allele . allele_code ) ) return "-" . join ( normalized_list )
def _header ( self , pam = False ) : """Return file header as byte string ."""
if pam or self . magicnum == b'P7' : header = "\n" . join ( ( "P7" , "HEIGHT %i" % self . height , "WIDTH %i" % self . width , "DEPTH %i" % self . depth , "MAXVAL %i" % self . maxval , "\n" . join ( "TUPLTYPE %s" % unicode ( i ) for i in self . tupltypes ) , "ENDHDR\n" ) ) elif self . maxval == 1 : header = "P4 %i %i\n" % ( self . width , self . height ) elif self . depth == 1 : header = "P5 %i %i %i\n" % ( self . width , self . height , self . maxval ) else : header = "P6 %i %i %i\n" % ( self . width , self . height , self . maxval ) if sys . version_info [ 0 ] > 2 : header = bytes ( header , 'ascii' ) return header
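A quick worked example of the plain-format header string built by the P5 branch above (a sketch, not part of the library): for an 8-bit grayscale image the header is simply the magic number followed by width, height and maxval.

width, height, maxval = 640, 480, 255
header = "P5 %i %i %i\n" % (width, height, maxval)
assert header == "P5 640 480 255\n"  # depth 1, maxval > 1 -> plain PGM header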
def generate_binary_ulid ( timestamp = None , monotonic = False ) : """Generate the bytes for an ULID . : param timestamp : An optional timestamp override . If ` None ` , the current time is used . : type timestamp : int | float | datetime . datetime | None : param monotonic : Attempt to ensure ULIDs are monotonically increasing . Monotonic behavior is not guaranteed when used from multiple threads . : type monotonic : bool : return : Bytestring of length 16. : rtype : bytes"""
global _last_entropy , _last_timestamp if timestamp is None : timestamp = time . time ( ) elif isinstance ( timestamp , datetime . datetime ) : timestamp = calendar . timegm ( timestamp . utctimetuple ( ) ) ts = int ( timestamp * 1000.0 ) ts_bytes = _to_binary ( ( ts >> shift ) & 0xFF for shift in ( 40 , 32 , 24 , 16 , 8 , 0 ) ) entropy = os . urandom ( 10 ) if monotonic and _last_timestamp == ts and _last_entropy is not None : while entropy < _last_entropy : entropy = os . urandom ( 10 ) _last_entropy = entropy _last_timestamp = ts return ts_bytes + entropy
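A minimal companion sketch (Python 3 assumed; not part of the library above) showing how the 16-byte value can be split back apart: the first 6 bytes are a big-endian millisecond timestamp, the last 10 bytes are entropy.

import datetime

def split_binary_ulid(ulid_bytes):
    # 48-bit big-endian millisecond timestamp followed by 80 bits of entropy
    assert len(ulid_bytes) == 16
    ts_ms = int.from_bytes(ulid_bytes[:6], "big")
    entropy = ulid_bytes[6:]
    ts = datetime.datetime.fromtimestamp(ts_ms / 1000.0, tz=datetime.timezone.utc)
    return ts, entropy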
def _updateFrame ( self ) : """Updates the frame for the given sender ."""
mov = self . movie ( ) if mov : self . setIcon ( QtGui . QIcon ( mov . currentPixmap ( ) ) )
def addFreetextAnnot ( self , rect , text , fontsize = 12 , fontname = None , color = None , rotate = 0 ) : """Add a ' FreeText ' annotation in rectangle ' rect ' ."""
CheckParent ( self ) val = _fitz . Page_addFreetextAnnot ( self , rect , text , fontsize , fontname , color , rotate ) if not val : return val . thisown = True val . parent = weakref . proxy ( self ) self . _annot_refs [ id ( val ) ] = val return val
def post_message ( message , chat_id = None , token = None ) : '''Send a message to a Telegram chat . : param message : The message to send to the Telegram chat . : param chat _ id : ( optional ) The Telegram chat id . : param token : ( optional ) The Telegram API token . : return : Boolean if message was sent successfully . CLI Example : . . code - block : : bash salt ' * ' telegram . post _ message message = " Hello Telegram ! "'''
if not chat_id : chat_id = _get_chat_id ( ) if not token : token = _get_token ( ) if not message : log . error ( 'message is a required option.' ) return _post_message ( message = message , chat_id = chat_id , token = token )
def cache_key ( self , template_name , skip = None ) : """Generate a cache key for the template name , dirs , and skip . If skip is provided , only origins that match template _ name are included in the cache key . This ensures each template is only parsed and cached once if contained in different extend chains like : x - > a - > a y - > a - > a z - > a - > a"""
dirs_prefix = '' skip_prefix = '' tenant_prefix = '' if skip : matching = [ origin . name for origin in skip if origin . template_name == template_name ] if matching : skip_prefix = self . generate_hash ( matching ) if connection . tenant : tenant_prefix = str ( connection . tenant . pk ) return '-' . join ( s for s in ( str ( template_name ) , tenant_prefix , skip_prefix , dirs_prefix ) if s )
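An illustrative standalone sketch of the same idea (generate_hash here is a hypothetical stand-in; the real loader may hash the matching origins differently): the skip list is reduced to origins whose template_name matches, hashed, and joined with the tenant prefix.

import hashlib

def generate_hash(values):
    # hypothetical stand-in for the loader's generate_hash helper
    return hashlib.sha1("|".join(values).encode("utf-8")).hexdigest()[:12]

template_name, tenant_prefix, matching = "base.html", "42", ["base.html"]
key = "-".join(s for s in (template_name, tenant_prefix, generate_hash(matching), "") if s)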
def make_vslc_label ( self , gene_label , allele1_label , allele2_label ) : """Make a Variant Single Locus Complement ( VSLC ) in monarch - style . : param gene _ label : : param allele1 _ label : : param allele2 _ label : : return :"""
vslc_label = '' if gene_label is None and allele1_label is None and allele2_label is None : LOG . error ( "Not enough info to make vslc label" ) return None top = self . make_variant_locus_label ( gene_label , allele1_label ) bottom = '' if allele2_label is not None : bottom = self . make_variant_locus_label ( gene_label , allele2_label ) vslc_label = '/' . join ( ( top , bottom ) ) return vslc_label
def background_task_method ( task ) : """Decorate an object method as a background task ( called with help of gearman ) . You have to create a task which will handle the gearman call . The method arguments will be encoded as JSON . : param task : name of the task : type task : str : return : decorated function"""
# TODO return some JOB ID as the result , so the task can be checked on in the background # TODO also create a generic background _ task , not only for methods def decorator_fn ( fn ) : gearman = None @ inject ( config = Config ) def gearman_connect ( config ) : # type : ( Config ) - > GearmanService if 'GEARMAN' not in config or 'host' not in config [ 'GEARMAN' ] or 'GEARMAN_TASK_NAME' not in config : raise Exception ( "Missing gearman settings (trying to use background task)" ) gearman_host = ( config [ 'GEARMAN' ] [ 'host' ] , config [ 'GEARMAN' ] [ 'port' ] ) if config [ 'GEARMAN' ] [ 'port' ] else config [ 'GEARMAN' ] [ 'host' ] gearman_service = GearmanService ( { 'HOST' : [ gearman_host ] , 'TASK_NAME' : config [ 'GEARMAN_TASK_NAME' ] } ) gearman_service . set_blocking ( False ) return gearman_service def get_gearman_client ( ) : # type : ( ) - > GearmanService nonlocal gearman if not gearman : gearman = gearman_connect ( ) return gearman @ wraps ( fn ) def background_task_decorator ( * args , ** kwargs ) : # The first of the args is self . t = RawTask ( task , dict ( method = fn . __name__ , args = args [ 1 : ] , kwargs = kwargs ) ) t_result = get_gearman_client ( ) . call ( t , [ JsonTask ] ) return t_result . result background_task_decorator . _background_fn = fn return background_task_decorator return decorator_fn
def from_google ( cls , google_x , google_y , zoom ) : """Creates a tile from Google format X Y and zoom"""
max_tile = ( 2 ** zoom ) - 1 assert 0 <= google_x <= max_tile , 'Google X needs to be a value between 0 and (2^zoom) -1.' assert 0 <= google_y <= max_tile , 'Google Y needs to be a value between 0 and (2^zoom) -1.' return cls ( tms_x = google_x , tms_y = ( 2 ** zoom - 1 ) - google_y , zoom = zoom )
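An illustrative check of the Y-axis flip used above (standalone helpers written for this example, not part of the class): Google/XYZ and TMS tiles share X and zoom, only Y is mirrored.

def google_to_tms_y(google_y, zoom):
    return (2 ** zoom - 1) - google_y

def tms_to_google_y(tms_y, zoom):
    return (2 ** zoom - 1) - tms_y

assert google_to_tms_y(tms_to_google_y(5, zoom=4), zoom=4) == 5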
def is_local_maximum ( image , labels , footprint ) : '''Return a boolean array of points that are local maxima image - intensity image labels - find maxima only within labels . Zero is reserved for background . footprint - binary mask indicating the neighborhood to be examined must be a matrix with odd dimensions , center is taken to be the point in question .'''
assert ( ( np . all ( footprint . shape ) & 1 ) == 1 ) footprint = ( footprint != 0 ) footprint_extent = ( np . array ( footprint . shape ) - 1 ) // 2 if np . all ( footprint_extent == 0 ) : return labels > 0 result = ( labels > 0 ) . copy ( ) # Create a labels matrix with zeros at the borders that might be # hit by the footprint . big_labels = np . zeros ( np . array ( labels . shape ) + footprint_extent * 2 , labels . dtype ) big_labels [ [ slice ( fe , - fe ) for fe in footprint_extent ] ] = labels # Find the relative indexes of each footprint element image_strides = np . array ( image . strides ) // image . dtype . itemsize big_strides = np . array ( big_labels . strides ) // big_labels . dtype . itemsize result_strides = np . array ( result . strides ) // result . dtype . itemsize footprint_offsets = np . mgrid [ [ slice ( - fe , fe + 1 ) for fe in footprint_extent ] ] footprint_offsets = footprint_offsets [ : , footprint ] # Order by distance , low to high and get rid of center pt . d = np . sum ( footprint_offsets ** 2 , 0 ) footprint_offsets , d = footprint_offsets [ : , d > 0 ] , d [ d > 0 ] footprint_offsets = footprint_offsets [ : , np . lexsort ( [ d ] ) ] fp_image_offsets = np . sum ( image_strides [ : , np . newaxis ] * footprint_offsets , 0 ) fp_big_offsets = np . sum ( big_strides [ : , np . newaxis ] * footprint_offsets , 0 ) # Get the index of each labeled pixel in the image and big _ labels arrays indexes = np . mgrid [ [ slice ( 0 , x ) for x in labels . shape ] ] [ : , labels > 0 ] image_indexes = np . sum ( image_strides [ : , np . newaxis ] * indexes , 0 ) big_indexes = np . sum ( big_strides [ : , np . newaxis ] * ( indexes + footprint_extent [ : , np . newaxis ] ) , 0 ) result_indexes = np . sum ( result_strides [ : , np . newaxis ] * indexes , 0 ) # Now operate on the raveled images big_labels_raveled = big_labels . ravel ( ) image_raveled = image . ravel ( ) result_raveled = result . ravel ( ) # A hit is a hit if the label at the offset matches the label at the pixel # and if the intensity at the pixel is greater or equal to the intensity # at the offset . for fp_image_offset , fp_big_offset in zip ( fp_image_offsets , fp_big_offsets ) : same_label = ( big_labels_raveled [ big_indexes + fp_big_offset ] == big_labels_raveled [ big_indexes ] ) less_than = ( image_raveled [ image_indexes [ same_label ] ] < image_raveled [ image_indexes [ same_label ] + fp_image_offset ] ) mask = ~ same_label mask [ same_label ] = ~ less_than result_raveled [ result_indexes [ ~ mask ] ] = False result_indexes = result_indexes [ mask ] big_indexes = big_indexes [ mask ] image_indexes = image_indexes [ mask ] return result
def list_saml_providers ( region = None , key = None , keyid = None , profile = None ) : '''List SAML providers . CLI Example : . . code - block : : bash salt myminion boto _ iam . list _ saml _ providers'''
conn = _get_conn ( region = region , key = key , keyid = keyid , profile = profile ) try : providers = [ ] info = conn . list_saml_providers ( ) for arn in info [ 'list_saml_providers_response' ] [ 'list_saml_providers_result' ] [ 'saml_provider_list' ] : providers . append ( arn [ 'arn' ] . rsplit ( '/' , 1 ) [ 1 ] ) return providers except boto . exception . BotoServerError as e : log . debug ( __utils__ [ 'boto.get_error' ] ( e ) ) log . error ( 'Failed to get list of SAML providers.' ) return False
def _mapped_populations ( mdl1 , mdl2 ) : """Method to get the populations for states in mdl 1 from populations inferred in mdl 2 . Resorts to 0 if population is not present ."""
return_vect = np . zeros ( mdl1 . n_states_ ) for i in range ( mdl1 . n_states_ ) : try : # there has to be a better way to do this mdl1_unmapped = mdl1 . inverse_transform ( [ i ] ) [ 0 ] [ 0 ] mdl2_mapped = mdl2 . mapping_ [ mdl1_unmapped ] return_vect [ i ] = mdl2 . populations_ [ mdl2_mapped ] except : pass return return_vect
def plan_validation ( user , plan = None , on_activation = False ) : """Runs the quota validators configured in settings ( PLANS _ VALIDATORS ) against the quotas of the given user ' s plan : param user : : param plan : : return :"""
if plan is None : # if plan is not given , the default is to use current plan of the user plan = user . userplan . plan quota_dict = plan . get_quota_dict ( ) validators = getattr ( settings , 'PLANS_VALIDATORS' , { } ) validators = import_name ( validators ) errors = { 'required_to_activate' : [ ] , 'other' : [ ] , } for quota in validators : validator = import_name ( validators [ quota ] ) if on_activation : validator . on_activation ( user , quota_dict ) else : try : validator ( user , quota_dict ) except ValidationError as e : if validator . required_to_activate : errors [ 'required_to_activate' ] . extend ( e . messages ) else : errors [ 'other' ] . extend ( e . messages ) return errors
def get_index ( cls ) : """Gets the index for this model . The index for this model is specified in ` settings . ES _ INDEXES ` which is a dict of mapping type - > index name . By default , this uses ` . get _ mapping _ type ( ) ` to determine the mapping and returns the value in ` settings . ES _ INDEXES ` for that or ` ` settings . ES _ INDEXES [ ' default ' ] ` ` . Override this to compute it differently . : returns : index name to use"""
indexes = settings . ES_INDEXES index = indexes . get ( cls . get_mapping_type_name ( ) ) or indexes [ 'default' ] if not ( isinstance ( index , six . string_types ) ) : # FIXME - not sure what to do here , but we only want one # index and somehow this isn ' t one index . index = index [ 0 ] return index
def _makedirs ( name , mode = 0o777 , exist_ok = False ) : """Source : https : / / github . com / python / cpython / blob / 3ce3dea60646d8a5a1c952469a2eb65f937875b3 / Lib / os . py # L196 - L226"""
head , tail = os . path . split ( name ) if not tail : head , tail = os . path . split ( head ) if head and tail and not os . path . exists ( head ) : try : _makedirs ( head , exist_ok = exist_ok ) except OSError as e : if e . errno != errno . EEXIST : raise cdir = os . curdir if isinstance ( tail , bytes ) : cdir = bytes ( os . curdir , "ASCII" ) if tail == cdir : return try : os . mkdir ( name , mode ) except OSError : if not exist_ok or not os . path . isdir ( name ) : raise
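A usage sketch, assuming the backport above is importable as _makedirs: with exist_ok=True repeated calls on the same path are harmless, mirroring Python 3's os.makedirs.

import os
import tempfile

base = tempfile.mkdtemp()
target = os.path.join(base, "a", "b", "c")
_makedirs(target, exist_ok=True)
_makedirs(target, exist_ok=True)  # second call is a no-op instead of raising
assert os.path.isdir(target)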
def generate_boosted_machine ( self ) : """generate _ boosted _ machine ( ) - > strong Creates a single strong classifier from this cascade by concatenating all strong classifiers . * * Returns : * * ` ` strong ` ` : : py : class : ` bob . learn . boosting . BoostedMachine ` The strong classifier as a combination of all classifiers in this cascade ."""
strong = bob . learn . boosting . BoostedMachine ( ) for machine , index in zip ( self . cascade , self . indices ) : weak = machine . weak_machines weights = machine . weights for i in range ( len ( weak ) ) : strong . add_weak_machine ( weak [ i ] , weights [ i ] ) return strong
def get_op_restrictions_by_content_operation ( self , content_id , operation_key , expand = None , start = None , limit = None , callback = None ) : """Returns info about all restrictions of given operation . : param content _ id ( string ) : The content ID to query on . : param operation _ key ( string ) : The operation key to query on . : param expand ( string ) : OPTIONAL : A comma separated list of properties to expand on the content properties . Default : Again , this is unclear / inconsistent when reading documentation . The REST documentation claims that both are default : " group " " restrictions . user , restrictions . group " : param start ( int ) : Pagination start count . : param limit ( int ) : Pagination return count limit . : param callback : OPTIONAL : The callback to execute on the resulting data , before the method returns . Default : None ( no callback , raw data returned ) . : return : The JSON data returned from the content / { id } / restriction / byOperation / { operationKey } endpoint , or the results of the callback . Will raise requests . HTTPError on bad input , potentially ."""
params = { } if expand : params [ "expand" ] = expand if start is not None : params [ "start" ] = int ( start ) if limit is not None : params [ "limit" ] = int ( limit ) return self . _service_get_request ( "rest/api/content/{id}/restriction/byOperation/{opkey}" "" . format ( id = content_id , opkey = operation_key ) , params = params , callback = callback )
def add_domain_name ( list_name , item_name ) : '''Adds a domain name to a domain name list . list _ name ( str ) : The name of the specific policy domain name list to append to . item _ name ( str ) : The domain name to append . CLI Example : . . code - block : : bash salt ' * ' bluecoat _ sslv . add _ domain _ name MyDomainName foo . bar . com'''
payload = { "jsonrpc" : "2.0" , "id" : "ID0" , "method" : "add_policy_domain_names" , "params" : [ list_name , { "item_name" : item_name } ] } response = __proxy__ [ 'bluecoat_sslv.call' ] ( payload , True ) return _validate_change_result ( response )
def remove_frequencies ( self , fmin , fmax ) : """Remove all frequencies outside the open interval ( fmin , fmax ) from the dataset"""
self . data . query ( 'frequency > {0} and frequency < {1}' . format ( fmin , fmax ) , inplace = True ) g = self . data . groupby ( 'frequency' ) print ( 'Remaining frequencies:' ) print ( sorted ( g . groups . keys ( ) ) )
def create ( self , key , value ) : """Atomically create the given key only if the key doesn ' t exist . This verifies that the create _ revision of a key equals 0 , then creates the key with the value . This operation takes place in a transaction . : param key : key in etcd to create : param value : value of the key : type value : bytes or string : returns : status of transaction , ` ` True ` ` if the create was successful , ` ` False ` ` otherwise : rtype : bool"""
base64_key = _encode ( key ) base64_value = _encode ( value ) txn = { 'compare' : [ { 'key' : base64_key , 'result' : 'EQUAL' , 'target' : 'CREATE' , 'create_revision' : 0 } ] , 'success' : [ { 'request_put' : { 'key' : base64_key , 'value' : base64_value , } } ] , 'failure' : [ ] } result = self . transaction ( txn ) if 'succeeded' in result : return result [ 'succeeded' ] return False
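A minimal in-memory analogue of the compare-and-create transaction above (a sketch only; real etcd evaluates create_revision on the server): the key is written only when it does not already exist.

def create_if_absent(store, key, value):
    if key in store:       # corresponds to create_revision != 0 -> failure branch
        return False
    store[key] = value     # corresponds to the request_put in the success branch
    return True

store = {}
assert create_if_absent(store, "/locks/job-42", "owner-a") is True
assert create_if_absent(store, "/locks/job-42", "owner-b") is False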
def removeDataFrameColumns ( self , columns ) : """Removes columns from the dataframe . : param columns : [ ( int , str ) ] : return : ( bool ) True on success , False on failure ."""
if not self . editable : return False if columns : deleted = 0 errored = False for ( position , name ) in columns : position = position - deleted if position < 0 : position = 0 self . beginRemoveColumns ( QtCore . QModelIndex ( ) , position , position ) try : self . _dataFrame . drop ( name , axis = 1 , inplace = True ) except ValueError as e : errored = True continue self . endRemoveColumns ( ) deleted += 1 self . dataChanged . emit ( ) if errored : return False else : return True return False
def arguments_from_optionable ( parser , component , prefix = "" ) : """Add argparse arguments from all options of one : class : ` Optionable ` > > > # Let ' s build a dummy optionable component : > > > comp = Optionable ( ) > > > comp . add _ option ( " num " , Numeric ( default = 1 , max = 12 , help = " An exemple of option " ) ) > > > comp . add _ option ( " title " , Text ( help = " The title of the title " ) ) > > > comp . add _ option ( " ok " , Boolean ( help = " is it ok ? " , default = True ) ) > > > comp . add _ option ( " cool " , Boolean ( help = " is it cool ? " , default = False ) ) > > > # one can then register all the options of this component to a arg parser > > > parser = argparse . ArgumentParser ( prog = " PROG " ) > > > arguments _ from _ optionable ( parser , comp ) > > > parser . print _ help ( ) usage : PROG [ - h ] [ - - num NUM ] [ - - title TITLE ] [ - - not - ok ] [ - - cool ] < BLANKLINE > optional arguments : - h , - - help show this help message and exit - - num NUM An exemple of option - - title TITLE The title of the title - - not - ok is it ok ? - - cool is it cool ? The option values for a componant can then be retrieved with : func : ` get _ config _ for ` . . doctest : : : hide : > > > import argparse > > > args = argparse . Namespace ( ) > > > args . num = 1 > > > args . title = " My title " > > > args . ok = True > > > args . cool = False > > > args = parser . parse _ args ( ) # doctest : + SKIP > > > config = get _ config _ for ( args , comp ) > > > comp ( " input " , * * config ) # doctest : + SKIP " comp _ result " """
for option in component . options : if component . options [ option ] . hidden : continue argument_from_option ( parser , component , option , prefix = prefix )
def host_keys ( keydir = None , private = True , certs = True ) : '''Return the minion ' s host keys CLI Example : . . code - block : : bash salt ' * ' ssh . host _ keys salt ' * ' ssh . host _ keys keydir = / etc / ssh salt ' * ' ssh . host _ keys keydir = / etc / ssh private = False salt ' * ' ssh . host _ keys keydir = / etc / ssh certs = False'''
# TODO : support parsing sshd _ config for the key directory if not keydir : if __grains__ [ 'kernel' ] == 'Linux' : keydir = '/etc/ssh' else : # If keydir is None , os . listdir ( ) will blow up raise SaltInvocationError ( 'ssh.host_keys: Please specify a keydir' ) keys = { } fnre = re . compile ( r'ssh_host_(?P<type>.+)_key(?P<pub>(?P<cert>-cert)?\.pub)?' ) for fn_ in os . listdir ( keydir ) : m = fnre . match ( fn_ ) if m : if not m . group ( 'pub' ) and private is False : log . info ( 'Skipping private key file %s as ' 'private is set to False' , fn_ ) continue if m . group ( 'cert' ) and certs is False : log . info ( 'Skipping key file %s as certs is set to False' , fn_ ) continue kname = m . group ( 'type' ) if m . group ( 'pub' ) : kname += m . group ( 'pub' ) try : with salt . utils . files . fopen ( os . path . join ( keydir , fn_ ) , 'r' ) as _fh : # As of RFC 4716 " a key file is a text file , containing a # sequence of lines " , although some SSH implementations # ( e . g . OpenSSH ) manage their own format ( s ) . Please see # #20708 for a discussion about how to handle SSH key files # in the future keys [ kname ] = salt . utils . stringutils . to_unicode ( _fh . readline ( ) ) # only read the whole file if it is not in the legacy 1.1 # binary format if keys [ kname ] != "SSH PRIVATE KEY FILE FORMAT 1.1\n" : keys [ kname ] += salt . utils . stringutils . to_unicode ( _fh . read ( ) ) keys [ kname ] = keys [ kname ] . strip ( ) except ( IOError , OSError ) : keys [ kname ] = '' return keys
def _verify_params ( self ) : """Verifies the parameters don ' t use any reserved parameter . Raises : ValueError : If a reserved parameter is used ."""
reserved_in_use = self . _RESERVED_PARAMS . intersection ( self . extra_params ) if reserved_in_use : raise ValueError ( "Using a reserved parameter" , reserved_in_use )
def raw_urlsafe_b64encode ( b ) : '''Base64 encode using URL - safe encoding with padding removed . @ param b bytes to encode @ return encoded bytes'''
b = to_bytes ( b ) b = base64 . urlsafe_b64encode ( b ) b = b . rstrip ( b'=' ) # strip padding return b
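A hedged companion sketch (not part of the library): decoding a padding-stripped URL-safe value just requires restoring the '=' padding to a multiple of four first.

import base64

def raw_urlsafe_b64decode(s):
    if isinstance(s, str):
        s = s.encode("ascii")
    s += b"=" * (-len(s) % 4)  # re-pad to a multiple of 4
    return base64.urlsafe_b64decode(s)

assert raw_urlsafe_b64decode(b"aGVsbG8") == b"hello"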
def _generate_examples ( self , split_subsets , extraction_map ) : """Returns the examples in the raw ( text ) form ."""
source , _ = self . builder_config . language_pair def _get_local_paths ( ds , extract_dirs ) : rel_paths = ds . get_path ( source ) if len ( extract_dirs ) == 1 : extract_dirs = extract_dirs * len ( rel_paths ) return [ os . path . join ( ex_dir , rel_path ) if rel_path else ex_dir for ex_dir , rel_path in zip ( extract_dirs , rel_paths ) ] for ss_name in split_subsets : logging . info ( "Generating examples from: %s" , ss_name ) ds = DATASET_MAP [ ss_name ] extract_dirs = extraction_map [ ss_name ] files = _get_local_paths ( ds , extract_dirs ) if ss_name . startswith ( "czeng" ) : if ss_name . endswith ( "16pre" ) : sub_generator = functools . partial ( _parse_tsv , language_pair = ( "en" , "cs" ) ) elif ss_name . endswith ( "17" ) : filter_path = _get_local_paths ( _CZENG17_FILTER , extraction_map [ _CZENG17_FILTER . name ] ) [ 0 ] sub_generator = functools . partial ( _parse_czeng , filter_path = filter_path ) else : sub_generator = _parse_czeng elif len ( files ) == 2 : if ss_name . endswith ( "_frde" ) : sub_generator = _parse_frde_bitext else : sub_generator = _parse_parallel_sentences elif len ( files ) == 1 : fname = files [ 0 ] # Note : Due to formatting used by ` download _ manager ` , the file # extension may not be at the end of the file path . if ".tsv" in fname : sub_generator = _parse_tsv elif ss_name . startswith ( "newscommentary_v14" ) : sub_generator = functools . partial ( _parse_tsv , language_pair = self . builder_config . language_pair ) elif "tmx" in fname : sub_generator = _parse_tmx elif ss_name . startswith ( "wikiheadlines" ) : sub_generator = _parse_wikiheadlines else : raise ValueError ( "Unsupported file format: %s" % fname ) else : raise ValueError ( "Invalid number of files: %d" % len ( files ) ) for ex in sub_generator ( * files ) : if not all ( ex . values ( ) ) : continue # TODO ( adarob ) : Add subset feature . # ex [ " subset " ] = subset yield ex
def browse ( self ) : """Utility to browse through the records in the warc file . This returns an iterator over ( record , offset , size ) for each record in the file . If the file is gzip compressed , the offset and size will corresponds to the compressed file . The payload of each record is limited to 1MB to keep memory consumption under control ."""
offset = 0 for record in self . reader : # Just read the first 1MB of the payload . # This will make sure memory consumption is under control and it # is possible to look at the first MB of the payload , which is # typically sufficient to read http headers in the payload . record . payload = StringIO ( record . payload . read ( 1024 * 1024 ) ) self . reader . finish_reading_current_record ( ) next_offset = self . tell ( ) yield record , offset , next_offset - offset offset = next_offset
def scan_to_best_match ( fname , motifs , ncpus = None , genome = None , score = False ) : """Scan a FASTA file with motifs . Scan a FASTA file and return a dictionary with the best match per motif . Parameters fname : str Filename of a sequence file in FASTA format . motifs : list List of motif instances . Returns result : dict Dictionary with motif scanning results ."""
# Initialize scanner s = Scanner ( ncpus = ncpus ) s . set_motifs ( motifs ) s . set_threshold ( threshold = 0.0 ) if genome : s . set_genome ( genome ) if isinstance ( motifs , six . string_types ) : motifs = read_motifs ( motifs ) logger . debug ( "scanning %s..." , fname ) result = dict ( [ ( m . id , [ ] ) for m in motifs ] ) if score : it = s . best_score ( fname ) else : it = s . best_match ( fname ) for scores in it : for motif , score in zip ( motifs , scores ) : result [ motif . id ] . append ( score ) # Close the pool and reclaim memory del s return result
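A hypothetical usage sketch (file names and genome are placeholders; it assumes the read_motifs helper referenced above is importable alongside scan_to_best_match):

motifs = read_motifs("motifs.pwm")            # placeholder motif file
best = scan_to_best_match("peaks.fa", motifs, ncpus=4, genome="hg38", score=True)
for motif_id, scores in best.items():
    print(motif_id, max(scores))              # best score per motif across sequences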
def _parse ( name ) : """Return dict of parts forming ` name ` . Raise ` ValueError ` if string ` name ` cannot be correctly parsed . The default implementation uses ` NodeNamingPolicy . _ NODE _ NAME _ RE ` to parse the name back into constituent parts . This is ideally the inverse of : meth : ` _ format ` - - it should be able to parse a node name string into the parameter values that were used to form it ."""
match = NodeNamingPolicy . _NODE_NAME_RE . match ( name ) if match : return match . groupdict ( ) else : raise ValueError ( "Cannot parse node name `{name}`" . format ( name = name ) )
def publish_and_get_event ( self , resource ) : """Publish and get the event from base station ."""
l_subscribed = False this_event = None if not self . __subscribed : self . _get_event_stream ( ) self . _subscribe_myself ( ) l_subscribed = True status = self . publish ( action = 'get' , resource = resource , mode = None , publish_response = False ) if status == 'success' : i = 0 while not this_event and i < 2 : self . __event_handle . wait ( 5.0 ) self . __event_handle . clear ( ) _LOGGER . debug ( "Instance %s resource: %s" , str ( i ) , resource ) for event in self . __events : if event [ 'resource' ] == resource : this_event = event self . __events . remove ( event ) break i = i + 1 if l_subscribed : self . _unsubscribe_myself ( ) self . _close_event_stream ( ) l_subscribed = False return this_event
def check_migrations_applied ( migrate ) : """A built - in check to see if all migrations have been applied correctly . It ' s automatically added to the list of Dockerflow checks if a ` flask _ migrate . Migrate < https : / / flask - migrate . readthedocs . io / > ` _ object is passed to the : class : ` ~ dockerflow . flask . app . Dockerflow ` class during instantiation , e . g . : : from flask import Flask from flask _ migrate import Migrate from flask _ sqlalchemy import SQLAlchemy from dockerflow . flask import Dockerflow app = Flask ( _ _ name _ _ ) app . config [ ' SQLALCHEMY _ DATABASE _ URI ' ] = ' sqlite : / / / / tmp / test . db ' db = SQLAlchemy ( app ) migrate = Migrate ( app , db ) dockerflow = Dockerflow ( app , db = db , migrate = migrate )"""
errors = [ ] from alembic . migration import MigrationContext from alembic . script import ScriptDirectory from sqlalchemy . exc import DBAPIError , SQLAlchemyError # pass in Migrate . directory here explicitly to be compatible with # older versions of Flask - Migrate that required the directory to be passed config = migrate . get_config ( directory = migrate . directory ) script = ScriptDirectory . from_config ( config ) try : with migrate . db . engine . connect ( ) as connection : context = MigrationContext . configure ( connection ) db_heads = set ( context . get_current_heads ( ) ) script_heads = set ( script . get_heads ( ) ) except ( DBAPIError , SQLAlchemyError ) as e : msg = "Can't connect to database to check migrations: {!s}" . format ( e ) return [ Info ( msg , id = health . INFO_CANT_CHECK_MIGRATIONS ) ] if db_heads != script_heads : msg = "Unapplied migrations found: {}" . format ( ', ' . join ( script_heads ) ) errors . append ( Warning ( msg , id = health . WARNING_UNAPPLIED_MIGRATION ) ) return errors
def serve_protected_file ( request , path ) : """Serve protected files to authenticated users with read permissions ."""
path = path . rstrip ( '/' ) try : file_obj = File . objects . get ( file = path ) except File . DoesNotExist : raise Http404 ( 'File not found %s' % path ) if not file_obj . has_read_permission ( request ) : if settings . DEBUG : raise PermissionDenied else : raise Http404 ( 'File not found %s' % path ) return server . serve ( request , file_obj = file_obj . file , save_as = False )
def search_course ( self , xqdm , kcdm = None , kcmc = None ) : """Course search . @ structure [ { ' 任课教师 ' : str , ' 课程名称 ' : str , ' 教学班号 ' : str , ' 课程代码 ' : str , ' 班级容量 ' : int } ] ( keys : instructor , course name , teaching class number , course code , class capacity ) : param xqdm : semester code : param kcdm : course code : param kcmc : course name"""
return self . query ( SearchCourse ( xqdm , kcdm , kcmc ) )
async def connect_async ( self , loop = None , timeout = None ) : """Set up async connection on specified event loop or on default event loop ."""
if self . deferred : raise Exception ( "Error, database not properly initialized " "before opening connection" ) if self . _async_conn : return elif self . _async_wait : await self . _async_wait else : self . _loop = loop self . _async_wait = asyncio . Future ( loop = self . _loop ) conn = self . _async_conn_cls ( database = self . database , loop = self . _loop , timeout = timeout , ** self . connect_params_async ) try : await conn . connect ( ) except Exception as e : if not self . _async_wait . done ( ) : self . _async_wait . set_exception ( e ) self . _async_wait = None raise else : self . _task_data = TaskLocals ( loop = self . _loop ) self . _async_conn = conn self . _async_wait . set_result ( True )
def import_parallel_gateway_to_graph ( diagram_graph , process_id , process_attributes , element ) : """Adds to graph the new element that represents BPMN parallel gateway . Parallel gateway doesn ' t have additional attributes . Separate method is used to improve code readability . : param diagram _ graph : NetworkX graph representing a BPMN process diagram , : param process _ id : string object , representing an ID of process element , : param process _ attributes : dictionary that holds attribute values of ' process ' element , which is parent of imported flow node , : param element : object representing a BPMN XML ' parallelGateway ' ."""
BpmnDiagramGraphImport . import_gateway_to_graph ( diagram_graph , process_id , process_attributes , element )
async def count ( query , clear_limit = False ) : """Perform * COUNT * aggregated query asynchronously . : return : number of objects in ` ` select ( ) ` ` query"""
clone = query . clone ( ) if query . _distinct or query . _group_by or query . _limit or query . _offset : if clear_limit : clone . _limit = clone . _offset = None sql , params = clone . sql ( ) wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql raw = query . model . raw ( wrapped , * params ) return ( await scalar ( raw ) ) or 0 else : clone . _returning = [ peewee . fn . Count ( peewee . SQL ( '*' ) ) ] clone . _order_by = None return ( await scalar ( clone ) ) or 0
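An illustrative sketch of the SQL shape produced by the wrapped branch above (table and column names are made up):

inner_sql = "SELECT id FROM users WHERE active = %s LIMIT 10"
wrapped = "SELECT COUNT(1) FROM (%s) AS wrapped_select" % inner_sql
# -> counts the rows of the limited/grouped inner query instead of rewriting it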
def _parse_button ( self , keypad , component_xml ) : """Parses a button device that is part of a keypad ."""
button_xml = component_xml . find ( 'Button' ) name = button_xml . get ( 'Engraving' ) button_type = button_xml . get ( 'ButtonType' ) direction = button_xml . get ( 'Direction' ) # Hybrid keypads have dimmer buttons which have no engravings . if button_type == 'SingleSceneRaiseLower' : name = 'Dimmer ' + direction if not name : name = "Unknown Button" button = Button ( self . _lutron , keypad , name = name , num = int ( component_xml . get ( 'ComponentNumber' ) ) , button_type = button_type , direction = direction ) return button
def define ( self , id , schema ) : """Add a schema to the list of definition : param id : id of the schema . : param schema : the schema as a dict or a : class : schemabuilder . primitives . Generic : return : reference to schema . : rtype : : class : ` schemabuilder . schema . Ref `"""
self . definitions [ id ] = schema self . _schema = None return self . ref ( id )
def global_set ( self , key , value ) : """Set ` ` key ` ` to ` ` value ` ` globally ( not at any particular branch or revision )"""
( key , value ) = map ( self . pack , ( key , value ) ) try : return self . sql ( 'global_insert' , key , value ) except IntegrityError : return self . sql ( 'global_update' , value , key )
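A generic, self-contained illustration of the insert-then-update fallback used above (table and column names are invented; sqlite3 is used only for the demo):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE globals (key TEXT PRIMARY KEY, value TEXT)")

def set_global(key, value):
    try:
        con.execute("INSERT INTO globals (key, value) VALUES (?, ?)", (key, value))
    except sqlite3.IntegrityError:
        con.execute("UPDATE globals SET value = ? WHERE key = ?", (value, key))

set_global("branch", "main")
set_global("branch", "dev")  # second call takes the IntegrityError/update path
row = con.execute("SELECT value FROM globals WHERE key = ?", ("branch",)).fetchone()
assert row[0] == "dev"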
def version_binary ( self ) : '''Return the version number which is stored in binary format . Returns : str : < major 0-255 > . < minor 0-255 > . < build 0-65535 > or None if not found'''
# Under MSI ' Version ' is a ' REG _ DWORD ' which then sets other registry # values like DisplayVersion to x . x . x to the same value . # However not everyone plays by the rules , so we need to check first . # version _ binary _ data will be None if the reg value does not exist . # Some installs set ' Version ' to REG _ SZ ( string ) which is not # the MSI standard try : item_value , item_type = self . __reg_query_value ( self . __reg_uninstall_handle , 'version' ) except pywintypes . error as exc : # pylint : disable = no - member if exc . winerror == winerror . ERROR_FILE_NOT_FOUND : # Not Found return '' , '' version_binary_text = '' version_src = '' if item_value : if item_type == win32con . REG_DWORD : if isinstance ( item_value , six . integer_types ) : version_binary_raw = item_value if version_binary_raw : # Major . Minor . Build version_binary_text = '{0}.{1}.{2}' . format ( version_binary_raw >> 24 & 0xff , version_binary_raw >> 16 & 0xff , version_binary_raw & 0xffff ) version_src = 'binary-version' elif ( item_type == win32con . REG_SZ and isinstance ( item_value , six . string_types ) and self . __version_pattern . match ( item_value ) is not None ) : # Hey , version should be a int / REG _ DWORD , an installer has set # it to a string version_binary_text = item_value . strip ( ' ' ) version_src = 'binary-version (string)' return ( version_binary_text , version_src )
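A worked example of the REG_DWORD bit layout decoded above (the packed value is made up): major in the top byte, minor in the next byte, build in the low 16 bits.

version_raw = (5 << 24) | (2 << 16) | 9200   # hypothetical packed DWORD
major = version_raw >> 24 & 0xFF             # 5
minor = version_raw >> 16 & 0xFF             # 2
build = version_raw & 0xFFFF                 # 9200
assert "{0}.{1}.{2}".format(major, minor, build) == "5.2.9200"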
def castroData_from_ipix ( self , ipix , colwise = False ) : """Build a CastroData object for a particular pixel"""
# pix = utils . skydir _ to _ pix if colwise : ipix = self . _tsmap . ipix_swap_axes ( ipix , colwise ) norm_d = self . _norm_vals [ ipix ] nll_d = self . _nll_vals [ ipix ] return CastroData ( norm_d , nll_d , self . _refSpec , self . _norm_type )
def searchtop ( self , n = 10 ) : """Return the top n best results ( or possibly fewer if not enough are found )"""
solutions = PriorityQueue ( [ ] , lambda x : x . score , self . minimize , length = n , blockworse = False , blockequal = False , duplicates = False ) for solution in self : solutions . append ( solution ) return solutions
def upload_file ( self , api_token , file_path , ** kwargs ) : """Upload a file suitable to be passed as a file _ attachment . : param api _ token : The user ' s login api _ token . : type api _ token : str : param file _ path : The path of the file to be uploaded . : type file _ path : str : return : The HTTP response to the request . : rtype : : class : ` requests . Response `"""
params = { 'token' : api_token , 'file_name' : os . path . basename ( file_path ) } with open ( file_path , 'rb' ) as f : files = { 'file' : f } return self . _post ( 'upload_file' , params , files , ** kwargs )
def handleTickGeneric ( self , msg ) : """holds latest tick bid / ask / last price"""
df2use = self . marketData if self . contracts [ msg . tickerId ] . m_secType in ( "OPT" , "FOP" ) : df2use = self . optionsData # create tick holder for ticker if msg . tickerId not in df2use . keys ( ) : df2use [ msg . tickerId ] = df2use [ 0 ] . copy ( ) if msg . tickType == dataTypes [ "FIELD_OPTION_IMPLIED_VOL" ] : df2use [ msg . tickerId ] [ 'iv' ] = round ( float ( msg . value ) , 2 ) # elif msg . tickType = = dataTypes [ " FIELD _ OPTION _ HISTORICAL _ VOL " ] : # df2use [ msg . tickerId ] [ ' historical _ iv ' ] = round ( float ( msg . value ) , 2) # fire callback self . ibCallback ( caller = "handleTickGeneric" , msg = msg )
def makeB64UrlSafe ( b64str ) : """Make a base64 string URL Safe"""
if isinstance ( b64str , six . text_type ) : b64str = b64str . encode ( ) # see RFC 4648 , sec . 5 return b64str . replace ( b'+' , b'-' ) . replace ( b'/' , b'_' )
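A quick check of the RFC 4648 sec. 5 mapping performed above, assuming the function is importable: '+' becomes '-' and '/' becomes '_'.

import base64

std = base64.b64encode(b"\xfb\xff\xfe")   # b'+//+' in the standard alphabet
assert makeB64UrlSafe(std) == b"-__-"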
def key_func ( * keys , ** kwargs ) : """Creates a " key function " based on given keys . Resulting function will perform lookup using specified keys , in order , on the object passed to it as an argument . For example , ` ` key _ func ( ' a ' , ' b ' ) ( foo ) ` ` is equivalent to ` ` foo [ ' a ' ] [ ' b ' ] ` ` . : param keys : Lookup keys : param default : Optional keyword argument specifying default value that will be returned when some lookup key is not present : return : Unary key function"""
ensure_argcount ( keys , min_ = 1 ) ensure_keyword_args ( kwargs , optional = ( 'default' , ) ) keys = list ( map ( ensure_string , keys ) ) if 'default' in kwargs : default = kwargs [ 'default' ] def getitems ( obj ) : for key in keys : try : obj = obj [ key ] except KeyError : return default return obj else : if len ( keys ) == 1 : getitems = operator . itemgetter ( keys [ 0 ] ) else : def getitems ( obj ) : for key in keys : obj = obj [ key ] return obj return getitems
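A usage sketch mirroring the docstring, assuming key_func is importable: the returned function performs the nested lookup and falls back to default on a missing key.

get_city = key_func("address", "city", default=None)

assert get_city({"address": {"city": "Oslo"}}) == "Oslo"
assert get_city({"address": {}}) is None   # missing key -> default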
def update_picks ( self , games = None , points = None ) : '''games can be a dict of { game . id : winner _ id } for all picked games to update'''
if games : game_dict = { g . id : g for g in self . gameset . games . filter ( id__in = games ) } game_picks = { pick . game . id : pick for pick in self . gamepicks . filter ( game__id__in = games ) } for key , winner in games . items ( ) : game = game_dict [ key ] if not game . has_started : pick = game_picks [ key ] pick . winner_id = winner pick . save ( ) if points is not None : self . points = points self . save ( ) if games or points : self . updated_signal . send ( sender = self . __class__ , pickset = self , auto_pick = False )