signature: string (length 29 to 44.1k) | implementation: string (length 0 to 85.2k)
def wait_until_page_contains_elements(self, timeout, *locators):
    """This is a copy of `Wait Until Page Contains Element` but it allows multiple
    arguments in order to wait for more than one element.

    | *Argument* | *Description* | *Example* |
    | timeout    | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
    | *locators  | Selenium 2 element locator(s) | id=MyId |
    """
    self._wait_until_no_error(timeout, self._wait_for_elements, locators)
def __loaddate():
    '''Load the trading-calendar file.

    The file is based on http://www.twse.com.tw/ch/trading/trading_days.php
    '''
    csv_path = os.path.join(os.path.dirname(__file__), 'opendate.csv')
    with open(csv_path) as csv_file:
        csv_data = csv.reader(csv_file)
        result = {}
        result['close'] = []
        result['open'] = []
        for i in csv_data:
            if i[1] == '0':    # 0 = market closed
                result['close'].append(datetime.strptime(i[0], '%Y/%m/%d').date())
            elif i[1] == '1':  # 1 = market open
                result['open'].append(datetime.strptime(i[0], '%Y/%m/%d').date())
            else:
                pass
        return result
def main ( ) : """Shows useful information about how - to configure alias on a first run and configure automatically on a second . It ' ll be only visible when user type fuck and when alias isn ' t configured ."""
settings . init ( ) configuration_details = shell . how_to_configure ( ) if ( configuration_details and configuration_details . can_configure_automatically ) : if _is_already_configured ( configuration_details ) : logs . already_configured ( configuration_details ) return elif _is_second_run ( ) : _configure ( configuration_details ) logs . configured_successfully ( configuration_details ) return else : _record_first_run ( ) logs . how_to_configure_alias ( configuration_details )
def _createFromObject(obj, *args, **kwargs):
    """Creates an RTI given an object. Auto-detects which RTI class to return.

    The *args and **kwargs parameters are passed to the RTI constructor.
    It is therefore important that all memory RTIs accept the same parameters in
    the constructor (with the exception of the FieldRti, which is not auto-detected).
    """
    if is_a_sequence(obj):
        return SequenceRti(obj, *args, **kwargs)
    elif is_a_mapping(obj):
        return MappingRti(obj, *args, **kwargs)
    elif is_an_array(obj):
        return ArrayRti(obj, *args, **kwargs)
    elif isinstance(obj, bytearray):
        return ArrayRti(np.array(obj), *args, **kwargs)
    else:
        return ScalarRti(obj, *args, **kwargs)
def save_scatter_table(self, fn, description=""):
    """Save the scattering lookup tables.

    Save the state of the scattering lookup tables to a file.
    This can be loaded later with load_scatter_table.

    Other variables will not be saved, but this does not matter because
    the results of the computations are based only on the contents of the table.

    Args:
        fn: The name of the scattering table file.
        description (optional): A description of the table.
    """
    data = {
        "description": description,
        "time": datetime.now(),
        "psd_scatter": (self.num_points, self.D_max, self._psd_D,
                        self._S_table, self._Z_table, self._angular_table,
                        self._m_table, self.geometries),
        "version": tmatrix_aux.VERSION
    }
    # open in binary mode for pickling (the original used the Python 2 file() builtin)
    with open(fn, 'wb') as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def set_statics(self):
    """Create statics directory and copy files in it."""
    if not os.path.exists(self.results_dir):
        return None
    try:
        shutil.copytree(os.path.join(self.templates_dir, 'css'),
                        os.path.join(self.results_dir, 'css'))
        shutil.copytree(os.path.join(self.templates_dir, 'scripts'),
                        os.path.join(self.results_dir, 'scripts'))
        shutil.copytree(os.path.join(self.templates_dir, 'fonts'),
                        os.path.join(self.results_dir, 'fonts'))
    except OSError as e:
        if e.errno == 17:  # File exists
            print("WARNING: existing output directory for static files, will not replace them")
        else:
            # in all other cases, re-raise exceptions
            raise
    try:
        shutil.copytree(os.path.join(self.templates_dir, 'img'),
                        os.path.join(self.results_dir, 'img'))
    except OSError as e:
        pass
def psetex(self, name, value, time_ms):
    """Set the value of key ``name`` to ``value`` that expires in ``time_ms``
    milliseconds. ``time_ms`` can be represented by an integer or a Python
    timedelta object.
    """
    with self.pipe as pipe:
        return pipe.psetex(self.redis_key(name),
                           time_ms=time_ms,
                           value=self.valueparse.encode(value=value))
def rank(matrix, atol=1e-13, rtol=0):
    """Estimate the rank, i.e., the dimension of the column space, of a matrix.

    The algorithm used by this function is based on the singular value
    decomposition of `matrix`.

    Parameters
    ----------
    matrix : ndarray
        The matrix should be at most 2-D. A 1-D array with length k will be
        treated as a 2-D array with shape (1, k).
    atol : float
        The absolute tolerance for a zero singular value. Singular values
        smaller than ``atol`` are considered to be zero.
    rtol : float
        The relative tolerance for a zero singular value. Singular values less
        than the relative tolerance times the largest singular value are
        considered to be zero.

    Notes
    -----
    If both `atol` and `rtol` are positive, the combined tolerance is the
    maximum of the two; that is::

        tol = max(atol, rtol * smax)

    Singular values smaller than ``tol`` are considered to be zero.

    Returns
    -------
    int
        The estimated rank of the matrix.

    See Also
    --------
    numpy.linalg.matrix_rank
        matrix_rank is basically the same as this function, but it does not
        provide the option of the absolute tolerance.
    """
    matrix = np.atleast_2d(matrix)
    sigma = svd(matrix, compute_uv=False)
    tol = max(atol, rtol * sigma[0])
    return int((sigma >= tol).sum())
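A quick sanity check of the tolerance rule, sketched with numpy's own SVD (assuming `svd` above behaves like `numpy.linalg.svd` with `compute_uv=False`):

import numpy as np

# Rank-deficient 3x3 matrix: the third row is the sum of the first two,
# so the column space is only 2-dimensional.
m = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0],
              [5.0, 7.0, 9.0]])

sigma = np.linalg.svd(m, compute_uv=False)   # singular values, descending
tol = max(1e-13, 0 * sigma[0])               # atol=1e-13, rtol=0, as in rank()
print(int((sigma >= tol).sum()))             # -> 2
print(np.linalg.matrix_rank(m))              # -> 2, agrees with numpy's default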
def present ( name , value , zone , record_type , ttl = None , identifier = None , region = None , key = None , keyid = None , profile = None , wait_for_sync = True , split_dns = False , private_zone = False ) : '''Ensure the Route53 record is present . name Name of the record . value Value of the record . As a special case , you can pass in : ` private : < Name tag > ` to have the function autodetermine the private IP ` public : < Name tag > ` to have the function autodetermine the public IP zone The zone to create the record in . record _ type The record type ( A , NS , MX , TXT , etc . ) ttl The time to live for the record . identifier The unique identifier to use for this record . region The region to connect to . key Secret key to be used . keyid Access key to be used . profile A dict with region , key and keyid , or a pillar key ( string ) that contains a dict with region , key and keyid . wait _ for _ sync Wait for an INSYNC change status from Route53 before returning success . split _ dns Route53 supports parallel public and private DNS zones with the same name . private _ zone If using split _ dns , specify if this is the private zone .'''
ret = { 'name' : name , 'result' : True , 'comment' : '' , 'changes' : { } } # If a list is passed in for value , change it to a comma - separated string # So it will work with subsequent boto module calls and string functions if isinstance ( value , list ) : value = ',' . join ( value ) elif value . startswith ( 'private:' ) or value . startswith ( 'public:' ) : name_tag = value . split ( ':' , 1 ) [ 1 ] in_states = ( 'pending' , 'rebooting' , 'running' , 'stopping' , 'stopped' ) r = __salt__ [ 'boto_ec2.find_instances' ] ( name = name_tag , return_objs = True , in_states = in_states , profile = profile ) if not r : ret [ 'comment' ] = 'Error: instance with Name tag {0} not found' . format ( name_tag ) ret [ 'result' ] = False return ret if len ( r ) > 1 : ret [ 'comment' ] = 'Error: Name tag {0} matched more than one instance' . format ( name_tag ) ret [ 'result' ] = False return ret instance = r [ 0 ] private_ip = getattr ( instance , 'private_ip_address' , None ) public_ip = getattr ( instance , 'ip_address' , None ) if value . startswith ( 'private:' ) : value = private_ip log . info ( 'Found private IP %s for instance %s' , private_ip , name_tag ) else : if public_ip is None : ret [ 'comment' ] = 'Error: No Public IP assigned to instance with Name {0}' . format ( name_tag ) ret [ 'result' ] = False return ret value = public_ip log . info ( 'Found public IP %s for instance %s' , public_ip , name_tag ) try : record = __salt__ [ 'boto_route53.get_record' ] ( name , zone , record_type , False , region , key , keyid , profile , split_dns , private_zone , identifier ) except SaltInvocationError as err : ret [ 'comment' ] = 'Error: {0}' . format ( err ) ret [ 'result' ] = False return ret if isinstance ( record , dict ) and not record : if __opts__ [ 'test' ] : ret [ 'comment' ] = 'Route53 record {0} set to be added.' . format ( name ) ret [ 'result' ] = None return ret added = __salt__ [ 'boto_route53.add_record' ] ( name , value , zone , record_type , identifier , ttl , region , key , keyid , profile , wait_for_sync , split_dns , private_zone ) if added : ret [ 'changes' ] [ 'old' ] = None ret [ 'changes' ] [ 'new' ] = { 'name' : name , 'value' : value , 'record_type' : record_type , 'ttl' : ttl , 'identifier' : identifier } ret [ 'comment' ] = 'Added {0} Route53 record.' . format ( name ) else : ret [ 'result' ] = False ret [ 'comment' ] = 'Failed to add {0} Route53 record.' . format ( name ) return ret elif record : need_to_update = False # Values can be a comma separated list and some values will end with a # period ( even if we set it without one ) . To easily check this we need # to split and check with the period stripped from the input and what ' s # in route53. # TODO : figure out if this will cause us problems with some records . _values = [ x . rstrip ( '.' ) for x in value . split ( ',' ) ] _r_values = [ x . rstrip ( '.' ) for x in record [ 'value' ] . split ( ',' ) ] _values . sort ( ) _r_values . sort ( ) if _values != _r_values : need_to_update = True if identifier and identifier != record [ 'identifier' ] : need_to_update = True if ttl and six . text_type ( ttl ) != six . text_type ( record [ 'ttl' ] ) : need_to_update = True if need_to_update : if __opts__ [ 'test' ] : ret [ 'comment' ] = 'Route53 record {0} set to be updated.' . 
format ( name ) ret [ 'result' ] = None return ret updated = __salt__ [ 'boto_route53.update_record' ] ( name , value , zone , record_type , identifier , ttl , region , key , keyid , profile , wait_for_sync , split_dns , private_zone ) if updated : ret [ 'changes' ] [ 'old' ] = record ret [ 'changes' ] [ 'new' ] = { 'name' : name , 'value' : value , 'record_type' : record_type , 'ttl' : ttl , 'identifier' : identifier } ret [ 'comment' ] = 'Updated {0} Route53 record.' . format ( name ) else : ret [ 'result' ] = False ret [ 'comment' ] = 'Failed to update {0} Route53 record.' . format ( name ) else : ret [ 'comment' ] = '{0} exists.' . format ( name ) return ret
def _supported_imts(self):
    """Updates the list of supported IMTs from the tables."""
    imt_list = []
    for key in self.imls:
        if "SA" in key:
            imt_list.append(imt_module.SA)
        elif key == "T":
            continue
        else:
            try:
                factory = getattr(imt_module, key)
            except Exception:
                continue
            imt_list.append(factory)
    return imt_list
def from_inline(cls: Type[CertificationType], version: int, currency: str,
                blockhash: Optional[str], inline: str) -> CertificationType:
    """Return Certification instance from inline document.

    Only self.pubkey_to is populated.
    You must populate self.identity with an Identity instance to use
    raw/sign/signed_raw methods.

    :param version: Version of document
    :param currency: Name of the currency
    :param blockhash: Hash of the block
    :param inline: Inline document
    :return:
    """
    cert_data = Certification.re_inline.match(inline)
    if cert_data is None:
        raise MalformedDocumentError("Certification ({0})".format(inline))
    pubkey_from = cert_data.group(1)
    pubkey_to = cert_data.group(2)
    blockid = int(cert_data.group(3))
    if blockid == 0 or blockhash is None:
        timestamp = BlockUID.empty()
    else:
        timestamp = BlockUID(blockid, blockhash)
    signature = cert_data.group(4)
    return cls(version, currency, pubkey_from, pubkey_to, timestamp, signature)
def _maybe_update(self, user, attribute, new_value):
    """DRY helper. If the specified attribute of the user differs from the
    specified value, it will be updated.
    """
    old_value = getattr(user, attribute)
    if new_value != old_value:
        self.stderr.write(
            _('Setting {attribute} for user "{username}" to "{new_value}"').format(
                attribute=attribute, username=user.username, new_value=new_value
            )
        )
        setattr(user, attribute, new_value)
def make_authentiq_blueprint ( client_id = None , client_secret = None , scope = "openid profile" , redirect_url = None , redirect_to = None , login_url = None , authorized_url = None , session_class = None , storage = None , hostname = "connect.authentiq.io" , ) : """Make a blueprint for authenticating with authentiq using OAuth 2 . This requires a client ID and client secret from authentiq . You should either pass them to this constructor , or make sure that your Flask application config defines them , using the variables : envvar : ` AUTHENTIQ _ OAUTH _ CLIENT _ ID ` and : envvar : ` AUTHENTIQ _ OAUTH _ CLIENT _ SECRET ` . Args : client _ id ( str ) : The client ID for your application on Authentiq . client _ secret ( str ) : The client secret for your application on Authentiq . scope ( str , optional ) : comma - separated list of scopes for the OAuth token . redirect _ url ( str ) : the URL to redirect to after the authentication dance is complete . redirect _ to ( str ) : if ` ` redirect _ url ` ` is not defined , the name of the view to redirect to after the authentication dance is complete . The actual URL will be determined by : func : ` flask . url _ for ` . login _ url ( str , optional ) : the URL path for the ` ` login ` ` view . Defaults to ` ` / authentiq ` ` . authorized _ url ( str , optional ) : the URL path for the ` ` authorized ` ` view . Defaults to ` ` / authentiq / authorized ` ` . session _ class ( class , optional ) : The class to use for creating a Requests session . Defaults to : class : ` ~ flask _ dance . consumer . requests . OAuth2Session ` . storage : A token storage class , or an instance of a token storage class , to use for this blueprint . Defaults to : class : ` ~ flask _ dance . consumer . storage . session . SessionStorage ` . hostname ( str , optional ) : If using a private instance of authentiq CE / EE , specify the hostname , default is ` ` connect . authentiq . io ` ` : rtype : : class : ` ~ flask _ dance . consumer . OAuth2ConsumerBlueprint ` : returns : A : ref : ` blueprint < flask : blueprints > ` to attach to your Flask app ."""
authentiq_bp = OAuth2ConsumerBlueprint ( "authentiq" , __name__ , client_id = client_id , client_secret = client_secret , scope = scope , base_url = "https://{hostname}/" . format ( hostname = hostname ) , authorization_url = "https://{hostname}/authorize" . format ( hostname = hostname ) , token_url = "https://{hostname}/token" . format ( hostname = hostname ) , redirect_url = redirect_url , redirect_to = redirect_to , login_url = login_url , authorized_url = authorized_url , session_class = session_class , storage = storage , ) authentiq_bp . from_config [ "client_id" ] = "AUTHENTIQ_OAUTH_CLIENT_ID" authentiq_bp . from_config [ "client_secret" ] = "AUTHENTIQ_OAUTH_CLIENT_SECRET" @ authentiq_bp . before_app_request def set_applocal_session ( ) : ctx = stack . top ctx . authentiq_oauth = authentiq_bp . session return authentiq_bp
def new_message(cls, from_user, to_users, subject, content):
    """Create a new Message and Thread.

    Mark thread as unread for all recipients, and mark thread as read and
    deleted from inbox by creator.
    """
    thread = Thread.objects.create(subject=subject)
    for user in to_users:
        thread.userthread_set.create(user=user, deleted=False, unread=True)
    thread.userthread_set.create(user=from_user, deleted=True, unread=False)
    msg = cls.objects.create(thread=thread, sender=from_user, content=content)
    message_sent.send(sender=cls, message=msg, thread=thread, reply=False)
    return msg
def _selectView(self):
    """Matches the view selection to the tree's selection."""
    scene = self.uiGanttVIEW.scene()
    scene.blockSignals(True)
    scene.clearSelection()
    for item in self.uiGanttTREE.selectedItems():
        item.viewItem().setSelected(True)
    scene.blockSignals(False)
    curr_item = self.uiGanttTREE.currentItem()
    vitem = curr_item.viewItem()
    if vitem:
        self.uiGanttVIEW.centerOn(vitem)
def send(self, data):
    """This method keeps trying to send a message, relying on the run method
    to reopen the websocket in case it was closed.
    """
    while not self.stopped():
        try:
            self.ws.send(data)
            return
        except websocket.WebSocketConnectionClosedException:
            # config.LOGGER.debug('WebSocket closed, retrying send.')
            # TODO(investigate infinite loop)
            time.sleep(0.1)
def similarity(state_a, state_b):
    """The (L2) distance between the counts of the state addresses in the
    history of the path.

    :param state_a: The first state to compare
    :param state_b: The second state to compare
    """
    count_a = Counter(state_a.history.bbl_addrs)
    count_b = Counter(state_b.history.bbl_addrs)
    normal_distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2
                          for addr in set(list(count_a.keys()) + list(count_b.keys()))
                          ) ** 0.5
    return 1.0 / (1 + normal_distance)
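The core of the metric is independent of the state objects, so it can be sanity-checked on plain address lists; the hypothetical `history_a`/`history_b` below stand in for `state.history.bbl_addrs`:

from collections import Counter

# Hypothetical basic-block address histories standing in for state.history.bbl_addrs.
history_a = [0x400000, 0x400010, 0x400010, 0x400020]
history_b = [0x400000, 0x400010, 0x400030]

count_a, count_b = Counter(history_a), Counter(history_b)
distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2
               for addr in set(count_a) | set(count_b)) ** 0.5
print(1.0 / (1 + distance))  # identical histories give 1.0; the value decays toward 0 as they diverge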
def set_border_style_type(self, border_style_type):
    """Set the border style using the specified border style type.

    The border style type should be an integer value recognized by the border
    style factory for this formatter instance. The built-in border style types
    are provided by the `MenuBorderStyleType` class, or custom border style
    types can be provided if using a custom border style factory.

    :param border_style_type: an integer value representing the border style type.
    """
    style = self.__border_style_factory.create_border(border_style_type)
    self.set_border_style(style)
    return self
def authenticate(remote_addr, password, cert, key, verify_cert=True):
    '''Authenticate with a remote LXDaemon.

    remote_addr :
        A URL to a remote server. You also have to give cert and key if you
        provide remote_addr and it's a TCP address!

        Examples:
            https://myserver.lan:8443

    password :
        The password of the remote.

    cert :
        PEM formatted SSL certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM formatted SSL key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert. This is True by default, but in most cases
        you want to set it off as LXD normally uses self-signed certificates.

    CLI Example:

    .. code-block:: bash

        $ salt '*' lxd.authenticate https://srv01:8443 <yourpass> ~/.config/lxc/client.crt ~/.config/lxc/client.key false

    See the `requests-docs`_ for the SSL stuff.

    .. _requests-docs: http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification
    '''
    client = pylxd_client_get(remote_addr, cert, key, verify_cert)
    if client.trusted:
        return True
    try:
        client.authenticate(password)
    except pylxd.exceptions.LXDAPIException as e:
        # Wrong password
        raise CommandExecutionError(six.text_type(e))
    return client.trusted
def _last(self, **spec):
    """Get the latest entry in this category, optionally including subcategories."""
    for record in self._entries(spec).order_by(orm.desc(model.Entry.local_date),
                                               orm.desc(model.Entry.id))[:1]:
        return entry.Entry(record)
    return None
def add(self, child):
    """Adds a typed child object to the component type.

    @param child: Child object to be added.
    """
    if isinstance(child, FatComponent):
        self.add_child_component(child)
    else:
        Fat.add(self, child)
def delete_checkpoint(self, checkpoint_dir):
    """Removes subdirectory within checkpoint_folder.

    Parameters
    ----------
    checkpoint_dir : path to checkpoint
    """
    if os.path.isfile(checkpoint_dir):
        shutil.rmtree(os.path.dirname(checkpoint_dir))
    else:
        shutil.rmtree(checkpoint_dir)
def identifier(self):
    """These models have server-generated identifiers.

    If we don't already have it in memory, then assume that it has not yet
    been generated.
    """
    if self.primary_key not in self._data:
        return 'Unknown'
    return str(self._data[self.primary_key])
def parse(query_string, info={}):
    """
    :returns: a normalized query_dict as in the following examples:

    >>> parse('kind=stats', {'stats': {'mean': 0, 'max': 1}})
    {'kind': ['mean', 'max'], 'k': [0, 1], 'rlzs': False}
    >>> parse('kind=rlzs', {'stats': {}, 'num_rlzs': 3})
    {'kind': ['rlz-000', 'rlz-001', 'rlz-002'], 'k': [0, 1, 2], 'rlzs': True}
    >>> parse('kind=mean', {'stats': {'mean': 0, 'max': 1}})
    {'kind': ['mean'], 'k': [0], 'rlzs': False}
    >>> parse('kind=rlz-3&imt=PGA&site_id=0', {'stats': {}})
    {'kind': ['rlz-3'], 'imt': ['PGA'], 'site_id': [0], 'k': [3], 'rlzs': True}
    """
    qdic = parse_qs(query_string)
    loss_types = info.get('loss_types', [])
    for key, val in qdic.items():  # for instance, convert site_id to an int
        if key == 'loss_type':
            qdic[key] = [loss_types[k] for k in val]
        else:
            qdic[key] = [lit_eval(v) for v in val]
    if info:
        qdic['k'], qdic['kind'], qdic['rlzs'] = _normalize(qdic['kind'], info)
    return qdic
def dispatch(self):
    """Wraps the dispatch method to add session support."""
    try:
        webapp2.RequestHandler.dispatch(self)
    finally:
        self.session_store.save_sessions(self.response)
def typechecked_func(func, force=False, argType=None, resType=None, prop_getter=False):
    """Works like typechecked, but is only applicable to functions, methods and properties."""
    if not pytypes.checking_enabled and not pytypes.do_logging_in_typechecked:
        return func
    assert _check_as_func(func)
    if not force and is_no_type_check(func):
        return func
    if hasattr(func, 'do_typecheck'):
        func.do_typecheck = True
        return func
    elif hasattr(func, 'do_logging'):
        # actually shouldn't happen
        return _typeinspect_func(func, True, func.do_logging, argType, resType, prop_getter)
    else:
        return _typeinspect_func(func, True, False, argType, resType, prop_getter)
def cal_gpa(grades):
    """Compute the average course grade point and the GPA from the grades array.

    The algorithm may differ from the school's official one; results are for
    reference only.

    :param grades: the grades array returned by
        :meth:`models.StudentSession.get_my_achievements`
    :return: a tuple of (average course grade point, GPA)
    """
    # total number of courses
    courses_sum = len(grades)
    # sum of course grade points
    points_sum = 0
    # sum of credits
    credit_sum = 0
    # sum of (course credits x course grade point)
    gpa_points_sum = 0
    for grade in grades:
        # '补考成绩' is the makeup-exam grade, '成绩' the original grade, '学分' the credits
        point = get_point(grade.get('补考成绩') or grade['成绩'])
        credit = float(grade['学分'])
        points_sum += point
        credit_sum += credit
        gpa_points_sum += credit * point
    ave_point = points_sum / courses_sum
    gpa = gpa_points_sum / credit_sum
    return round(ave_point, 5), round(gpa, 5)
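Since `get_point`'s mapping from grades to points is not shown here, the arithmetic can still be illustrated with assumed grade points: the GPA is the credit-weighted mean, the average point the unweighted mean.

# Assumed (course credit, grade point) pairs; get_point's actual mapping is not shown above.
courses = [(3.0, 4.0), (2.0, 3.0), (1.0, 2.0)]

points_sum = sum(p for _, p in courses)           # 9.0
credit_sum = sum(c for c, _ in courses)           # 6.0
gpa_points_sum = sum(c * p for c, p in courses)   # 3*4 + 2*3 + 1*2 = 20.0

print(round(points_sum / len(courses), 5))        # average point: 3.0
print(round(gpa_points_sum / credit_sum, 5))      # GPA: 3.33333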
def on_state_changed(self, state):
    """Connect/disconnect sig_key_pressed signal."""
    if state:
        self.editor.sig_key_pressed.connect(self._on_key_pressed)
    else:
        self.editor.sig_key_pressed.disconnect(self._on_key_pressed)
def main ( ) : """Program entry point"""
parser = argparse . ArgumentParser ( ) parser . add_argument ( "path" , help = "Path to the CAPTCHA image file" ) parser . add_argument ( "--prefix" , help = "Checkpoint prefix [Default 'ocr']" , default = 'ocr' ) parser . add_argument ( "--epoch" , help = "Checkpoint epoch [Default 100]" , type = int , default = 100 ) args = parser . parse_args ( ) init_state_names , init_state_arrays = lstm_init_states ( batch_size = 1 ) img = read_img ( args . path ) sample = SimpleBatch ( data_names = [ 'data' ] + init_state_names , data = [ mx . nd . array ( img ) ] + init_state_arrays ) mod = load_module ( args . prefix , args . epoch , sample . data_names , sample . provide_data ) mod . forward ( sample ) prob = mod . get_outputs ( ) [ 0 ] . asnumpy ( ) prediction = CtcMetrics . ctc_label ( np . argmax ( prob , axis = - 1 ) . tolist ( ) ) # Predictions are 1 to 10 for digits 0 to 9 respectively ( prediction 0 means no - digit ) prediction = [ p - 1 for p in prediction ] print ( "Digits:" , prediction )
def getParser():
    "Creates and returns the argparse parser object."
parser = argparse . ArgumentParser ( description = __description__ , formatter_class = argparse . RawDescriptionHelpFormatter ) parser . add_argument ( 'images' , nargs = '+' , help = 'The images used for training (in the learning case) or to transform (in the transformation case)' ) apply_group = parser . add_argument_group ( 'apply an existing model' ) apply_group . add_argument ( '--load-model' , dest = 'lmodel' , default = False , help = 'Location of the pickled intensity range model to load. Activated application mode.' ) train_group = parser . add_argument_group ( 'train a new model and save and/or apply it' ) train_group . add_argument ( '--save-model' , dest = 'smodel' , default = False , help = 'Save the trained model under this name as a pickled object (should end in .pkl). Activates training mode.' ) train_group . add_argument ( '--cutoffp' , dest = 'cutoffp' , type = sequenceOfIntegersGeAscendingStrict , default = '1,99' , help = 'Colon-separated lower and upper cut-off percentile values to exclude intensity outliers during the model training.' ) train_group . add_argument ( '--landmarkp' , dest = 'landmarkp' , default = 'L4' , help = 'The landmark percentiles, based on which to train the model. Can be L2, L3, L4 or a colon-separated, ordered list of percentiles.' ) train_group . add_argument ( '--stdspace' , dest = 'stdspace' , default = 'auto' , help = 'Two colon-separated intensity values to roughly define the average intensity space to learn. In most cases should be left set to \'auto\'' ) shared_group = parser . add_argument_group ( 'shared arguments' ) shared_group . add_argument ( '--save-images' , dest = 'simages' , default = False , help = 'Save the transformed images under this location. Required for the application mode, optional for the learning mode.' ) shared_group . add_argument ( '--threshold' , type = float , default = 0 , help = 'All voxel with an intensity > threshold are considered as foreground. Supply either this or a mask for each image.' ) shared_group . add_argument ( '--masks' , nargs = '+' , help = 'A number of binary foreground mask, one for each image. Alternative to supplying a threshold. Overrides the threshold parameter if supplied.' ) shared_group . add_argument ( '--ignore' , dest = 'ignore' , action = 'store_true' , help = 'Ignore possible loss of information during the intensity transformation. Should only be used when you know what you are doing.' ) parser . add_argument ( '-v' , '--verbose' , dest = 'verbose' , action = 'store_true' , help = 'Verbose output' ) parser . add_argument ( '-d' , '--debug' , dest = 'debug' , action = 'store_true' , help = 'Display debug information.' ) parser . add_argument ( '-f' , '--force' , dest = 'force' , action = 'store_true' , help = 'Overwrite existing files (both model and images)' ) return parser
def open_with_encoding(filename, encoding, mode='r'):
    """Return opened file with a specific encoding."""
    return io.open(filename, mode=mode, encoding=encoding, newline='')
def _get_result_wrapper(self, query):
    """Get result wrapper class."""
    cursor = RowsCursor(self._rows, self._cursor.description)
    return query._get_cursor_wrapper(cursor)
def fast_median(a):
    """Fast median operation for masked array using 50th-percentile."""
    a = checkma(a)
    # return scoreatpercentile(a.compressed(), 50)
    if a.count() > 0:
        out = np.percentile(a.compressed(), 50)
    else:
        out = np.ma.masked
    return out
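The masked-array handling is the interesting part; with plain numpy (assuming `checkma` simply returns a masked array), the behaviour looks like this:

import numpy as np

a = np.ma.masked_invalid([1.0, np.nan, 3.0, 5.0])  # checkma is assumed to produce something like this
print(np.percentile(a.compressed(), 50))           # 3.0 -- median of the unmasked values only
print(np.ma.median(a))                             # 3.0 as well; the percentile route is typically faster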
def p_localparamdecl_integer(self, p):
    'localparamdecl : LOCALPARAM INTEGER param_substitution_list SEMICOLON'
    paramlist = [Localparam(rname, rvalue, lineno=p.lineno(3))
                 for rname, rvalue in p[3]]
    p[0] = Decl(tuple(paramlist), lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def key_string_to_lens_path(key_string):
    """Converts a key string like 'foo.bar.0.wopper' to ['foo', 'bar', 0, 'wopper'].

    :param {String} keyString: The dot-separated key string
    :return {[String]}: The lens array containing strings or integers
    """
    # if_else/isinstance here are the curried ramda-style helpers used by this
    # module, not the Python builtins
    return map(
        if_else(
            isinstance(int),
            # convert to int
            lambda s: int(s),
            # Leave the string alone
            identity),
        key_string.split('.'))
def query ( dataset_key , query , query_type = 'sql' , profile = 'default' , parameters = None , ** kwargs ) : """Query an existing dataset : param dataset _ key : Dataset identifier , in the form of owner / id or of a url : type dataset _ key : str : param query : SQL or SPARQL query : type query : str : param query _ type : The type of the query . Must be either ' sql ' or ' sparql ' . ( Default value = ' sql ' ) : type query _ type : { ' sql ' , ' sparql ' } , optional : param parameters : parameters to the query - if SPARQL query , this should be a dict containing named parameters , if SQL query , then this should be a list containing positional parameters . Boolean values will be converted to xsd : boolean , Integer values to xsd : integer , and other Numeric values to xsd : decimal . anything else is treated as a String literal ( Default value = None ) : type parameters : query parameters , optional : param profile : Configuration profile ( account ) to use . ( Default value = ' default ' ) : type profile : str , optional : returns : Object containing the results of the query : rtype : Results : raises RuntimeError : If a server error occurs Examples > > > import datadotworld as dw > > > results = dw . query ( . . . ' jonloyens / an - intro - to - dataworld - dataset ' , . . . ' SELECT * FROM ` DataDotWorldBBallStats ` , ` DataDotWorldBBallTeam ` ' . . . ' WHERE DataDotWorldBBallTeam . Name = DataDotWorldBBallStats . Name ' ) > > > df = results . dataframe > > > df . shape (8 , 6)"""
return _get_instance ( profile , ** kwargs ) . query ( dataset_key , query , query_type = query_type , parameters = parameters , ** kwargs )
def update(self):
    """Update the status of the range setting."""
    self._controller.update(self._id, wake_if_asleep=False)
    data = self._controller.get_charging_params(self._id)
    if data and (time.time() - self.__manual_update_time > 60):
        self.__maxrange_state = data['charge_to_max_range']
def synchelp(f):
    '''The synchelp decorator allows the transparent execution of a coroutine
    using the global loop from a thread other than the event loop. In both use
    cases, the actual work is done by the global event loop.

    Examples:

        Use as a decorator::

            @s_glob.synchelp
            async def stuff(x, y):
                await dostuff()

        Calling the stuff function as regular async code using the standard
        await syntax::

            valu = await stuff(x, y)

        Calling the stuff function as regular sync code outside of the event
        loop thread::

            valu = stuff(x, y)
    '''
    def wrap(*args, **kwargs):
        coro = f(*args, **kwargs)
        if not iAmLoop():
            return sync(coro)
        return coro

    return wrap
def augknt(knots, order):
    """Augment a knot vector.

    Parameters:
        knots: Python list or rank-1 array, the original knot vector
            (without endpoint repeats)
        order: int, >= 0, order of spline

    Returns:
        list_of_knots: rank-1 array that has (`order` + 1) copies of ``knots[0]``,
            then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``.

    Caveats:
        `order` is the spline order `p`, not `p` + 1, and existing knots are
        never deleted. The knot vector always becomes longer by calling this
        function.
    """
    if isinstance(knots, np.ndarray) and knots.ndim > 1:
        raise ValueError("knots must be a list or a rank-1 array")

    knots = list(knots)  # ensure Python list

    # One copy of knots[0] and knots[-1] will come from "knots" itself,
    # so we only need to prepend/append "order" copies.
    return np.array([knots[0]] * order + knots + [knots[-1]] * order)
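For instance, augmenting the open knot vector [0, 1, 2, 3] for a cubic spline (order 3) repeats each endpoint until it appears order + 1 = 4 times:

import numpy as np

knots = [0, 1, 2, 3]
order = 3  # cubic

augmented = np.array([knots[0]] * order + list(knots) + [knots[-1]] * order)
print(augmented)
# [0 0 0 0 1 2 3 3 3 3] -- each endpoint now appears order + 1 = 4 times,
# the interior knots 1 and 2 are untouched, and the vector grew by 2 * order entries.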
def _process_event(self, data, url, services_incl_filter=None,
                   services_excl_filter=None, custom_tags=None):
    '''Main event processing loop. An event will be created for a service
    status change. Service checks on the server side can be used to provide
    the same functionality.
    '''
hostname = data [ 'svname' ] service_name = data [ 'pxname' ] key = "%s:%s" % ( hostname , service_name ) status = self . host_status [ url ] [ key ] custom_tags = [ ] if custom_tags is None else custom_tags if self . _is_service_excl_filtered ( service_name , services_incl_filter , services_excl_filter ) : return data_status = data [ 'status' ] if status is None : self . host_status [ url ] [ key ] = data_status return if status != data_status and data_status in ( 'up' , 'down' ) : # If the status of a host has changed , we trigger an event try : lastchg = int ( data [ 'lastchg' ] ) except Exception : lastchg = 0 # Create the event object ev = self . _create_event ( data_status , hostname , lastchg , service_name , data [ 'back_or_front' ] , custom_tags = custom_tags ) self . event ( ev ) # Store this host status so we can check against it later self . host_status [ url ] [ key ] = data_status
def refresh(self):
    """Reloads the contents for this box based on the parameters.

    :return <bool>
    """
self . setDirty ( False ) self . blockSignals ( True ) self . setUpdatesEnabled ( False ) self . clear ( ) locales = self . _availableLocales if not locales : locales = self . allLocales ( ) if not self . showLanguage ( ) : if self . isTranslated ( ) : sorter = lambda x : x . get_territory_name ( base ) else : sorter = lambda x : x . get_territory_name ( ) else : if self . isTranslated ( ) : sorter = lambda x : x . get_language_name ( base ) else : sorter = lambda x : x . get_language_name ( ) locales = sorted ( locales , key = sorter ) index = 0 for i , locale in enumerate ( locales ) : babel_locale = babel . Locale . parse ( locale ) code = '{0}_{1}' . format ( babel_locale . language , babel_locale . territory ) keys = { } if self . isTranslated ( ) : keys [ 'lang' ] = babel_locale . get_language_name ( base ) keys [ 'territory' ] = babel_locale . get_territory_name ( base ) keys [ 'script' ] = babel_locale . get_script_name ( base ) else : keys [ 'lang' ] = babel_locale . get_language_name ( ) keys [ 'territory' ] = babel_locale . get_territory_name ( ) keys [ 'script' ] = babel_locale . get_script_name ( ) if self . showLanguage ( ) : opts = '' if self . showScriptName ( ) and keys [ 'script' ] : opts += keys [ 'script' ] if self . showTerritory ( ) and keys [ 'territory' ] : if opts : opts += ', ' opts += keys [ 'territory' ] if opts : opts = ' (' + opts + ')' label = keys [ 'lang' ] + opts elif self . showTerritory ( ) : label = keys [ 'territory' ] else : label = code self . addItem ( label ) self . setItemData ( i , wrapVariant ( str ( code ) ) ) name = babel_locale . territory . lower ( ) ico = 'img/flags/{0}.png' . format ( name ) flag = QtGui . QIcon ( resources . find ( ico ) ) if flag . isNull ( ) : ico = 'img/flags/_United Nations.png' flag = QtGui . QIcon ( resources . find ( ico ) ) self . setItemIcon ( i , flag ) if code == self . baseLocale ( ) : index = i self . setCurrentIndex ( index ) self . setUpdatesEnabled ( True ) self . blockSignals ( False )
def setHint(self, hint):
    """Sets the hint for this widget.

    :param hint | <str>
    """
    self._hint = hint
    self.detailWidget().setHint(hint)
def get_instruction(self, idx, off=None):
    """Get a particular instruction by using (default) the index of the address if specified.

    :param idx: index of the instruction (the position in the list of the instruction)
    :type idx: int
    :param off: address of the instruction
    :type off: int

    :rtype: an :class:`Instruction` object
    """
    if off is not None:
        idx = self.off_to_pos(off)
    return [i for i in self.get_instructions()][idx]
def find_objects ( self , terms = None , type = None , chunksize = None , ** kwargs ) : """Find objects in Fedora . Find query should be generated via keyword args , based on the fields in Fedora documentation . By default , the query uses a contains ( ~ ) search for all search terms . Calls : meth : ` ApiFacade . findObjects ` . Results seem to return consistently in ascending PID order . Example usage - search for all objects where the owner contains ' jdoe ' : : repository . find _ objects ( ownerId = ' jdoe ' ) Supports all search operators provided by Fedora findObjects query ( exact , gt , gte , lt , lte , and contains ) . To specify the type of query for a particular search term , call find _ objects like this : : repository . find _ objects ( ownerId _ _ exact = ' lskywalker ' ) repository . find _ objects ( date _ _ gt = ' 20010302 ' ) : param type : type of objects to return ; defaults to : class : ` DigitalObject ` : param chunksize : number of objects to return at a time : rtype : generator for list of objects"""
type = type or self . default_object_type find_opts = { 'chunksize' : chunksize } search_operators = { 'exact' : '=' , 'gt' : '>' , 'gte' : '>=' , 'lt' : '<' , 'lte' : '<=' , 'contains' : '~' } if terms is not None : find_opts [ 'terms' ] = terms else : conditions = [ ] for field , value in six . iteritems ( kwargs ) : if '__' in field : field , filtr = field . split ( '__' ) if filtr not in search_operators : raise Exception ( "Unsupported search filter '%s'" % filtr ) op = search_operators [ filtr ] else : op = search_operators [ 'contains' ] # default search mode if field in self . search_fields_aliases : field = self . search_fields_aliases [ field ] if field not in self . search_fields : raise Exception ( "Error generating Fedora findObjects query: unknown search field '%s'" % field ) if ' ' in value : # if value contains whitespace , it must be delimited with single quotes value = "'%s'" % value conditions . append ( "%s%s%s" % ( field , op , value ) ) query = ' ' . join ( conditions ) find_opts [ 'query' ] = query r = self . api . findObjects ( ** find_opts ) chunk = parse_xml_object ( SearchResults , r . content , r . url ) while True : for result in chunk . results : yield type ( self . api , result . pid ) if chunk . session_token : r = self . api . findObjects ( session_token = chunk . session_token , ** find_opts ) chunk = parse_xml_object ( SearchResults , r . content , r . url ) else : break
def prox_line(xy, step):
    """2D projection onto 2 lines."""
    return np.concatenate((prox_xline(xy[0], step), prox_yline(xy[1], step)))
def rotate(self):
    '''Move the first address to the last position.'''
    item = self._address_infos.pop(0)
    self._address_infos.append(item)
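The rotation itself is just a pop/append on a Python list; a standalone sketch, with a plain list standing in for `self._address_infos`:

address_infos = ['10.0.0.1', '10.0.0.2', '10.0.0.3']  # stand-in for self._address_infos

item = address_infos.pop(0)
address_infos.append(item)
print(address_infos)  # ['10.0.0.2', '10.0.0.3', '10.0.0.1'] -- the first address moved to the end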
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
    """Check a single juju rmq unit for ssl and port in the config file."""
host = sentry_unit . info [ 'public-address' ] unit_name = sentry_unit . info [ 'unit_name' ] conf_file = '/etc/rabbitmq/rabbitmq.config' conf_contents = str ( self . file_contents_safe ( sentry_unit , conf_file , max_wait = 16 ) ) # Checks conf_ssl = 'ssl' in conf_contents conf_port = str ( port ) in conf_contents # Port explicitly checked in config if port and conf_port and conf_ssl : self . log . debug ( 'SSL is enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return True elif port and not conf_port and conf_ssl : self . log . debug ( 'SSL is enabled @{} but not on port {} ' '({})' . format ( host , port , unit_name ) ) return False # Port not checked ( useful when checking that ssl is disabled ) elif not port and conf_ssl : self . log . debug ( 'SSL is enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return True elif not conf_ssl : self . log . debug ( 'SSL not enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return False else : msg = ( 'Unknown condition when checking SSL status @{}:{} ' '({})' . format ( host , port , unit_name ) ) amulet . raise_status ( amulet . FAIL , msg )
def lookup ( name , min_similarity_ratio = .75 ) : """Look up for a Stan function with similar functionality to a Python function ( or even an R function , see examples ) . If the function is not present on the lookup table , then attempts to find similar one and prints the results . This function requires package ` pandas ` . Parameters name : str Name of the function one wants to look for . min _ similarity _ ratio : float In case no exact match is found on the lookup table , the function will attempt to find similar names using ` difflib . SequenceMatcher . ratio ( ) ` , and then results with calculated ratio below ` min _ similarity _ ratio ` will be discarded . Examples # Look up for a Stan function similar to scipy . stats . skewnorm lookup ( " scipy . stats . skewnorm " ) # Look up for a Stan function similar to R dnorm lookup ( " R . dnorm " ) # Look up for a Stan function similar to numpy . hstack lookup ( " numpy . hstack " ) # List Stan log probability mass functions lookup ( " lpmfs " ) # List Stan log cumulative density functions lookup ( " lcdfs " ) Returns A pandas . core . frame . DataFrame if exact or at least one similar result is found , None otherwise ."""
if lookuptable is None : build ( ) if name not in lookuptable . keys ( ) : from difflib import SequenceMatcher from operator import itemgetter print ( "No match for " + name + " in the lookup table." ) lkt_keys = list ( lookuptable . keys ( ) ) mapfunction = lambda x : SequenceMatcher ( a = name , b = x ) . ratio ( ) similars = list ( map ( mapfunction , lkt_keys ) ) similars = zip ( range ( len ( similars ) ) , similars ) similars = list ( filter ( lambda x : x [ 1 ] >= min_similarity_ratio , similars ) ) similars = sorted ( similars , key = itemgetter ( 1 ) ) if ( len ( similars ) ) : print ( "But the following similar entries were found: " ) for i in range ( len ( similars ) ) : print ( lkt_keys [ similars [ i ] [ 0 ] ] + " ===> with similary " "ratio of " + str ( round ( similars [ i ] [ 1 ] , 3 ) ) + "" ) print ( "Will return results for entry" " " + lkt_keys [ similars [ i ] [ 0 ] ] + " " "(which is the most similar entry found)." ) return lookup ( lkt_keys [ similars [ i ] [ 0 ] ] ) else : print ( "And no similar entry found. You may try to decrease" "the min_similarity_ratio parameter." ) return entries = stanftable [ lookuptable [ name ] ] if not len ( entries ) : return "Found no equivalent Stan function available for " + name try : import pandas as pd except ImportError : raise ImportError ( 'Package pandas is require to use this ' 'function.' ) return pd . DataFrame ( entries )
def parse_arguments():
    """Collect command-line arguments.

    Let the caller run parse_args(), as sphinx-argparse requires a function
    that returns an instance of argparse.ArgumentParser.
    """
# Pull a few settings from the environment , should they exist base_dn = os . environ [ 'PUDL_BASE_DN' ] if 'PUDL_BASE_DN' in os . environ else 'OU=Departments,DC=example,DC=com' domain = os . environ [ 'PUDL_DOMAIN' ] . upper ( ) if 'PUDL_DOMAIN' in os . environ else 'EXAMPLE' page_size = os . environ [ 'PUDL_PAGE_SIZE' ] . upper ( ) if 'PUDL_PAGE_SIZE' in os . environ else 300 tls_no_verify = bool ( os . environ [ 'PUDL_TLS_NO_VERIFY' ] . lower ( ) . capitalize ( ) ) if 'PUDL_TLS_NO_VERIFY' in os . environ else False parser = argparse . ArgumentParser ( prog = 'pudl' , description = 'A script for interacting with Active ' + 'Directory, leveraging python-ldap' ) parser . add_argument ( '-V' , '--version' , action = 'version' , version = 'pudl v' + pudl_version , help = "Print the version number and exit" ) subparsers = parser . add_subparsers ( dest = 'subcommand' , help = 'Sub-command help' ) parser_common = subparsers . add_parser ( 'common' , add_help = False ) parser_common . add_argument ( '--user' , '-u' , action = 'store' , dest = 'user' , help = 'The ldap user (bind dn) to connect as. ' + 'The full DN will work, or often, just the CN may be ' + 'sufficient, such as "John Smith", or more commonly, ' + 'specify the domain and sAMAccountName. Defaults to ' + '{0}\\username. The domain ' . format ( domain ) + 'portion may be overridden with PUDL_DOMAIN' , default = '{0}\\{1}' . format ( domain , getpass . getuser ( ) ) ) parser_common . add_argument ( '--password' , '-p' , action = 'store' , dest = 'password' , help = "The connecting user's password" ) parser_common . add_argument ( '--host' , '-H' , action = 'store' , dest = 'host' , help = 'The AD/LDAP host, defaults to ldap' , default = 'ldap' ) parser_common . add_argument ( '--port' , '-P' , action = 'store' , dest = 'port' , help = 'The ldap port, defaults to 389. 389 is ' + 'is the standard port' , type = int , default = 389 ) parser_common . add_argument ( '--page-size' , '-s' , action = 'store' , dest = 'page_size' , help = 'The ldap results are paged, specify the ' + 'number of results per page, defaults to ' + '{0}. May be overridden with PUDL_PAGE_SIZE' . format ( page_size ) , type = int , default = page_size ) parser_common . add_argument ( '--base-dn' , '-b' , action = 'store' , dest = 'base_dn' , default = base_dn , help = "The Base DN to use, defaults to {0}. " . format ( base_dn ) + "May be overridden with PUDL_BASE_DN" ) parser_common . add_argument ( '--attribute' , '-a' , action = 'append' , dest = 'attributes' , metavar = 'ATTRIBUTE' , help = "Attributes to include in results objects. Note that " + "any nested objects return all attributes. Maybe be used " + "multiple times, and if not specified, all " + "attributes are included in top-level objects" ) parser_common . add_argument ( '--grep' , '-g' , action = 'append' , dest = 'grep' , help = 'Filter results to only those matching the specified ' + 'regular expression (compares against all attributes). ' + 'May be used multiple times' ) parser_common . add_argument ( '--attributes-only' , '-A' , action = 'store_true' , dest = 'attributes_only' , help = "Only display a list of attributes " + "that are present for the object type returned by the LDAP query" ) parser_common . add_argument ( '--output-format' , '-f' , action = 'store' , dest = 'output_format' , choices = [ 'json' , 'yaml' ] , default = 'json' , help = "Output format, defaults to json." ) parser_common . 
add_argument ( '--verbose' , '-v' , action = 'store_true' , dest = 'verbose' , help = 'Turn on verbose output' , default = False ) parser_common . add_argument ( '--debug' , '-d' , action = 'store_true' , dest = 'debug' , default = False , help = "Print out debugging information, very chatty" ) parser_common . add_argument ( '--tls-no-verify' , '-V' , action = 'store_true' , dest = 'tls_no_verify' , default = tls_no_verify , help = "Don't verify the authenticity " + "of the server's certificate, defaults to " + "{0} and may be overridden with " . format ( tls_no_verify ) + "PUDL_TLS_NO_VERIFY" ) parser_user = subparsers . add_parser ( 'user' , parents = [ parser_common ] , conflict_handler = 'resolve' , help = 'Pull user objects from AD' ) parser_user . add_argument ( nargs = "*" , dest = 'samaccountnames' , help = 'sAMAccountNames for any user objects that are to be ' + 'looked up. If unspecified, returns all users under the base ' + 'DN provided' ) parser_user . add_argument ( '--explicit-membership-only' , '-e' , action = 'store_true' , dest = 'explicit_membership_only' , default = False , help = "Only show membership for users that is explicit, " + "not taking into account group nesting. Defaults to False" ) parser_group = subparsers . add_parser ( 'group' , parents = [ parser_common ] , conflict_handler = 'resolve' , help = 'Pull group objects from AD' ) parser_group . add_argument ( nargs = "*" , dest = 'samaccountnames' , help = "sAMAccountNames for any group objects that are to be " + 'looked up. If unspecified, returns all groups under the base ' + 'DN provided. sAMAccountName may not be present in group ' + 'objects in modern AD schemas' ) parser_group . add_argument ( '--explicit-membership-only' , '-e' , action = 'store_true' , dest = 'explicit_membership_only' , default = False , help = "Only show membership for users that is explicit, " + "not taking into account group nesting. Defaults to False" ) parser_computer = subparsers . add_parser ( 'computer' , parents = [ parser_common ] , conflict_handler = 'resolve' , help = 'Pull computer objects from AD' ) parser_computer . add_argument ( nargs = "*" , dest = 'samaccountnames' , help = "sAMAccountNames for any computer objects that are to be " + 'looked up. If unspecified, returns all computers under ' + 'the base DN provided.' ) # sphinx is not add _ help = False aware . . . del subparsers . choices [ 'common' ] return parser
def resume(self, container_id=None, sudo=None):
    '''resume a stopped OciImage container, if it exists

    Equivalent command line example:
        singularity oci resume <container_ID>

    Parameters
        container_id: the id to resume.
        sudo: Add sudo to the command. If the container was created by root,
              you need sudo to interact and get its state.

    Returns
        return_code: the return code to indicate if the container was resumed.
    '''
    return self._state_command(container_id, command='resume', sudo=sudo)
def get_nn_info(self, structure, n):
    """Get all near-neighbor information.

    Args:
        structure: (Structure) pymatgen Structure
        n: (int) index of target site

    Returns:
        siw (list of dicts): each dictionary provides information about a
            single near neighbor, where key 'site' gives access to the
            corresponding Site object, 'image' gives the image location, and
            'weight' provides the weight that a given near-neighbor site
            contributes to the coordination number (1 or smaller),
            'site_index' gives index of the corresponding site in the
            original structure.
    """
    nndata = self.get_nn_data(structure, n)

    if not self.weighted_cn:
        max_key = max(nndata.cn_weights, key=lambda k: nndata.cn_weights[k])
        nn = nndata.cn_nninfo[max_key]
        for entry in nn:
            entry["weight"] = 1
        return nn
    else:
        for entry in nndata.all_nninfo:
            weight = 0
            for cn in nndata.cn_nninfo:
                for cn_entry in nndata.cn_nninfo[cn]:
                    if entry["site"] == cn_entry["site"]:
                        weight += nndata.cn_weights[cn]
            entry["weight"] = weight
        return nndata.all_nninfo
def _attachment_uri(self, attachid):
    """Returns the URI for the given attachment ID."""
    att_uri = self.url.replace('xmlrpc.cgi', 'attachment.cgi')
    att_uri = att_uri + '?id=%s' % attachid
    return att_uri
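For example, with a hypothetical Bugzilla base URL the substitution produces:

url = 'https://bugzilla.example.com/xmlrpc.cgi'   # hypothetical base URL
att_uri = url.replace('xmlrpc.cgi', 'attachment.cgi') + '?id=%s' % 1234
print(att_uri)  # https://bugzilla.example.com/attachment.cgi?id=1234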
def calc_one_vert_gauss(one_vert, xyz=None, std=None):
    """Calculate how many electrodes influence one vertex, using a Gaussian function.

    Parameters
    ----------
    one_vert : ndarray
        vector of xyz position of a vertex
    xyz : ndarray
        nChan X 3 with the position of all the channels
    std : float
        distance in mm of the Gaussian kernel

    Returns
    -------
    ndarray
        one vector with values for one vertex
    """
    trans = empty(xyz.shape[0])
    for i, one_xyz in enumerate(xyz):
        trans[i] = gauss(norm(one_vert - one_xyz), std)
    return trans
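A self-contained sketch of the same weighting, assuming `gauss` is a standard Gaussian kernel exp(-d²/(2·std²)) and that `norm`/`empty` come from numpy:

import numpy as np

one_vert = np.array([0.0, 0.0, 0.0])               # vertex position
xyz = np.array([[1.0, 0.0, 0.0],                   # three channel positions
                [0.0, 5.0, 0.0],
                [0.0, 0.0, 20.0]])
std = 5.0

dist = np.linalg.norm(xyz - one_vert, axis=1)      # distance of each channel to the vertex
weights = np.exp(-dist ** 2 / (2 * std ** 2))      # assumed Gaussian kernel
print(weights)                                     # nearby channels contribute ~1, distant ones ~0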
def setup_timezone(timezone: str) -> None:
    """Shortcut helper to configure timezone for backend application.

    :param timezone: Timezone to use, e.g. "UTC", "Europe/Kiev".
    """
    if timezone and hasattr(time, 'tzset'):
        tz_root = '/usr/share/zoneinfo'
        tz_filename = os.path.join(tz_root, *(timezone.split('/')))

        if os.path.exists(tz_root) and not os.path.exists(tz_filename):
            raise ValueError('Incorrect timezone value: {0}'.format(timezone))

        os.environ['TZ'] = timezone
        time.tzset()
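Typical use is once at process start-up; on POSIX systems, local time formatting follows the configured zone afterwards:

import time

setup_timezone('Europe/Kiev')   # silently does nothing on platforms without time.tzset
print(time.strftime('%Z %z'))   # local time formatting now reflects the configured zone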
def Collect(self, knowledge_base, artifact_definition, searcher):
    """Collects values using a Windows Registry value artifact definition.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing information.
        artifact_definition (artifacts.ArtifactDefinition): artifact definition.
        searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to
            preprocess the Windows Registry.

    Raises:
        PreProcessFail: if the Windows Registry key or value cannot be read.
    """
for source in artifact_definition . sources : if source . type_indicator not in ( artifact_definitions . TYPE_INDICATOR_WINDOWS_REGISTRY_KEY , artifact_definitions . TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE ) : continue if source . type_indicator == ( artifact_definitions . TYPE_INDICATOR_WINDOWS_REGISTRY_KEY ) : key_value_pairs = [ { 'key' : key } for key in source . keys ] else : key_value_pairs = source . key_value_pairs for key_value_pair in key_value_pairs : key_path = key_value_pair [ 'key' ] # The artifact definitions currently incorrectly define # CurrentControlSet so we correct it here for now . # Also see : https : / / github . com / ForensicArtifacts / artifacts / issues / 120 key_path_upper = key_path . upper ( ) if key_path_upper . startswith ( '%%CURRENT_CONTROL_SET%%' ) : key_path = '{0:s}{1:s}' . format ( 'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet' , key_path [ 23 : ] ) find_spec = registry_searcher . FindSpec ( key_path_glob = key_path ) for key_path in searcher . Find ( find_specs = [ find_spec ] ) : try : registry_key = searcher . GetKeyByPath ( key_path ) except IOError as exception : raise errors . PreProcessFail ( ( 'Unable to retrieve Windows Registry key: {0:s} with error: ' '{1!s}' ) . format ( key_path , exception ) ) if registry_key : value_name = key_value_pair . get ( 'value' , None ) self . _ParseKey ( knowledge_base , registry_key , value_name )
def _execute(self, parts, expectation=None, format_callback=None):
    """Really execute a redis command.

    :param list parts: The list of command parts
    :param mixed expectation: Optional response expectation
    :rtype: :class:`~tornado.concurrent.Future`
    :raises: :exc:`~tredis.exceptions.SubscribedError`
    """
future = concurrent . TracebackFuture ( ) try : command = self . _build_command ( parts ) except ValueError as error : future . set_exception ( error ) return future def on_locked ( _ ) : if self . ready : if self . _clustering : cmd = Command ( command , self . _pick_cluster_host ( parts ) , expectation , format_callback ) else : LOGGER . debug ( 'Connection: %r' , self . _connection ) cmd = Command ( command , self . _connection , expectation , format_callback ) LOGGER . debug ( '_execute(%r, %r, %r) on %s' , cmd . command , expectation , format_callback , cmd . connection . name ) cmd . connection . execute ( cmd , future ) else : LOGGER . critical ( 'Lock released & not ready, aborting command' ) # Wait until the cluster is ready , letting cluster discovery through if not self . ready and not self . _connected . is_set ( ) : self . io_loop . add_future ( self . _connected . wait ( ) , lambda f : self . io_loop . add_future ( self . _busy . acquire ( ) , on_locked ) ) else : self . io_loop . add_future ( self . _busy . acquire ( ) , on_locked ) # Release the lock when the future is complete self . io_loop . add_future ( future , lambda r : self . _busy . release ( ) ) return future
def getopenfilenames(parent=None, caption='', basedir='', filters='',
                     selectedfilter='', options=None):
    """Wrapper around QtGui.QFileDialog.getOpenFileNames static method.

    Returns a tuple (filenames, selectedfilter) -- when dialog box is canceled,
    returns a tuple (empty list, empty string).
    Compatible with PyQt >= v4.4 (API #1 and #2) and PySide >= v1.0
    """
    return _qfiledialog_wrapper('getOpenFileNames', parent=parent,
                                caption=caption, basedir=basedir,
                                filters=filters, selectedfilter=selectedfilter,
                                options=options)
def find_cross_contamination(databases, pair, tmpdir='tmp', log='log.txt', threads=1):
    """Uses mash to find out whether or not a sample has more than one genus
    present, indicating cross-contamination.

    :param databases: A databases folder, which must contain refseq.msh, a mash
        sketch that has one representative per genus from refseq.
    :param tmpdir: Temporary directory to store mash result files in.
    :param pair: Array with path to forward reads at index 0 and path to
        reverse reads at index 1.
    :param log: Logfile to write to.
    :param threads: Number of threads to run mash with.
    :return: cross_contam: a bool that is True if more than one genus is found,
        and False otherwise.
    :return: genera_present: A string. If only one genus is found, string is
        just genus. If more than one genus is found, the string is a list of
        genera present, separated by colons (i.e. for Escherichia and
        Salmonella found, string would be 'Escherichia:Salmonella'). If no
        genus found, return 'NA'.
    """
    genera_present = list()
    out, err, cmd = mash.screen('{}/refseq.msh'.format(databases), pair[0], pair[1],
                                threads=threads, w='', i='0.95',
                                output_file=os.path.join(tmpdir, 'screen.tab'),
                                returncmd=True)
    write_to_logfile(log, out, err, cmd)
    screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
    for item in screen_output:
        mash_genus = item.query_id.split('/')[-3]
        if mash_genus == 'Shigella':
            mash_genus = 'Escherichia'
        if mash_genus not in genera_present:
            genera_present.append(mash_genus)
    if len(genera_present) == 1:
        genera_present = genera_present[0]
    elif len(genera_present) == 0:
        genera_present = 'NA'
    else:
        tmpstr = ''
        for mash_genus in genera_present:
            tmpstr += mash_genus + ':'
        genera_present = tmpstr[:-1]
    return genera_present
def OnBorderChoice ( self , event ) : """Change the borders that are affected by color and width changes"""
choicelist = event . GetEventObject ( ) . GetItems ( ) self . borderstate = choicelist [ event . GetInt ( ) ]
def _depth_limited_walk ( top , max_depth = None ) : '''Walk the directory tree under top , descending at most max _ depth levels . With max _ depth = None ( default ) , do not limit depth .'''
for root , dirs , files in salt . utils . path . os_walk ( top ) : if max_depth is not None : rel_depth = root . count ( os . path . sep ) - top . count ( os . path . sep ) if rel_depth >= max_depth : del dirs [ : ] yield ( six . text_type ( root ) , list ( dirs ) , list ( files ) )
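For readers without the salt utilities available, a minimal standalone sketch of the same depth-limiting idea using only os.walk is shown below; the starting directory is just an example.

import os

def depth_limited_walk(top, max_depth=None):
    # Walk `top`, pruning `dirs` in place once the relative depth reaches max_depth.
    for root, dirs, files in os.walk(top):
        if max_depth is not None:
            rel_depth = root.count(os.path.sep) - top.count(os.path.sep)
            if rel_depth >= max_depth:
                del dirs[:]  # stop os.walk from descending further
        yield root, list(dirs), list(files)

# Example: list only the first level below the current directory.
for root, dirs, files in depth_limited_walk('.', max_depth=1):
    print(root, dirs, files)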
def load_delimited ( filename , converters , delimiter = r'\s+' ) : r"""Utility function for loading in data from an annotation file where columns are delimited . The number of columns is inferred from the length of the provided converters list . Examples > > > # Load in a one - column list of event times ( floats ) > > > load _ delimited ( ' events . txt ' , [ float ] ) > > > # Load in a list of labeled events , separated by commas > > > load _ delimited ( ' labeled _ events . csv ' , [ float , str ] , ' , ' ) Parameters filename : str Path to the annotation file converters : list of functions Each entry in column ` ` n ` ` of the file will be cast by the function ` ` converters [ n ] ` ` . delimiter : str Separator regular expression . By default , lines will be split by any amount of whitespace . Returns columns : tuple of lists Each list in this tuple corresponds to values in one of the columns in the file ."""
# Initialize list of empty lists n_columns = len ( converters ) columns = tuple ( list ( ) for _ in range ( n_columns ) ) # Create re object for splitting lines splitter = re . compile ( delimiter ) # Note : we do io manually here for two reasons . # 1 . The csv module has difficulties with unicode , which may lead # to failures on certain annotation strings # 2 . numpy ' s text loader does not handle non - numeric data with _open ( filename , mode = 'r' ) as input_file : for row , line in enumerate ( input_file , 1 ) : # Split each line using the supplied delimiter data = splitter . split ( line . strip ( ) , n_columns - 1 ) # Throw a helpful error if we got an unexpected # of columns if n_columns != len ( data ) : raise ValueError ( 'Expected {} columns, got {} at ' '{}:{:d}:\n\t{}' . format ( n_columns , len ( data ) , filename , row , line ) ) for value , column , converter in zip ( data , columns , converters ) : # Try converting the value , throw a helpful error on failure try : converted_value = converter ( value ) except : raise ValueError ( "Couldn't convert value {} using {} " "found at {}:{:d}:\n\t{}" . format ( value , converter . __name__ , filename , row , line ) ) column . append ( converted_value ) # Sane output if n_columns == 1 : return columns [ 0 ] else : return columns
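A self-contained sketch of the same converter-per-column parsing pattern, run against an in-memory stream instead of an annotation file (the helper name and sample values below are illustrative, not part of the original module):

import io
import re

def parse_delimited(stream, converters, delimiter=r'\s+'):
    # One output list per column; each value is cast by the matching converter.
    columns = tuple([] for _ in converters)
    splitter = re.compile(delimiter)
    for row, line in enumerate(stream, 1):
        data = splitter.split(line.strip(), len(converters) - 1)
        if len(data) != len(converters):
            raise ValueError('row {}: expected {} columns, got {}'.format(
                row, len(converters), len(data)))
        for value, column, converter in zip(data, columns, converters):
            column.append(converter(value))
    return columns

times, labels = parse_delimited(io.StringIO('0.5 onset\n1.25 beat\n'), [float, str])
print(times, labels)   # [0.5, 1.25] ['onset', 'beat']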
def local_port_range ( self ) : """Tuple of ( low _ port , high _ port ) reflecting the local port range assigned to outbound connections . We use this as part of a heuristic to determine whether a connection is inbound or outbound ."""
if self . _local_port_range is None : with open ( '/proc/sys/net/ipv4/ip_local_port_range' , 'r' ) as f : self . _local_port_range = tuple ( map ( int , f . read ( ) . split ( '\t' ) ) ) return self . _local_port_range
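A rough sketch of the inbound/outbound heuristic this property supports, with the /proc parsing shown separately and a hard-coded range used as a stand-in (the file only exists on Linux; example port values are assumed):

def read_local_port_range(path='/proc/sys/net/ipv4/ip_local_port_range'):
    # The file contains two whitespace-separated integers, e.g. "32768\t60999".
    with open(path) as f:
        low, high = map(int, f.read().split())
    return low, high

def looks_outbound(local_port, port_range=(32768, 60999)):
    # Heuristic: an ephemeral local port usually means the connection was initiated locally.
    low, high = port_range
    return low <= local_port <= high

print(looks_outbound(51234))  # True  (ephemeral port -> likely outbound)
print(looks_outbound(443))    # False (well-known port -> likely inbound)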
def load_data ( handle , reader = None ) : '''Unpack data into a raw data wrapper'''
if not reader : reader = os . path . splitext ( handle ) [ 1 ] [ 1 : ] . lower ( ) if reader not in _READERS : raise NeuroMError ( 'Do not have a loader for "%s" extension' % reader ) filename = _get_file ( handle ) try : return _READERS [ reader ] ( filename ) except Exception as e : L . exception ( 'Error reading file %s, using "%s" loader' , filename , reader ) raise RawDataError ( 'Error reading file %s:\n%s' % ( filename , str ( e ) ) )
def dump ( node ) : """Dump initialized object structure to yaml"""
from qubell . api . private . platform import Auth , QubellPlatform from qubell . api . private . organization import Organization from qubell . api . private . application import Application from qubell . api . private . instance import Instance from qubell . api . private . revision import Revision from qubell . api . private . environment import Environment from qubell . api . private . zone import Zone from qubell . api . private . manifest import Manifest # Exclude keys from dump # Format : { ' ClassName ' : [ ' fields ' , ' to ' , ' exclude ' ] } exclusion_list = { Auth : [ 'cookies' ] , QubellPlatform : [ 'auth' , ] , Organization : [ 'auth' , 'organizationId' , 'zone' ] , Application : [ 'auth' , 'applicationId' , 'organization' ] , Instance : [ 'auth' , 'instanceId' , 'application' ] , Manifest : [ 'name' , 'content' ] , Revision : [ 'auth' , 'revisionId' ] , Environment : [ 'auth' , 'environmentId' , 'organization' ] , Zone : [ 'auth' , 'zoneId' , 'organization' ] , } def obj_presenter ( dumper , obj ) : for x in exclusion_list . keys ( ) : if isinstance ( obj , x ) : # Find class fields = obj . __dict__ . copy ( ) for excl_item in exclusion_list [ x ] : try : fields . pop ( excl_item ) except : log . warn ( 'No item %s in object %s' % ( excl_item , x ) ) return dumper . represent_mapping ( 'tag:yaml.org,2002:map' , fields ) return dumper . represent_mapping ( 'tag:yaml.org,2002:map' , obj . __dict__ ) noalias_dumper = yaml . dumper . Dumper noalias_dumper . ignore_aliases = lambda self , data : True yaml . add_representer ( unicode , lambda dumper , value : dumper . represent_scalar ( u'tag:yaml.org,2002:str' , value ) ) yaml . add_multi_representer ( object , obj_presenter ) serialized = yaml . dump ( node , default_flow_style = False , Dumper = noalias_dumper ) return serialized
def _addLink ( self , dirTree , dirID , dirSeq , dirPath , name ) : """Add tree reference and name . ( Hardlink ) ."""
logger . debug ( "Link %d-%d-%d '%s%s'" , dirTree , dirID , dirSeq , dirPath , name ) # assert dirTree ! = 0 , ( dirTree , dirID , dirSeq , dirPath , name ) assert ( dirTree , dirID , dirSeq ) not in self . links , ( dirTree , dirID , dirSeq ) self . links [ ( dirTree , dirID , dirSeq ) ] = ( dirPath , name ) assert len ( self . links ) == 1 , self . links # Cannot have multiple hardlinks to a directory logger . debug ( "%s" , self )
def show_sbridges ( self ) : """Visualize salt bridges ."""
for i , saltb in enumerate ( self . plcomplex . saltbridges ) : if saltb . protispos : for patom in saltb . positive_atoms : cmd . select ( 'PosCharge-P' , 'PosCharge-P or (id %i & %s)' % ( patom , self . protname ) ) for latom in saltb . negative_atoms : cmd . select ( 'NegCharge-L' , 'NegCharge-L or (id %i & %s)' % ( latom , self . ligname ) ) for sbgroup in [ [ 'ps-sbl-1-%i' % i , 'Chargecenter-P' , saltb . positive_center ] , [ 'ps-sbl-2-%i' % i , 'Chargecenter-L' , saltb . negative_center ] ] : cmd . pseudoatom ( sbgroup [ 0 ] , pos = sbgroup [ 2 ] ) cmd . pseudoatom ( sbgroup [ 1 ] , pos = sbgroup [ 2 ] ) cmd . distance ( 'Saltbridges' , 'ps-sbl-1-%i' % i , 'ps-sbl-2-%i' % i ) else : for patom in saltb . negative_atoms : cmd . select ( 'NegCharge-P' , 'NegCharge-P or (id %i & %s)' % ( patom , self . protname ) ) for latom in saltb . positive_atoms : cmd . select ( 'PosCharge-L' , 'PosCharge-L or (id %i & %s)' % ( latom , self . ligname ) ) for sbgroup in [ [ 'ps-sbp-1-%i' % i , 'Chargecenter-P' , saltb . negative_center ] , [ 'ps-sbp-2-%i' % i , 'Chargecenter-L' , saltb . positive_center ] ] : cmd . pseudoatom ( sbgroup [ 0 ] , pos = sbgroup [ 2 ] ) cmd . pseudoatom ( sbgroup [ 1 ] , pos = sbgroup [ 2 ] ) cmd . distance ( 'Saltbridges' , 'ps-sbp-1-%i' % i , 'ps-sbp-2-%i' % i ) if self . object_exists ( 'Saltbridges' ) : cmd . set ( 'dash_color' , 'yellow' , 'Saltbridges' ) cmd . set ( 'dash_gap' , 0.5 , 'Saltbridges' )
def status_log ( func , message , * args , ** kwargs ) : """Emits header message , executes a callable , and echoes the return strings ."""
click . echo ( message ) log = func ( * args , ** kwargs ) if log : out = [ ] for line in log . split ( '\n' ) : if not line . startswith ( '#' ) : out . append ( line ) click . echo ( black ( '\n' . join ( out ) ) )
def setup_local_geometry ( self , isite , coords , optimization = None ) : """Sets up the AbstractGeometry for the local geometry of site with index isite . : param isite : Index of the site for which the local geometry has to be set up : param coords : The coordinates of the ( local ) neighbors"""
self . local_geometry = AbstractGeometry ( central_site = self . structure . cart_coords [ isite ] , bare_coords = coords , centering_type = self . centering_type , include_central_site_in_centroid = self . include_central_site_in_centroid , optimization = optimization )
def epsilon_crit ( self ) : """returns the critical projected mass density in units of M _ sun / Mpc ^ 2 ( physical units ) : return : critical projected mass density"""
if not hasattr ( self , '_Epsilon_Crit' ) : const_SI = const . c ** 2 / ( 4 * np . pi * const . G ) # c ^ 2 / ( 4 * pi * G ) in units of [ kg / m ] conversion = const . Mpc / const . M_sun # converts [ kg / m ] to [ M _ sun / Mpc ] factor = const_SI * conversion # c ^ 2 / ( 4 * pi * G ) in units of [ M _ sun / Mpc ] self . _Epsilon_Crit = self . D_s / ( self . D_d * self . D_ds ) * factor # [ M _ sun / Mpc ^ 2] return self . _Epsilon_Crit
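The quantity computed above is the critical surface mass density Sigma_cr = c^2 / (4 pi G) * D_s / (D_d * D_ds). A self-contained numeric sketch with hard-coded SI constants and made-up example distances (in Mpc):

import numpy as np

c = 299792458.0      # speed of light, m / s
G = 6.674e-11        # gravitational constant, m^3 / (kg s^2)
Mpc = 3.0857e22      # meters per megaparsec
M_sun = 1.989e30     # kg per solar mass

def epsilon_crit(D_d, D_s, D_ds):
    # D_d, D_s, D_ds: angular diameter distances in Mpc
    factor = c ** 2 / (4 * np.pi * G)    # kg / m
    factor *= Mpc / M_sun                # convert to M_sun / Mpc
    return factor * D_s / (D_d * D_ds)   # M_sun / Mpc^2

# Illustrative distances, not taken from the document
print('%.3e M_sun / Mpc^2' % epsilon_crit(D_d=1000.0, D_s=1600.0, D_ds=900.0))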
def _binary_insert ( lst , elem , key , lo = 0 , hi = None ) : """Insert an element into a sorted list , and keep the list sorted . The major difference from bisect . bisect _ left is that this function supports a key method , so the user doesn ' t have to create the key array for each insertion . : param list lst : The list . Must be pre - ordered . : param object elem : An element to insert into the list . : param func key : A method to get the key for each element in the list . : param int lo : Lower bound of the search . : param int hi : Upper bound of the search . : return : None"""
if lo < 0 : raise ValueError ( "lo must be a non-negative number" ) if hi is None : hi = len ( lst ) while lo < hi : mid = ( lo + hi ) // 2 if key ( lst [ mid ] ) < key ( elem ) : lo = mid + 1 else : hi = mid lst . insert ( lo , elem )
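A small usage sketch, assuming _binary_insert from above is in scope; it keeps a list of dicts ordered by a score field:

# Assuming _binary_insert (defined above) is importable / in scope.
items = [{'name': 'a', 'score': 1}, {'name': 'c', 'score': 5}]
_binary_insert(items, {'name': 'b', 'score': 3}, key=lambda d: d['score'])
print([d['name'] for d in items])   # ['a', 'b', 'c']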
def size ( self ) : """Number of bytes of the file uploaded so far . Note that we only have the file size if the file was requested directly , not if it ' s part of a folder listing ."""
if hasattr ( self . f . latestRevision , 'size' ) : return int ( self . f . latestRevision . size ) return None
def get_version ( ) : """Reads the version ( MAJOR . MINOR ) from this module ."""
release = get_release ( ) split_version = release . split ( "." ) if len ( split_version ) == 3 : return "." . join ( split_version [ : 2 ] ) return release
def set_split_extents_by_split_size ( self ) : """Sets split shape : attr : ` split _ shape ` and split extents ( : attr : ` split _ begs ` and : attr : ` split _ ends ` ) from values in : attr : ` split _ size ` and : attr : ` split _ num _ slices _ per _ axis ` ."""
if self . split_size is None : if ( _np . all ( [ s is not None for s in self . split_num_slices_per_axis ] ) and _np . all ( [ s > 0 for s in self . split_num_slices_per_axis ] ) ) : self . split_size = _np . product ( self . split_num_slices_per_axis ) else : raise ValueError ( ( "Got invalid self.split_num_slices_per_axis=%s, all elements " + "need to be integers greater than zero when self.split_size is None." ) % self . split_num_slices_per_axis ) self . logger . debug ( "Pre canonicalise: self.split_num_slices_per_axis=%s" , self . split_num_slices_per_axis ) self . split_num_slices_per_axis = calculate_num_slices_per_axis ( self . split_num_slices_per_axis , self . split_size , self . array_shape ) self . logger . debug ( "Post canonicalise: self.split_num_slices_per_axis=%s" , self . split_num_slices_per_axis ) # Define the start and stop indices ( extents ) for each axis slice self . split_shape = self . split_num_slices_per_axis . copy ( ) self . split_begs = [ [ ] , ] * len ( self . array_shape ) self . split_ends = [ [ ] , ] * len ( self . array_shape ) for i in range ( len ( self . array_shape ) ) : self . split_begs [ i ] , self . split_ends [ i ] = self . calculate_axis_split_extents ( self . split_shape [ i ] , self . array_shape [ i ] )
def iter_packages ( self ) : """Iterate over the packages within this family , in no particular order . Returns : ` Package ` iterator ."""
for package in self . repository . iter_packages ( self . resource ) : yield Package ( package )
def set_include_entities ( self , include ) : """Sets ' include entities ' parameter to either include or exclude the entities node within the results : param include : Boolean to trigger the ' include entities ' parameter : raises : TwitterSearchException"""
if not isinstance ( include , bool ) : raise TwitterSearchException ( 1008 ) self . arguments . update ( { 'include_entities' : 'true' if include else 'false' } )
def parse ( readDataInstance ) : """Returns a new L { NetMetaDataHeader } object . @ type readDataInstance : L { ReadData } @ param readDataInstance : A L { ReadData } object with data to be parsed as a L { NetMetaDataHeader } object . @ rtype : L { NetMetaDataHeader } @ return : A new L { NetMetaDataHeader } object ."""
nmh = NetMetaDataHeader ( ) nmh . signature . value = readDataInstance . readDword ( ) nmh . majorVersion . value = readDataInstance . readWord ( ) nmh . minorVersion . value = readDataInstance . readWord ( ) nmh . reserved . value = readDataInstance . readDword ( ) nmh . versionLength . value = readDataInstance . readDword ( ) nmh . versionString . value = readDataInstance . readAlignedString ( ) nmh . flags . value = readDataInstance . readWord ( ) nmh . numberOfStreams . value = readDataInstance . readWord ( ) return nmh
def record_rename ( object_id , input_params = { } , always_retry = True , ** kwargs ) : """Invokes the / record - xxxx / rename API method . For more info , see : https : / / wiki . dnanexus . com / API - Specification - v1.0.0 / Name # API - method % 3A - % 2Fclass - xxxx % 2Frename"""
return DXHTTPRequest ( '/%s/rename' % object_id , input_params , always_retry = always_retry , ** kwargs )
def set_dependent_orders ( self , accountID , tradeSpecifier , ** kwargs ) : """Create , replace and cancel a Trade ' s dependent Orders ( Take Profit , Stop Loss and Trailing Stop Loss ) through the Trade itself Args : accountID : Account Identifier tradeSpecifier : Specifier for the Trade takeProfit : The specification of the Take Profit to create / modify / cancel . If takeProfit is set to null , the Take Profit Order will be cancelled if it exists . If takeProfit is not provided , the existing Take Profit Order will not be modified . If a sub - field of takeProfit is not specified , that field will be set to a default value on create , and be inherited by the replacing order on modify . stopLoss : The specification of the Stop Loss to create / modify / cancel . If stopLoss is set to null , the Stop Loss Order will be cancelled if it exists . If stopLoss is not provided , the existing Stop Loss Order will not be modified . If a sub - field of stopLoss is not specified , that field will be set to a default value on create , and be inherited by the replacing order on modify . trailingStopLoss : The specification of the Trailing Stop Loss to create / modify / cancel . If trailingStopLoss is set to null , the Trailing Stop Loss Order will be cancelled if it exists . If trailingStopLoss is not provided , the existing Trailing Stop Loss Order will not be modified . If a sub - field of trailingStopLoss is not specified , that field will be set to a default value on create , and be inherited by the replacing order on modify . Returns : v20 . response . Response containing the results from submitting the request"""
request = Request ( 'PUT' , '/v3/accounts/{accountID}/trades/{tradeSpecifier}/orders' ) request . set_path_param ( 'accountID' , accountID ) request . set_path_param ( 'tradeSpecifier' , tradeSpecifier ) body = EntityDict ( ) if 'takeProfit' in kwargs : body . set ( 'takeProfit' , kwargs [ 'takeProfit' ] ) if 'stopLoss' in kwargs : body . set ( 'stopLoss' , kwargs [ 'stopLoss' ] ) if 'trailingStopLoss' in kwargs : body . set ( 'trailingStopLoss' , kwargs [ 'trailingStopLoss' ] ) request . set_body_dict ( body . dict ) response = self . ctx . request ( request ) if response . content_type is None : return response if not response . content_type . startswith ( "application/json" ) : return response jbody = json . loads ( response . raw_body ) parsed_body = { } # Parse responses as defined by the API specification if str ( response . status ) == "200" : if jbody . get ( 'takeProfitOrderCancelTransaction' ) is not None : parsed_body [ 'takeProfitOrderCancelTransaction' ] = self . ctx . transaction . OrderCancelTransaction . from_dict ( jbody [ 'takeProfitOrderCancelTransaction' ] , self . ctx ) if jbody . get ( 'takeProfitOrderTransaction' ) is not None : parsed_body [ 'takeProfitOrderTransaction' ] = self . ctx . transaction . TakeProfitOrderTransaction . from_dict ( jbody [ 'takeProfitOrderTransaction' ] , self . ctx ) if jbody . get ( 'takeProfitOrderFillTransaction' ) is not None : parsed_body [ 'takeProfitOrderFillTransaction' ] = self . ctx . transaction . OrderFillTransaction . from_dict ( jbody [ 'takeProfitOrderFillTransaction' ] , self . ctx ) if jbody . get ( 'takeProfitOrderCreatedCancelTransaction' ) is not None : parsed_body [ 'takeProfitOrderCreatedCancelTransaction' ] = self . ctx . transaction . OrderCancelTransaction . from_dict ( jbody [ 'takeProfitOrderCreatedCancelTransaction' ] , self . ctx ) if jbody . get ( 'stopLossOrderCancelTransaction' ) is not None : parsed_body [ 'stopLossOrderCancelTransaction' ] = self . ctx . transaction . OrderCancelTransaction . from_dict ( jbody [ 'stopLossOrderCancelTransaction' ] , self . ctx ) if jbody . get ( 'stopLossOrderTransaction' ) is not None : parsed_body [ 'stopLossOrderTransaction' ] = self . ctx . transaction . StopLossOrderTransaction . from_dict ( jbody [ 'stopLossOrderTransaction' ] , self . ctx ) if jbody . get ( 'stopLossOrderFillTransaction' ) is not None : parsed_body [ 'stopLossOrderFillTransaction' ] = self . ctx . transaction . OrderFillTransaction . from_dict ( jbody [ 'stopLossOrderFillTransaction' ] , self . ctx ) if jbody . get ( 'stopLossOrderCreatedCancelTransaction' ) is not None : parsed_body [ 'stopLossOrderCreatedCancelTransaction' ] = self . ctx . transaction . OrderCancelTransaction . from_dict ( jbody [ 'stopLossOrderCreatedCancelTransaction' ] , self . ctx ) if jbody . get ( 'trailingStopLossOrderCancelTransaction' ) is not None : parsed_body [ 'trailingStopLossOrderCancelTransaction' ] = self . ctx . transaction . OrderCancelTransaction . from_dict ( jbody [ 'trailingStopLossOrderCancelTransaction' ] , self . ctx ) if jbody . get ( 'trailingStopLossOrderTransaction' ) is not None : parsed_body [ 'trailingStopLossOrderTransaction' ] = self . ctx . transaction . TrailingStopLossOrderTransaction . from_dict ( jbody [ 'trailingStopLossOrderTransaction' ] , self . ctx ) if jbody . get ( 'relatedTransactionIDs' ) is not None : parsed_body [ 'relatedTransactionIDs' ] = jbody . get ( 'relatedTransactionIDs' ) if jbody . get ( 'lastTransactionID' ) is not None : parsed_body [ 'lastTransactionID' ] = jbody . 
get ( 'lastTransactionID' ) elif str ( response . status ) == "400" : if jbody . get ( 'takeProfitOrderCancelRejectTransaction' ) is not None : parsed_body [ 'takeProfitOrderCancelRejectTransaction' ] = self . ctx . transaction . OrderCancelRejectTransaction . from_dict ( jbody [ 'takeProfitOrderCancelRejectTransaction' ] , self . ctx ) if jbody . get ( 'takeProfitOrderRejectTransaction' ) is not None : parsed_body [ 'takeProfitOrderRejectTransaction' ] = self . ctx . transaction . TakeProfitOrderRejectTransaction . from_dict ( jbody [ 'takeProfitOrderRejectTransaction' ] , self . ctx ) if jbody . get ( 'stopLossOrderCancelRejectTransaction' ) is not None : parsed_body [ 'stopLossOrderCancelRejectTransaction' ] = self . ctx . transaction . OrderCancelRejectTransaction . from_dict ( jbody [ 'stopLossOrderCancelRejectTransaction' ] , self . ctx ) if jbody . get ( 'stopLossOrderRejectTransaction' ) is not None : parsed_body [ 'stopLossOrderRejectTransaction' ] = self . ctx . transaction . StopLossOrderRejectTransaction . from_dict ( jbody [ 'stopLossOrderRejectTransaction' ] , self . ctx ) if jbody . get ( 'trailingStopLossOrderCancelRejectTransaction' ) is not None : parsed_body [ 'trailingStopLossOrderCancelRejectTransaction' ] = self . ctx . transaction . OrderCancelRejectTransaction . from_dict ( jbody [ 'trailingStopLossOrderCancelRejectTransaction' ] , self . ctx ) if jbody . get ( 'trailingStopLossOrderRejectTransaction' ) is not None : parsed_body [ 'trailingStopLossOrderRejectTransaction' ] = self . ctx . transaction . TrailingStopLossOrderRejectTransaction . from_dict ( jbody [ 'trailingStopLossOrderRejectTransaction' ] , self . ctx ) if jbody . get ( 'lastTransactionID' ) is not None : parsed_body [ 'lastTransactionID' ] = jbody . get ( 'lastTransactionID' ) if jbody . get ( 'relatedTransactionIDs' ) is not None : parsed_body [ 'relatedTransactionIDs' ] = jbody . get ( 'relatedTransactionIDs' ) if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) elif str ( response . status ) == "401" : if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) elif str ( response . status ) == "404" : if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) elif str ( response . status ) == "405" : if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) # Unexpected response status else : parsed_body = jbody response . body = parsed_body return response
def initialize ( cls ) -> None : """Initializes the ` ` SIGCHLD ` ` handler . The signal handler is run on an ` . IOLoop ` to avoid locking issues . Note that the ` . IOLoop ` used for signal handling need not be the same one used by individual Subprocess objects ( as long as the ` ` IOLoops ` ` are each running in separate threads ) . . . versionchanged : : 5.0 The ` ` io _ loop ` ` argument ( deprecated since version 4.1 ) has been removed . Availability : Unix"""
if cls . _initialized : return io_loop = ioloop . IOLoop . current ( ) cls . _old_sigchld = signal . signal ( signal . SIGCHLD , lambda sig , frame : io_loop . add_callback_from_signal ( cls . _cleanup ) , ) cls . _initialized = True
def _handle_tag_salt_error ( self , tag , data ) : '''Handle a _ salt _ error event'''
if self . connected : log . debug ( 'Forwarding salt error event tag=%s' , tag ) self . _fire_master ( data , tag )
def new ( self ) : # type : ( ) - > None '''A method to create a new UDF Primary Volume Descriptor . Parameters : None . Returns : Nothing .'''
if self . _initialized : raise pycdlibexception . PyCdlibInternalError ( 'UDF Primary Volume Descriptor already initialized' ) self . desc_tag = UDFTag ( ) self . desc_tag . new ( 1 ) # FIXME : we should let the user set serial _ number self . vol_desc_seqnum = 0 # FIXME : we should let the user set this self . desc_num = 0 # FIXME : we should let the user set this self . vol_ident = _ostaunicode_zero_pad ( 'CDROM' , 32 ) # According to UDF 2.60 , 2.2.2.5 , the VolumeSetIdentifier should have # at least the first 16 characters be a unique value . Further , the # first 8 bytes of that should be a time value in ASCII hexadecimal # representation . To make it truly unique , we use that time plus a # random value , all ASCII encoded . unique = format ( int ( time . time ( ) ) , '08x' ) + format ( random . getrandbits ( 26 ) , '08x' ) self . vol_set_ident = _ostaunicode_zero_pad ( unique , 128 ) self . desc_char_set = _unicodecharset ( ) self . explanatory_char_set = _unicodecharset ( ) self . vol_abstract_length = 0 # FIXME : we should let the user set this self . vol_abstract_extent = 0 # FIXME : we should let the user set this self . vol_copyright_length = 0 # FIXME : we should let the user set this self . vol_copyright_extent = 0 # FIXME : we should let the user set this self . app_ident = UDFEntityID ( ) self . app_ident . new ( ) self . recording_date = UDFTimestamp ( ) self . recording_date . new ( ) self . impl_ident = UDFEntityID ( ) self . impl_ident . new ( 0 , b'*pycdlib' ) self . implementation_use = b'\x00' * 64 # FIXME : we should let the user set this self . predecessor_vol_desc_location = 0 # FIXME : we should let the user set this self . max_interchange_level = 2 self . _initialized = True
def fit_angle ( fit1 , fit2 , degrees = True ) : """Finds the angle between the nominal vectors"""
return N . degrees ( angle ( fit1 . normal , fit2 . normal ) )
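The helper relies on an external angle function; a self-contained equivalent using the normalized dot product is sketched below with made-up normal vectors:

import numpy as np

def vector_angle(n1, n2, degrees=True):
    # Angle between two (not necessarily unit-length) normal vectors.
    n1, n2 = np.asarray(n1, float), np.asarray(n2, float)
    cos_theta = np.dot(n1, n2) / (np.linalg.norm(n1) * np.linalg.norm(n2))
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    return np.degrees(theta) if degrees else theta

print(vector_angle([0, 0, 1], [0, 1, 1]))   # 45.0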
def obtain_all_bond_lengths ( sp1 , sp2 , default_bl = None ) : """Obtain bond lengths for all bond orders from bond length database Args : sp1 ( Specie ) : First specie . sp2 ( Specie ) : Second specie . default _ bl : If a particular type of bond does not exist , use this bond length as a default value ( bond order = 1 ) . If None , a ValueError will be thrown . Return : A dict mapping bond order to bond length in angstrom"""
if isinstance ( sp1 , Element ) : sp1 = sp1 . symbol if isinstance ( sp2 , Element ) : sp2 = sp2 . symbol syms = tuple ( sorted ( [ sp1 , sp2 ] ) ) if syms in bond_lengths : return bond_lengths [ syms ] . copy ( ) elif default_bl is not None : return { 1 : default_bl } else : raise ValueError ( "No bond data for elements {} - {}" . format ( * syms ) )
def step ( self , action ) : """Apply sequence of actions to sequence of environments actions - > ( observations , rewards , news ) where ' news ' is a boolean vector indicating whether each element is new ."""
obs , rews , news , infos = self . env . step ( action ) self . ret = self . ret * self . gamma + rews obs = self . _filter_observation ( obs ) if self . ret_rms : self . ret_rms . update ( np . array ( [ self . ret ] ) ) rews = np . clip ( rews / np . sqrt ( self . ret_rms . var + self . epsilon ) , - self . cliprew , self . cliprew ) return obs , rews , news , infos
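A pure-NumPy sketch of the reward-normalization idea used here: track a running variance of the discounted return and scale (then clip) each raw reward by its square root. The running-stat class and the constants below are illustrative stand-ins for ret_rms, not the wrapper's actual implementation:

import numpy as np

class RunningStat:
    # Tracks mean / variance of a scalar stream (Welford's algorithm).
    def __init__(self):
        self.n, self.mean, self.m2 = 0, 0.0, 0.0
    def update(self, x):
        self.n += 1
        delta = x - self.mean
        self.mean += delta / self.n
        self.m2 += delta * (x - self.mean)
    @property
    def var(self):
        return self.m2 / self.n if self.n else 1.0

gamma, cliprew, eps = 0.99, 10.0, 1e-8
ret, stat = 0.0, RunningStat()
for raw_reward in [1.0, 0.5, -0.2, 2.0]:
    ret = ret * gamma + raw_reward                 # discounted return so far
    stat.update(ret)                               # update running variance of returns
    norm = np.clip(raw_reward / np.sqrt(stat.var + eps), -cliprew, cliprew)
    print(round(float(norm), 3))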
def infer_call_result ( self , caller , context = None ) : """infer what a class is returning when called"""
if ( self . is_subtype_of ( "%s.type" % ( BUILTINS , ) , context ) and len ( caller . args ) == 3 ) : result = self . _infer_type_call ( caller , context ) yield result return dunder_call = None try : metaclass = self . metaclass ( context = context ) if metaclass is not None : dunder_call = next ( metaclass . igetattr ( "__call__" , context ) ) except exceptions . AttributeInferenceError : pass if dunder_call and dunder_call . qname ( ) != "builtins.type.__call__" : context = contextmod . bind_context_to_node ( context , self ) yield from dunder_call . infer_call_result ( caller , context ) else : # Call type . _ _ call _ _ if not set metaclass # ( since type is the default metaclass ) yield bases . Instance ( self )
def run_reports ( reportlets_dir , out_dir , subject_label , run_uuid , config = None , packagename = None ) : """Runs the reports . . testsetup : : > > > from shutil import copytree > > > from tempfile import TemporaryDirectory > > > new _ path = Path ( _ _ file _ _ ) . resolve ( ) . parent . parent > > > test _ data _ path = new _ path / ' data ' / ' tests ' / ' work ' > > > tmpdir = TemporaryDirectory ( ) > > > os . chdir ( tmpdir . name ) # noqa > > > testdir = Path ( ) . resolve ( ) > > > data _ dir = copytree ( test _ data _ path , testdir / ' work ' ) > > > ( testdir / ' fmriprep ' ) . mkdir ( parents = True , exist _ ok = True ) . . doctest : : > > > run _ reports ( str ( testdir / ' work ' / ' reportlets ' ) , . . . str ( testdir / ' out ' ) , ' 01 ' , ' madeoutuuid ' ) . . testcleanup : : > > > tmpdir . cleanup ( )"""
report = Report ( Path ( reportlets_dir ) , out_dir , run_uuid , config = config , subject_id = subject_label , packagename = packagename ) return report . generate_report ( )
def show_messages ( self ) : """Show all messages ."""
string = self . header if self . static_message is not None : string += self . static_message . to_html ( ) for message in self . dynamic_messages : string += message . to_html ( ) string += self . footer print ( string ) self . setHtml ( string )
def morphological_chan_vese ( image , iterations , init_level_set = 'checkerboard' , smoothing = 1 , lambda1 = 1 , lambda2 = 1 , iter_callback = lambda x : None ) : """Morphological Active Contours without Edges ( MorphACWE ) Active contours without edges implemented with morphological operators . It can be used to segment objects in images and volumes without well defined borders . It is required that the inside of the object looks different on average than the outside ( i . e . , the inner area of the object should be darker or lighter than the outer area on average ) . Parameters image : ( M , N ) or ( L , M , N ) array Grayscale image or volume to be segmented . iterations : uint Number of iterations to run init _ level _ set : str , ( M , N ) array , or ( L , M , N ) array Initial level set . If an array is given , it will be binarized and used as the initial level set . If a string is given , it defines the method to generate a reasonable initial level set with the shape of the ` image ` . Accepted values are ' checkerboard ' and ' circle ' . See the documentation of ` checkerboard _ level _ set ` and ` circle _ level _ set ` respectively for details about how these level sets are created . smoothing : uint , optional Number of times the smoothing operator is applied per iteration . Reasonable values are around 1-4 . Larger values lead to smoother segmentations . lambda1 : float , optional Weight parameter for the outer region . If ` lambda1 ` is larger than ` lambda2 ` , the outer region will contain a larger range of values than the inner region . lambda2 : float , optional Weight parameter for the inner region . If ` lambda2 ` is larger than ` lambda1 ` , the inner region will contain a larger range of values than the outer region . iter _ callback : function , optional If given , this function is called once per iteration with the current level set as the only argument . This is useful for debugging or for plotting intermediate results during the evolution . Returns out : ( M , N ) or ( L , M , N ) array Final segmentation ( i . e . , the final level set ) See also circle _ level _ set , checkerboard _ level _ set Notes This is a version of the Chan - Vese algorithm that uses morphological operators instead of solving a partial differential equation ( PDE ) for the evolution of the contour . The set of morphological operators used in this algorithm are proved to be infinitesimally equivalent to the Chan - Vese PDE ( see [ 1 ] _ ) . However , morphological operators are do not suffer from the numerical stability issues typically found in PDEs ( it is not necessary to find the right time step for the evolution ) , and are computationally faster . The algorithm and its theoretical derivation are described in [ 1 ] _ . References . . [ 1 ] A Morphological Approach to Curvature - based Evolution of Curves and Surfaces , Pablo Márquez - Neila , Luis Baumela , Luis Álvarez . In IEEE Transactions on Pattern Analysis and Machine Intelligence ( PAMI ) , 2014 , DOI 10.1109 / TPAMI . 2013.106"""
init_level_set = _init_level_set ( init_level_set , image . shape ) _check_input ( image , init_level_set ) u = np . int8 ( init_level_set > 0 ) iter_callback ( u ) for _ in range ( iterations ) : # inside = u > 0 # outside = u < = 0 c0 = ( image * ( 1 - u ) ) . sum ( ) / float ( ( 1 - u ) . sum ( ) + 1e-8 ) c1 = ( image * u ) . sum ( ) / float ( u . sum ( ) + 1e-8 ) # Image attachment du = np . gradient ( u ) abs_du = np . abs ( du ) . sum ( 0 ) aux = abs_du * ( lambda1 * ( image - c1 ) ** 2 - lambda2 * ( image - c0 ) ** 2 ) u [ aux < 0 ] = 1 u [ aux > 0 ] = 0 # Smoothing for _ in range ( smoothing ) : u = _curvop ( u ) iter_callback ( u ) return u
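A usage sketch, assuming morphological_chan_vese as defined above (together with its _init_level_set and _curvop helpers) is importable; the input image is synthetic:

import numpy as np

# Assuming morphological_chan_vese (defined above) is in scope.
yy, xx = np.mgrid[:100, :100]
image = ((xx - 50) ** 2 + (yy - 50) ** 2 < 25 ** 2).astype(float)  # bright disk
image += 0.1 * np.random.randn(*image.shape)                        # add noise

evolution = []
segmentation = morphological_chan_vese(
    image, iterations=35, init_level_set='checkerboard',
    smoothing=3, iter_callback=evolution.append)
print(segmentation.shape, segmentation.dtype, len(evolution))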
def cas ( self , key , value , cas , time , compress_level = - 1 ) : """Set a key / value on the server only if the given CAS value still matches ( a cas of None behaves like a plain add , i . e . it only succeeds if the key does not exist ) . : param key : Key ' s name : type key : six . string _ types : param value : A value to be stored on server . : type value : object : param cas : CAS value obtained from a previous gets ; must not be 0 , and None means " add only if absent " . : type cas : int : param time : Time in seconds that your key will expire . : type time : int : param compress _ level : How much to compress . 0 = no compression , 1 = fastest , 9 = slowest but best , -1 = default compression level . : type compress _ level : int : return : True if the value was stored , False if the key already exists with a different CAS : rtype : bool"""
# The protocol CAS value 0 means " no cas " . Calling cas ( ) with that value is # probably unintentional . Don ' t allow it , since it would overwrite the value # without performing CAS at all . assert cas != 0 , '0 is an invalid CAS value' # If we get a cas of None , interpret that as " compare against nonexistant and set " , # which is simply Add . if cas is None : return self . _set_add_replace ( 'add' , key , value , time , compress_level = compress_level ) else : return self . _set_add_replace ( 'set' , key , value , time , cas = cas , compress_level = compress_level )
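A tiny in-memory illustration of the compare-and-set semantics this client wraps (the store and its version counter are purely illustrative, not part of the library):

class TinyCasStore:
    # Each key maps to (value, version); a write succeeds only if the caller's
    # version matches the current one (or, with version None, if the key is absent).
    def __init__(self):
        self.data = {}
    def gets(self, key):
        return self.data.get(key, (None, None))
    def cas(self, key, value, version):
        current = self.data.get(key)
        if version is None:                  # "add": only if key absent
            if current is not None:
                return False
        elif current is None or current[1] != version:
            return False                     # someone else changed it in between
        new_version = 1 if current is None else current[1] + 1
        self.data[key] = (value, new_version)
        return True

store = TinyCasStore()
store.cas('counter', 1, None)                # add
_, version = store.gets('counter')
print(store.cas('counter', 2, version))      # True
print(store.cas('counter', 99, version))     # False: stale version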
def max_drawdown ( returns = None , geometric = True , dd = None , inc_date = False ) : """Compute the max drawdown . returns : period return Series or DataFrame dd : drawdown Series or DataFrame ( mutually exclusive with returns )"""
if ( returns is None and dd is None ) or ( returns is not None and dd is not None ) : raise ValueError ( 'returns and drawdowns are mutually exclusive' ) if returns is not None : dd = drawdowns ( returns , geometric = geometric ) if isinstance ( dd , pd . DataFrame ) : vals = [ max_drawdown ( dd = dd [ c ] , inc_date = inc_date ) for c in dd . columns ] cols = [ 'maxdd' ] + ( inc_date and [ 'maxdd_dt' ] or [ ] ) res = pd . DataFrame ( vals , columns = cols , index = dd . columns ) return res if inc_date else res . maxdd else : mddidx = dd . idxmin ( ) # if mddidx = = dd . index [ 0 ] : # # no maxdd # return 0 if not inc _ date else ( 0 , None ) # else : sub = dd [ : mddidx ] start = sub [ : : - 1 ] . idxmax ( ) mdd = dd [ mddidx ] # return start , mddidx , mdd return mdd if not inc_date else ( mdd , mddidx )
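A compact pandas sketch of the same computation for a single geometric return series (numbers are made up); it finds the trough of the drawdown series and the preceding peak:

import pandas as pd

returns = pd.Series([0.02, -0.05, 0.01, -0.10, 0.08, 0.03])
wealth = (1 + returns).cumprod()            # cumulative wealth index
drawdown = wealth / wealth.cummax() - 1     # geometric drawdown series
trough = drawdown.idxmin()                  # date/index of the maximum drawdown
peak = wealth.loc[:trough].idxmax()         # preceding peak
print(round(drawdown.min(), 4), peak, trough)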
def get_workflow_status ( self , depth = 2 ) : '''Gets the workflow status . Parameters depth : int , optional query depth , i . e . the level of detail down to which the status of subtasks will be queried Returns dict status information about the workflow See also : func : ` tmserver . api . workflow . get _ workflow _ status ` : func : ` tmlib . workflow . utils . get _ task _ status ` : class : ` tmlib . models . submission . Task `'''
logger . info ( 'get status for workflow of experiment "%s"' , self . experiment_name ) params = { 'depth' : depth } url = self . _build_api_url ( '/experiments/{experiment_id}/workflow/status' . format ( experiment_id = self . _experiment_id ) , params ) res = self . _session . get ( url ) res . raise_for_status ( ) return res . json ( ) [ 'data' ]
def head ( self , route : str ( ) , callback : object ( ) ) : """Binds a HEAD route with the given callback : rtype : object"""
self . __set_route ( 'head' , { route : callback } ) return RouteMapping
def accept ( self , evt ) : """write setting to the preferences"""
# determine if application is a script file or frozen exe ( pyinstaller ) frozen = getattr ( sys , 'frozen' , False ) if frozen : app_file = sys . executable else : app_file = PathStr ( __main__ . __file__ ) . abspath ( ) if self . cb_startmenu . isChecked ( ) : # TODO : allow only logo location # icon = app _ file . dirname ( ) . join ( ' media ' , ' logo . ico ' ) StartMenuEntry ( self . name , app_file , icon = self . icon , console = False ) . create ( ) if self . cb_mime . isChecked ( ) : # get admin rights if not isAdmin ( ) : try : # run this file as _ _ main _ _ with admin rights : if frozen : cmd = "from %s import embeddIntoOS\nembeddIntoOS('%s', '%s', '%s')" % ( __name__ , '' , self . ftype , self . name ) # in this case there is no python . exe and no moduly . py to call # thats why we have to import the method and execute it runAsAdmin ( ( sys . executable , '-exec' , cmd ) ) else : runAsAdmin ( ( sys . executable , __file__ , app_file , self . ftype , self . name ) ) except : print ( 'needs admin rights to work' ) else : embeddIntoOS ( app_file , self . ftype , self . name ) QtWidgets . QDialog . accept ( self )
def publish ( self , user_id , wifi_fingerprint , action = 'track' , location_id = '' , port = 1883 ) : '''a method to publish wifi fingerprint data to a mosquitto server : param user _ id : string with id of user : param wifi _ fingerprint : list of dictionaries with wifi fields mac and rssi : param action : string with type of action to perform with data ( track or learn ) : param location _ id : [ optional ] string with classifier to add to learning data : param port : [ optional ] integer with port to connect to : return : True'''
title = '%s.publish' % self . __class__ . __name__ # validate inputs input_fields = { 'user_id' : user_id , 'wifi_fingerprint' : wifi_fingerprint , 'action' : action , 'location_id' : location_id , 'port' : port } for key , value in input_fields . items ( ) : object_title = '%s(%s=%s)' % ( title , key , str ( value ) ) self . fields . validate ( value , '.%s' % key , object_title ) # compose message fingerprint_string = '' for signal in wifi_fingerprint : fingerprint_string += signal [ 'mac' ] . replace ( ':' , '' ) rssi_string = str ( signal [ 'rssi' ] ) . replace ( '-' , '' ) if len ( rssi_string ) > 2 : fingerprint_string += ' ' fingerprint_string += rssi_string # compose channel topic_string = '%s/track/%s' % ( self . group_name , user_id ) if action == 'learn' : topic_string = '%s/learn/%s/%s' % ( self . group_name , user_id , location_id ) # send a single message to server import paho . mqtt . publish as mqtt_publish mqtt_publish . single ( topic = topic_string , payload = fingerprint_string , auth = { 'username' : self . group_name , 'password' : self . password } , hostname = self . server_url , port = port ) return True
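A standalone sketch of just the payload construction, which shows the wire format without needing a broker (MAC addresses and RSSI values below are made up):

def build_payload(wifi_fingerprint):
    # Mirrors the concatenation above: MAC without colons, then |rssi|,
    # with a space inserted only when the rssi string has more than two digits.
    payload = ''
    for signal in wifi_fingerprint:
        payload += signal['mac'].replace(':', '')
        rssi = str(signal['rssi']).replace('-', '')
        if len(rssi) > 2:
            payload += ' '
        payload += rssi
    return payload

fingerprint = [{'mac': 'aa:bb:cc:dd:ee:ff', 'rssi': -67},
               {'mac': '11:22:33:44:55:66', 'rssi': -82}]
print(build_payload(fingerprint))
print('office/learn/u1/desk')   # example topic for a 'learn' action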
def hybrid_forward ( self , F , words , weight ) : """Compute embedding of words in batch . Parameters words : mx . nd . NDArray Array of token indices ."""
# pylint : disable = arguments - differ embeddings = F . sparse . dot ( words , weight ) return embeddings
def get_model_creation_kwargs ( model_obj ) : """Get a dictionary of the keyword arguments needed to create the passed model object using ` pylogit . create _ choice _ model ` . Parameters model _ obj : An MNDC _ Model instance . Returns model _ kwargs : dict . Contains the keyword arguments and the required values that are needed to initialize a replica of ` model _ obj ` ."""
# Extract the model abbreviation for this model model_abbrev = get_model_abbrev ( model_obj ) # Create a dictionary to store the keyword arguments needed to initialize # the new model object . model_kwargs = { "model_type" : model_abbrev , "names" : model_obj . name_spec , "intercept_names" : model_obj . intercept_names , "intercept_ref_pos" : model_obj . intercept_ref_position , "shape_names" : model_obj . shape_names , "shape_ref_pos" : model_obj . shape_ref_position , "nest_spec" : model_obj . nest_spec , "mixing_vars" : model_obj . mixing_vars , "mixing_id_col" : model_obj . mixing_id_col } return model_kwargs
def normalized_start ( self ) : """Returns a NamespaceRange with leading non - existent namespaces removed . Returns : A copy of this NamespaceRange whose namespace _ start is adjusted to exclude the portion of the range that contains no actual namespaces in the datastore . None is returned if the NamespaceRange contains no actual namespaces in the datastore ."""
namespaces_after_key = list ( self . make_datastore_query ( ) . Run ( limit = 1 ) ) if not namespaces_after_key : return None namespace_after_key = namespaces_after_key [ 0 ] . name ( ) or '' return NamespaceRange ( namespace_after_key , self . namespace_end , _app = self . app )
def _generate_features ( self , feature_extractors ) : """Run all FeatureExtractors and record results in a key - value format . : param feature _ extractors : iterable of ` FeatureExtractor ` objects ."""
results = [ pd . DataFrame ( ) ] n_ext = len ( feature_extractors ) for i , extractor in enumerate ( feature_extractors ) : log . info ( "generating: '%s' (%d/%d)" , extractor . name , i + 1 , n_ext ) cached_extractor = self . _cache [ extractor . name ] if extractor . same ( cached_extractor ) : log . info ( 'pulling from cache' ) extractor = cached_extractor else : log . info ( 'running...' ) extractor . extract ( ) results . append ( extractor . result ) if self . cache_path : self . _cache [ extractor . name ] = extractor if self . cache_path : with open ( self . cache_path , 'wb' ) as f : pickle . dump ( self . _cache , f ) return pd . concat ( results , axis = 1 )
def find_coord_vars ( ncds ) : """Finds all coordinate variables in a dataset . A variable with the same name as a dimension is called a coordinate variable ."""
coord_vars = [ ] for d in ncds . dimensions : if d in ncds . variables and ncds . variables [ d ] . dimensions == ( d , ) : coord_vars . append ( ncds . variables [ d ] ) return coord_vars
def link_to_sibling ( self , feed , sibling_type , atom_feed ) : """Adding previous or next links to the given feed self . _ link _ to _ sibling ( feed , ' previous ' , atom _ feed ) self . _ link _ to _ sibling ( feed , ' next ' , atom _ feed ) : param feed : a feed object : param sibling _ type : ' previous ' or ' next ' : param atom _ feed : an atom feed like ` feedgen . feed . FeedGenerator `"""
sibling = self . atom_feed_manager . get_sibling ( feed . id , sibling_type ) if sibling : rel = "prev-archive" if sibling_type == "previous" else "next-archive" atom_feed . link ( href = self . request . route_url ( self . get_atom_feed_url , id = sibling . id ) , rel = rel )