signature (string, lengths 29 to 44.1k)
implementation (string, lengths 0 to 85.2k)
def getBody(self):
    """Extract body JSON."""
    data = None
    try:
        data = json.loads(self.request.body)
    except:
        data = json.loads(urllib.unquote_plus(self.request.body))
    return data
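A small, self-contained illustration of the fallback path (Python 3 urllib.parse shown here; the raw body is a hypothetical form-encoded payload, not taken from the source):

import json
import urllib.parse

raw = urllib.parse.quote_plus('{"name": "demo"}')  # body arrives URL-encoded
# json.loads(raw) would raise a ValueError, so decode the body first,
# mirroring the except branch above
print(json.loads(urllib.parse.unquote_plus(raw)))  # {'name': 'demo'}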
def get_reference_template(self, ref_type):
    """Return the reference template for the type as an ordered dictionary.

    Zotero.item_template() caches data after the first API call.
    """
    template = self._zotero_lib.item_template(ref_type)
    return OrderedDict(sorted(template.items(), key=lambda x: x[0]))
def power_on(self):
    """Power on the set-top box."""
    if not self.is_powered_on():
        log.debug('Powering on set-top box at %s:%s.', self.ip, self.port)
        self.send_key(keys.POWER)
def get_checked(self):
    """Return the list of checked items that do not have any child."""
    checked = []

    def get_checked_children(item):
        if not self.tag_has("unchecked", item):
            ch = self.get_children(item)
            if not ch and self.tag_has("checked", item):
                checked.append(item)
            else:
                for c in ch:
                    get_checked_children(c)

    ch = self.get_children("")
    for c in ch:
        get_checked_children(c)
    return checked
def get_all_tags(self):
    """Return a tuple of lists ([common_tags], [anti_tags], [organisational_tags]) of all tags of all tasks of this course.

    Since this is a heavy procedure, we cache the results. The cache should be
    updated when a task is modified.
    """
    if self._all_tags_cache is not None:
        return self._all_tags_cache
    tag_list_common = set()
    tag_list_misconception = set()
    tag_list_org = set()
    tasks = self.get_tasks()
    for task_id, task in tasks.items():
        for tag in task.get_tags()[0]:
            tag_list_common.add(tag)
        for tag in task.get_tags()[1]:
            tag_list_misconception.add(tag)
        for tag in task.get_tags()[2]:
            tag_list_org.add(tag)
    tag_list_common = natsorted(tag_list_common, key=lambda y: y.get_name().lower())
    tag_list_misconception = natsorted(tag_list_misconception, key=lambda y: y.get_name().lower())
    tag_list_org = natsorted(tag_list_org, key=lambda y: y.get_name().lower())
    self._all_tags_cache = (list(tag_list_common), list(tag_list_misconception), list(tag_list_org))
    return self._all_tags_cache
def find_file_structure(self, body, params=None):
    """`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-file-structure.html>`_

    :arg body: The contents of the file to be analyzed
    :arg charset: Optional parameter to specify the character set of the file
    :arg column_names: Optional parameter containing a comma-separated list of
        the column names for a delimited file
    :arg delimiter: Optional parameter to specify the delimiter character for
        a delimited file - must be a single character
    :arg explain: Whether to include a commentary on how the structure was
        derived, default False
    :arg format: Optional parameter to specify the high level file format,
        valid choices are: 'ndjson', 'xml', 'delimited', 'semi_structured_text'
    :arg grok_pattern: Optional parameter to specify the Grok pattern that
        should be used to extract fields from messages in a semi-structured
        text file
    :arg has_header_row: Optional parameter to specify whether a delimited
        file includes the column names in its first row
    :arg lines_to_sample: How many lines of the file should be included in
        the analysis, default 1000
    :arg quote: Optional parameter to specify the quote character for a
        delimited file - must be a single character
    :arg should_trim_fields: Optional parameter to specify whether the values
        between delimiters in a delimited file should have whitespace trimmed
        from them
    :arg timeout: Timeout after which the analysis will be aborted, default '25s'
    :arg timestamp_field: Optional parameter to specify the timestamp field in
        the file
    :arg timestamp_format: Optional parameter to specify the timestamp format
        in the file - may be either a Joda or Java time format
    """
    if body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'body'.")
    return self.transport.perform_request(
        "POST",
        "/_ml/find_file_structure",
        params=params,
        body=self._bulk_body(body),
    )
def WriteClientStartupInfo(self, client_id, startup_info, cursor=None):
    """Writes a new client startup record."""
    query = """
    SET @now = NOW(6);

    INSERT INTO client_startup_history (client_id, timestamp, startup_info)
    VALUES (%(client_id)s, @now, %(startup_info)s);

    UPDATE clients
    SET last_startup_timestamp = @now
    WHERE client_id = %(client_id)s;
    """
    params = {
        "client_id": db_utils.ClientIDToInt(client_id),
        "startup_info": startup_info.SerializeToString(),
    }
    try:
        cursor.execute(query, params)
    except MySQLdb.IntegrityError as e:
        raise db.UnknownClientError(client_id, cause=e)
def _netstat_sunos():
    '''
    Return netstat information for SunOS flavors
    '''
    log.warning('User and program not (yet) supported on SunOS')
    ret = []
    for addr_family in ('inet', 'inet6'):
        # Lookup TCP connections
        cmd = 'netstat -f {0} -P tcp -an | tail +5'.format(addr_family)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({
                'proto': 'tcp6' if addr_family == 'inet6' else 'tcp',
                'recv-q': comps[5],
                'send-q': comps[4],
                'local-address': comps[0],
                'remote-address': comps[1],
                'state': comps[6]})
        # Lookup UDP connections
        cmd = 'netstat -f {0} -P udp -an | tail +5'.format(addr_family)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({
                'proto': 'udp6' if addr_family == 'inet6' else 'udp',
                'local-address': comps[0],
                'remote-address': comps[1] if len(comps) > 2 else ''})
    return ret
def Back(self, n=1, dl=0):
    """Press the Backspace key n times."""
    self.Delay(dl)
    self.keyboard.tap_key(self.keyboard.backspace_key, n)
def check_for_lime(self, pattern):
    """Check to see if LiME has loaded on the remote system.

    :type pattern: str
    :param pattern: pattern to check output against
    """
    check = self.commands.lime_check.value
    lime_loaded = False
    result = self.shell.execute(check)
    stdout = self.shell.decode(result['stdout'])
    connections = self.net_parser.parse(stdout)
    for conn in connections:
        local_addr, remote_addr = conn
        if local_addr == pattern:
            lime_loaded = True
            break
    return lime_loaded
def rebin_spec(spec, wavnew, oversamp=100, plot=False):
    """Rebin a spectrum to a new wavelength array while preserving the total flux.

    Parameters
    ----------
    spec : array-like
        The wavelength and flux to be binned
    wavnew : array-like
        The new wavelength array

    Returns
    -------
    np.ndarray
        The rebinned flux
    """
    wave, flux = spec
    nlam = len(wave)
    x0 = np.arange(nlam, dtype=float)
    x0int = np.arange((nlam - 1.) * oversamp + 1., dtype=float) / oversamp
    w0int = np.interp(x0int, x0, wave)
    spec0int = np.interp(w0int, wave, flux) / oversamp
    # Set up the bin edges for down-binning
    maxdiffw1 = np.diff(wavnew).max()
    w1bins = np.concatenate(([wavnew[0] - maxdiffw1],
                             .5 * (wavnew[1::] + wavnew[0:-1]),
                             [wavnew[-1] + maxdiffw1]))
    # Bin down the interpolated spectrum:
    w1bins = np.sort(w1bins)
    nbins = len(w1bins) - 1
    specnew = np.zeros(nbins)
    inds2 = [[w0int.searchsorted(w1bins[ii], side='left'),
              w0int.searchsorted(w1bins[ii + 1], side='left')] for ii in range(nbins)]
    for ii in range(nbins):
        specnew[ii] = np.sum(spec0int[inds2[ii][0]:inds2[ii][1]])
    return specnew
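A minimal usage sketch with a hypothetical Gaussian emission line (assumes numpy is imported as np and rebin_spec is in scope); the summed flux of the output should roughly match the input:

import numpy as np

wave = np.linspace(1.0, 2.0, 200)
flux = np.exp(-(wave - 1.5) ** 2 / 0.01)   # synthetic emission line
wavnew = np.linspace(1.0, 2.0, 50)         # coarser wavelength grid
flux_new = rebin_spec((wave, flux), wavnew)
print(flux.sum(), flux_new.sum())          # totals agree to within the resampling error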
def ensure_local_files():
    """Ensure that filesystem is setup/filled out in a valid way."""
    if _file_permissions:
        if not os.path.isdir(AUTH_DIR):
            os.mkdir(AUTH_DIR)
        for fn in [CONFIG_FILE]:
            contents = load_json_dict(fn)
            for key, val in list(_FILE_CONTENT[fn].items()):
                if key not in contents:
                    contents[key] = val
            contents_keys = list(contents.keys())
            for key in contents_keys:
                if key not in _FILE_CONTENT[fn]:
                    del contents[key]
            save_json_dict(fn, contents)
    else:
        warnings.warn("Looks like you don't have 'read-write' permission to "
                      "your 'home' ('~') directory")
def flatten(source, task_limit=None):
    """Given an asynchronous sequence of sequences, generate the elements of
    the sequences as soon as they're received.

    The sequences are awaited concurrently, although it's possible to limit
    the amount of running sequences using the `task_limit` argument.

    Errors raised in the source or an element sequence are propagated.
    """
    return base_combine.raw(source, task_limit=task_limit, switch=False, ordered=False)
def _put_buffers(state, buffer_paths, buffers):
    """The inverse of _remove_buffers, except here we modify the existing dict/lists.

    Modifying should be fine, since this is used when state comes from the wire.
    """
    for buffer_path, buffer in zip(buffer_paths, buffers):
        # we'd like to set say sync_data['x'][0]['y'] = buffer
        # where buffer_path in this example would be ['x', 0, 'y']
        obj = state
        for key in buffer_path[:-1]:
            obj = obj[key]
        obj[buffer_path[-1]] = buffer
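A tiny, self-contained demonstration of the path walk above (hypothetical state dict and buffer):

state = {'x': [{'y': None}]}
_put_buffers(state, buffer_paths=[['x', 0, 'y']], buffers=[b'\x00\x01'])
print(state)  # {'x': [{'y': b'\x00\x01'}]}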
def expose_event(self, widget, event):
    """When an area of the window is exposed, we just copy out of the
    server-side, off-screen surface to that area.
    """
    x, y, width, height = event.area
    self.logger.debug("surface is %s" % self.surface)
    if self.surface is not None:
        win = widget.get_window()
        cr = win.cairo_create()
        # set clip area for exposed region
        cr.rectangle(x, y, width, height)
        cr.clip()
        # Paint from off-screen surface
        cr.set_source_surface(self.surface, 0, 0)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()
    return False
def _start_http_session(self):
    """Start a new requests HTTP session, clearing cookies and session data.

    :return: None
    """
    api_logger.debug("Starting new HTTP session...")
    self.session = requests.Session()
    self.session.headers.update({"User-Agent": self.user_agent})
    if self.username and self.password:
        api_logger.debug("Requests will use authorization.")
        self.session.auth = HTTPBasicAuth(self.username, self.password)
def read_table_pattern(self, header_pattern, row_pattern, footer_pattern,
                       postprocess=str, attribute_name=None, last_one_only=True):
    """Parse table-like data. A table consists of three parts: header, main
    body, footer. All the data that matches "row pattern" in the main body
    will be returned.

    Args:
        header_pattern (str): The regular expression pattern that matches the
            table header. This pattern should match all the text immediately
            before the main body of the table. For a multiple-section table,
            match the text up to the section of interest. MULTILINE and DOTALL
            options are enforced; as a result, the "." meta-character will
            also match "\n" in this section.
        row_pattern (str): The regular expression that matches a single line
            in the table. Capture interesting fields using regular expression
            groups.
        footer_pattern (str): The regular expression that matches the end of
            the table, e.g. a long dash line.
        postprocess (callable): A post-processing function to convert all
            matches. Defaults to str, i.e., no change.
        attribute_name (str): Name of this table. If present, the parsed data
            will be attached to "data", e.g. self.data["efg"] = [...]
        last_one_only (bool): All the tables will be parsed; if this option is
            set to True, only the last table will be returned and the
            enclosing list will be removed, i.e. only a single table will be
            returned. Defaults to True.

    Returns:
        List of tables. 1) A table is a list of rows. 2) A row is either a
        list of attribute values, in case the capturing groups are defined
        without names in row_pattern, or a dict in case named capturing
        groups are defined by row_pattern.
    """
    with zopen(self.filename, 'rt') as f:
        text = f.read()
    table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + row_pattern + r")+)\s+" + footer_pattern
    table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
    rp = re.compile(row_pattern)
    tables = []
    for mt in table_pattern.finditer(text):
        table_body_text = mt.group("table_body")
        table_contents = []
        for line in table_body_text.split("\n"):
            ml = rp.search(line)
            d = ml.groupdict()
            if len(d) > 0:
                processed_line = {k: postprocess(v) for k, v in d.items()}
            else:
                processed_line = [postprocess(v) for v in ml.groups()]
            table_contents.append(processed_line)
        tables.append(table_contents)
    if last_one_only:
        retained_data = tables[-1]
    else:
        retained_data = tables
    if attribute_name is not None:
        self.data[attribute_name] = retained_data
    return retained_data
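A self-contained sketch of the same header/row/footer regex construction, using hypothetical patterns and text rather than any real output file:

import re

header_pattern = r"Energies \(eV\):\s*-+"
row_pattern = r"\s*(\d+)\s+([-\d.]+)"
footer_pattern = r"-+"
text = """Energies (eV):
----------
  1   -1.50
  2   -2.25
----------
"""
table_re = re.compile(
    header_pattern + r"\s*^(?P<table_body>(?:\s+" + row_pattern + r")+)\s+" + footer_pattern,
    re.MULTILINE | re.DOTALL)
body = table_re.search(text).group("table_body")
rows = [re.search(row_pattern, line).groups() for line in body.split("\n") if line.strip()]
print(rows)  # [('1', '-1.50'), ('2', '-2.25')]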
def show_namespace(name, **kwargs):
    '''
    Return information for a given namespace defined by the specified name

    CLI Examples::

        salt '*' kubernetes.show_namespace kube-system
    '''
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.read_namespace(name)
        return api_response.to_dict()
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception('Exception when calling CoreV1Api->read_namespace')
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)
def set_work_request(self, worker_name, sample_set, subkeys=None):
    """Make a work request for an existing stored sample (or sample_set).

    Args:
        worker_name: 'strings', 'pe_features', whatever
        sample_set: the md5 of a sample_set in the Workbench data store
        subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all)

    Returns:
        The output is a generator of the results of the worker output for the sample_set
    """
    # Does worker support sample_set_input?
    if self.plugin_meta[worker_name]['sample_set_input']:
        yield self.work_request(worker_name, sample_set, subkeys)
    else:
        # Loop through all the md5s and return a generator with yield
        md5_list = self.get_sample_set(sample_set)
        for md5 in md5_list:
            if subkeys:
                yield self.work_request(worker_name, md5, subkeys)
            else:
                yield self.work_request(worker_name, md5)[worker_name]
def fatal(ftn, txt):
    """If can't continue."""
    msg = "{0}.{1}:FATAL:{2}\n".format(modname, ftn, txt)
    raise SystemExit(msg)
def setmonitor(self, enable=True):
    """Alias for setmode('monitor') or setmode('managed').

    Only available with Npcap.
    """
    # We must reset the monitor cache
    if enable:
        res = self.setmode('monitor')
    else:
        res = self.setmode('managed')
    if not res:
        log_runtime.error("Npcap WlanHelper returned with an error code!")
    self.cache_mode = None
    tmp = self.cache_mode = self.ismonitor()
    return tmp if enable else (not tmp)
def attack_class(self, x, target_y):
    """Run the attack on a specific target class.

    :param x: tf Tensor. The input example.
    :param target_y: tf Tensor. The attacker's desired target class.

    Returns:
        A targeted adversarial example, intended to be classified as the target class.
    """
    adv = self.base_attacker.generate(x, y_target=target_y, **self.params)
    return adv
def export_profile(self):
    """Export minimum needs to a json file.

    This method will save the current state of the minimum needs setup.
    Then open a dialog allowing the user to browse to the desired destination
    location and allow the user to save the needs as a json file.
    """
    file_name_dialog = QFileDialog(self)
    file_name_dialog.setAcceptMode(QFileDialog.AcceptSave)
    file_name_dialog.setNameFilter(self.tr('JSON files (*.json *.JSON)'))
    file_name_dialog.setDefaultSuffix('json')
    file_name = None
    if file_name_dialog.exec_():
        file_name = file_name_dialog.selectedFiles()[0]
    if file_name != '' and file_name is not None:
        self.minimum_needs.write_to_file(file_name)
def console_set_default_background(con: tcod.console.Console, col: Tuple[int, int, int]) -> None:
    """Change the default background color for a console.

    Args:
        con (Console): Any Console instance.
        col (Union[Tuple[int, int, int], Sequence[int]]): An (r, g, b) sequence
            or Color instance.

    .. deprecated:: 8.5
        Use :any:`Console.default_bg` instead.
    """
    lib.TCOD_console_set_default_background(_console(con), col)
def debug(self, key):
    """Returns True if the debug setting is enabled."""
    return (not self.quiet and
            not self.debug_none and
            (self.debug_all or getattr(self, "debug_%s" % key)))
def move_to_pat(pat: str, offset: (float, float) = None, tolerance: int = 0) -> None:
    """See help for click_on_pat."""
    with tempfile.NamedTemporaryFile() as f:
        subprocess.call(
            '''xwd -root -silent -display :0 | convert xwd:- png:''' + f.name,
            shell=True)
        loc = visgrep(f.name, pat, tolerance)
        pat_size = get_png_dim(pat)
        if offset is None:
            x, y = [l + ps // 2 for l, ps in zip(loc, pat_size)]
        else:
            x, y = [l + ps * (off / 100) for off, l, ps in zip(offset, loc, pat_size)]
        mouse_move(x, y)
def replace(self, s, data, attrs=None):
    """Replace the attributes of the plotter data in a string.

    %(replace_note)s

    Parameters
    ----------
    s : str
        String where the replacements shall be made
    data : InteractiveBase
        Data object from which to use the coordinates and insert the
        coordinate and attribute information
    attrs : dict
        Meta attributes that shall be used for replacements. If None, it will
        be gained from `data.attrs`

    Returns
    -------
    str
        `s` with inserted information
    """
    # insert labels
    s = s.format(**self.rc['labels'])
    # replace attributes
    attrs = attrs or data.attrs
    if hasattr(getattr(data, 'psy', None), 'arr_name'):
        attrs = attrs.copy()
        attrs['arr_name'] = data.psy.arr_name
    s = safe_modulo(s, attrs)
    # replace datetime.datetime like time information
    if isinstance(data, InteractiveList):
        data = data[0]
    tname = self.any_decoder.get_tname(
        next(self.plotter.iter_base_variables), data.coords)
    if tname is not None and tname in data.coords:
        time = data.coords[tname]
        if not time.values.ndim:
            try:
                # assume a valid datetime.datetime instance
                s = pd.to_datetime(str(time.values[()])).strftime(s)
            except ValueError:
                pass
    if six.PY2:
        return s.decode('utf-8')
    return s
def task_ref_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Process a role that references the target nodes created by the
    ``lsst-task`` directive.

    Parameters
    ----------
    name
        The role name used in the document.
    rawtext
        The entire markup snippet, with role.
    text
        The text marked with the role.
    lineno
        The line number where ``rawtext`` appears in the input.
    inliner
        The inliner instance that called us.
    options
        Directive options for customization.
    content
        The directive content for customization.

    Returns
    -------
    nodes : `list`
        List of nodes to insert into the document.
    messages : `list`
        List of system messages.
    """
    # app = inliner.document.settings.env.app
    node = pending_task_xref(rawsource=text)
    return [node], []
def update_req(req):
    """Updates a given req object with the latest version."""
    if not req.name:
        return req, None
    info = get_package_info(req.name)
    if info['info'].get('_pypi_hidden'):
        print('{} is hidden on PyPI and will not be updated.'.format(req))
        return req, None
    if _is_pinned(req) and _is_version_range(req):
        print('{} is pinned to a range and will not be updated.'.format(req))
        return req, None
    newest_version = _get_newest_version(info)
    current_spec = next(iter(req.specifier)) if req.specifier else None
    current_version = current_spec.version if current_spec else None
    new_spec = Specifier(u'=={}'.format(newest_version))
    if not current_spec or current_spec._spec != new_spec._spec:
        req.specifier = new_spec
        update_info = (req.name, current_version, newest_version)
        return req, update_info
    return req, None
def default_display(value, with_module=True):
    """Default display for unknown objects."""
    object_type = type(value)
    try:
        name = object_type.__name__
        module = object_type.__module__
        if with_module:
            return name + ' object of ' + module + ' module'
        else:
            return name
    except:
        type_str = to_text_string(object_type)
        return type_str[1:-1]
def list_env(self, saltenv='base'):
    '''
    Return a list of the files in the file server's specified environment
    '''
    load = {'saltenv': saltenv,
            'cmd': '_file_list'}
    return salt.utils.data.decode(self.channel.send(load)) \
        if six.PY2 else self.channel.send(load)
def convert(args):
    """
    %prog convert in.fastq

    illumina fastq quality encoding uses offset 64, and sanger uses 33. This
    script creates a new file with the correct encoding. Output gzipped file
    if input is also gzipped.
    """
    p = OptionParser(convert.__doc__)
    p.set_phred()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    infastq, = args
    phred = opts.phred or str(guessoffset([infastq]))
    ophred = {"64": "33", "33": "64"}[phred]
    gz = infastq.endswith(".gz")
    outfastq = infastq.rsplit(".", 1)[0] if gz else infastq
    pf, sf = outfastq.rsplit(".", 1)
    outfastq = "{0}.q{1}.{2}".format(pf, ophred, sf)
    if gz:
        outfastq += ".gz"
    fin = "illumina" if phred == "64" else "sanger"
    fout = "sanger" if phred == "64" else "illumina"
    seqret = "seqret"
    if infastq.endswith(".gz"):
        cmd = "zcat {0} | ".format(infastq)
        cmd += seqret + " fastq-{0}::stdin fastq-{1}::stdout".format(fin, fout)
    else:
        cmd = seqret + " fastq-{0}::{1} fastq-{2}::stdout".format(fin, infastq, fout)
    sh(cmd, outfile=outfastq)
    return outfastq
def info_dialog(self, title="Information", message="", **kwargs):
    """Show an information dialog

    Usage: C{dialog.info_dialog(title="Information", message="", **kwargs)}

    @param title: window title for the dialog
    @param message: message displayed in the dialog
    @return: a tuple containing the exit code and user input
    @rtype: C{DialogData(int, str)}
    """
    return self._run_kdialog(title, ["--msgbox", message], kwargs)
def snap_picture(self):
    """Take a picture with camera to create a new thumbnail."""
    return api.request_new_image(self.sync.blink, self.network_id, self.camera_id)
def enter_unlink_mode(self):
    """Enter unlinking mode for a group."""
    self.logger.info("enter_unlink_mode Group %s", self.group_id)
    self.scene_command('0A')
    # should send http://0.0.0.0/0?0A01=I=0
    # TODO check return status
    status = self.hub.get_buffer_status()
    return status
def insert(self, row):
    """Add a row (type: dict) by inserting it into the table.
    Columns must exist.

        data = dict(title='I am a banana!')
        table.insert(data)

    Returns the inserted row's primary key.
    """
    self._check_dropped()
    res = self.engine.execute(self.table.insert(row))
    if len(res.inserted_primary_key) > 0:
        return res.inserted_primary_key[0]
def ack(self):
    """Acknowledge Message.

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection encountered an error.

    :return:
    """
    if not self._method:
        raise AMQPMessageError('Message.ack only available on incoming messages')
    self._channel.basic.ack(delivery_tag=self.delivery_tag)
def read_value(self, key, filepath=None, filename=None):
    """Tries to read the value of given key from JSON file filename.

    :param filepath: Path to file
    :param filename: Name of file
    :param key: Key to search for
    :return: Value corresponding to given key
    :raises OSError, EnvironmentError, KeyError
    """
    path = filepath if filepath else self.filepath
    name = filename if filename else self.filename
    name = self._ends_with(name, ".json")
    path = self._ends_with(path, os.path.sep)
    try:
        output = self._read_json(path, name)
        if key not in output:
            raise KeyError("Key '{}' not found in file {}".format(key, filename))
        else:
            return output[key]
    except EnvironmentError as error:
        self.logger.error("Error while opening or reading the file: {}".format(error))
        raise
def standardized_compound(self):
    """Return the :class:`~pubchempy.Compound` that was produced when this
    Substance was standardized.

    Requires an extra request. Result is cached.
    """
    for c in self.record['compound']:
        if c['id']['type'] == CompoundIdType.STANDARDIZED:
            return Compound.from_cid(c['id']['id']['cid'])
def addPos(self, dp_x=None, dy=None, dz=None):
    """Add vector to current actor position."""
    p = np.array(self.GetPosition())
    if dz is None:
        # assume dp_x is of the form (x, y, z)
        self.SetPosition(p + dp_x)
    else:
        self.SetPosition(p + [dp_x, dy, dz])
    if self.trail:
        self.updateTrail()
    return self
def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None):
    """Creates output directory and output name.

    Parameters
    ----------
    f : str
        input files, includes the file bids_suffix
    tag : str
        what should be added to f in the output file.
    save_directory : str
        additional directory that the output file should go in
    suffix : str
        add new suffix to data

    Returns
    -------
    save_name : str
        previous filename with new tag
    save_dir : str
        directory where it will be saved
    base_dir : str
        subjective base directory (i.e. derivatives/teneto/func[/anythingelse/])
    """
    file_name = f.split('/')[-1].split('.')[0]
    if tag != '':
        tag = '_' + tag
    if suffix:
        file_name, _ = drop_bids_suffix(file_name)
        save_name = file_name + tag
        save_name += '_' + suffix
    else:
        save_name = file_name + tag
    paths_post_pipeline = f.split(self.pipeline)
    if self.pipeline_subdir:
        paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[0]
    else:
        paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
    base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/' + paths_post_pipeline + '/'
    save_dir = base_dir + '/' + save_directory + '/'
    if not os.path.exists(save_dir):
        # A case has happened where this has been done in parallel and an
        # error was raised, so use try/except
        try:
            os.makedirs(save_dir)
        except:
            # Wait 2 seconds so that the error does not try and save something
            # in the directory before it is created
            time.sleep(2)
    if not os.path.exists(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json'):
        try:
            with open(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json', 'w') as fs:
                json.dump(self.tenetoinfo, fs)
        except:
            # Same as above, just in case parallel does duplication
            time.sleep(2)
    return save_name, save_dir, base_dir
def get_default_config(self):
    """Returns the default collector settings."""
    config = super(VarnishCollector, self).get_default_config()
    config.update({
        'path': 'varnish',
        'bin': '/usr/bin/varnishstat',
        'use_sudo': False,
        'sudo_cmd': '/usr/bin/sudo',
    })
    return config
def structure_results(res):
    """Format Elasticsearch result as Python dictionary."""
    out = {'hits': {'hits': []}}
    keys = [u'admin1_code', u'admin2_code', u'admin3_code', u'admin4_code',
            u'alternativenames', u'asciiname', u'cc2', u'coordinates',
            u'country_code2', u'country_code3', u'dem', u'elevation',
            u'feature_class', u'feature_code', u'geonameid',
            u'modification_date', u'name', u'population', u'timezone']
    for i in res:
        i_out = {}
        for k in keys:
            i_out[k] = i[k]
        out['hits']['hits'].append(i_out)
    return out
def drop_callback_reference(self, subscription):
    """Drop reference to the callback function after unsubscribing.

    Any future messages arriving for that subscription will result in
    exceptions being raised.

    :param subscription: Subscription ID to delete callback reference for.
    """
    if subscription not in self.__subscriptions:
        raise workflows.Error("Attempting to drop callback reference for unknown subscription")
    if not self.__subscriptions[subscription]["unsubscribed"]:
        raise workflows.Error("Attempting to drop callback reference for live subscription")
    del self.__subscriptions[subscription]
def download_next_song(self, song):
    """Downloads the next song and starts playing it."""
    dl_ydl_opts = dict(ydl_opts)
    dl_ydl_opts["progress_hooks"] = [self.ytdl_progress_hook]
    dl_ydl_opts["outtmpl"] = self.output_format
    # Move the songs from the next cache to the current cache
    self.move_next_cache()
    self.state = 'ready'
    self.play_empty()
    # Download the file and create the stream
    with youtube_dl.YoutubeDL(dl_ydl_opts) as ydl:
        try:
            ydl.download([song])
        except DownloadStreamException:
            # This is a livestream, use the appropriate player
            future = asyncio.run_coroutine_threadsafe(
                self.create_stream_player(song, dl_ydl_opts), client.loop)
            try:
                future.result()
            except Exception as e:
                logger.exception(e)
                self.vafter_ts()
                return
        except PermissionError:
            # File is still in use, it'll get cleared next time
            pass
        except youtube_dl.utils.DownloadError as e:
            self.logger.exception(e)
            self.statuslog.error(e)
            self.vafter_ts()
            return
        except Exception as e:
            self.logger.exception(e)
            self.vafter_ts()
            return
def item_frequency(sa, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
    """Plots an item frequency of the sarray provided as input, and returns
    the resulting Plot object.

    The function supports SArrays with dtype str.

    Parameters
    ----------
    sa : SArray
        The data to get an item frequency for. Must have dtype str
    xlabel : str (optional)
        The text label for the X axis. Defaults to "Values".
    ylabel : str (optional)
        The text label for the Y axis. Defaults to "Count".
    title : str (optional)
        The title of the plot. Defaults to LABEL_DEFAULT. If the value is
        LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
        is None, the title will be omitted. Otherwise, the string passed in as
        the title will be used as the plot title.

    Returns
    -------
    out : Plot
        A :class:Plot object that is the item frequency plot.

    Examples
    --------
    Make an item frequency of an SArray.

    >>> x = turicreate.SArray(['a', 'ab', 'acd', 'ab', 'a', 'a', 'a', 'ab', 'cd'])
    >>> ifplt = turicreate.visualization.item_frequency(x)
    """
    if (not isinstance(sa, tc.data_structures.sarray.SArray) or
            sa.dtype != str):
        raise ValueError("turicreate.visualization.item_frequency supports "
                         + "SArrays of dtype str")
    title = _get_title(title)
    plt_ref = tc.extensions.plot_item_frequency(sa, xlabel, ylabel, title)
    return Plot(plt_ref)
def open_remote_file(dataset_key, file_name, profile='default', mode='w', **kwargs):
    """Open a remote file object that can be used to write to or read from
    a file in a data.world dataset.

    :param dataset_key: Dataset identifier, in the form of owner/id
    :type dataset_key: str
    :param file_name: The name of the file to open
    :type file_name: str
    :param mode: the mode for the file - must be 'w', 'wb', 'r', or 'rb' -
        indicating read/write ('r'/'w') and optionally "binary" handling of
        the file data. (Default value = 'w')
    :type mode: str, optional
    :param chunk_size: size of chunked bytes to return when reading streamed
        bytes in 'rb' mode
    :type chunk_size: int, optional
    :param decode_unicode: whether to decode textual responses as unicode when
        returning streamed lines in 'r' mode
    :type decode_unicode: bool, optional
    :param profile: (Default value = 'default')
    :param **kwargs:

    Examples
    --------
    >>> import datadotworld as dw
    >>>
    >>> # write a text file
    >>> with dw.open_remote_file('username/test-dataset', 'test.txt') as w:
    ...     w.write("this is a test.")
    >>>
    >>> # write a jsonlines file
    >>> import json
    >>> with dw.open_remote_file('username/test-dataset', 'test.jsonl') as w:
    ...     json.dump({'foo': 42, 'bar': "A"}, w)
    ...     w.write("\\n")
    ...     json.dump({'foo': 13, 'bar': "B"}, w)
    ...     w.write("\\n")
    >>>
    >>> # write a csv file
    >>> import csv
    >>> with dw.open_remote_file('username/test-dataset', 'test.csv') as w:
    ...     csvw = csv.DictWriter(w, fieldnames=['foo', 'bar'])
    ...     csvw.writeheader()
    ...     csvw.writerow({'foo': 42, 'bar': "A"})
    ...     csvw.writerow({'foo': 13, 'bar': "B"})
    >>>
    >>> # write a pandas dataframe as a csv file
    >>> import pandas as pd
    >>> df = pd.DataFrame({'foo': [1, 2, 3, 4], 'bar': ['a', 'b', 'c', 'd']})
    >>> with dw.open_remote_file('username/test-dataset', 'dataframe.csv') as w:
    ...     df.to_csv(w, index=False)
    >>>
    >>> # write a binary file
    >>> with dw.open_remote_file('username/test-dataset', 'test.txt', mode='wb') as w:
    ...     w.write(bytes([100, 97, 116, 97, 46, 119, 111, 114, 108, 100]))
    >>>
    >>> # read a text file
    >>> with dw.open_remote_file('username/test-dataset', 'test.txt', mode='r') as r:
    ...     print(r.read())
    >>>
    >>> # read a csv file
    >>> with dw.open_remote_file('username/test-dataset', 'test.csv', mode='r') as r:
    ...     csvr = csv.DictReader(r)
    ...     for row in csvr:
    ...         print(row['column a'], row['column b'])
    >>>
    >>> # read a binary file
    >>> with dw.open_remote_file('username/test-dataset', 'test', mode='rb') as r:
    ...     bytes = r.read()
    """
    return _get_instance(profile, **kwargs).open_remote_file(
        dataset_key, file_name, mode=mode, **kwargs)
def _create_trial_info(self, expr_dir):
    """Create information for given trial.

    Meta file will be loaded if exists, and the trial information will be
    saved in db backend.

    Args:
        expr_dir (str): Directory path of the experiment.
    """
    meta = self._build_trial_meta(expr_dir)
    self.logger.debug("Create trial for %s" % meta)
    trial_record = TrialRecord.from_json(meta)
    trial_record.save()
def _new(self, dx_hash, close=False, **kwargs):
    """
    :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()`
        containing attributes common to all data object classes.
    :type dx_hash: dict
    :param init_from: Record from which to initialize the metadata
    :type init_from: :class:`DXRecord`
    :param close: Whether or not to close the record immediately after creating it
    :type close: boolean

    Create a new remote record object.
    """
    if "init_from" in kwargs:
        if kwargs["init_from"] is not None:
            if not isinstance(kwargs["init_from"], DXRecord):
                raise DXError("Expected instance of DXRecord to init_from")
            dx_hash["initializeFrom"] = {
                "id": kwargs["init_from"].get_id(),
                "project": kwargs["init_from"].get_proj_id()}
        del kwargs["init_from"]
    if close:
        dx_hash["close"] = True
    resp = dxpy.api.record_new(dx_hash, **kwargs)
    self.set_ids(resp["id"], dx_hash["project"])
def guess_value(text_value):  # type: (str) -> str
    """Get string value for common strings.

    Method is far from complete but helps with odd arxml files.

    :param text_value: value in text like "true"
    :return: string for value like "1"
    """
    if sys.version_info >= (3, 0):
        text_value = text_value.casefold()
    else:
        text_value = text_value.lower()
    if text_value in ["false", "off"]:
        return "0"
    elif text_value in ["true", "on"]:
        return "1"
    return text_value
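Hypothetical inputs illustrating the normalisation (assuming guess_value is in scope):

print(guess_value("True"))   # '1'
print(guess_value("OFF"))    # '0'
print(guess_value("42"))     # '42' -- unknown strings pass through unchanged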
def get_member_class(resource):
    """Returns the registered member class for the given resource.

    :param resource: registered resource
    :type resource: class implementing or instance providing or subclass of
        a registered resource interface.
    """
    reg = get_current_registry()
    if IInterface in provided_by(resource):
        member_class = reg.getUtility(resource, name='member-class')
    else:
        member_class = reg.getAdapter(resource, IMemberResource, name='member-class')
    return member_class
def save_hdf(self, filename, path='', overwrite=False, append=False):
    """Saves object data to HDF file (only works if MCMC is run).

    Samples are saved to /samples location under given path,
    :class:`ObservationTree` is saved to /obs location under given path.

    :param filename: Name of file to save to. Should be .h5 file.
    :param path: (optional) Path within HDF file structure to save to.
    :param overwrite: (optional) If ``True``, delete any existing file by the
        same name before writing.
    :param append: (optional) If ``True``, then if a file exists, then just
        the path within the file will be updated.
    """
    if os.path.exists(filename):
        with pd.HDFStore(filename) as store:
            if path in store:
                if overwrite:
                    os.remove(filename)
                elif not append:
                    raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path, filename))
    if self.samples is not None:
        self.samples.to_hdf(filename, path + '/samples')
    else:
        pd.DataFrame().to_hdf(filename, path + '/samples')
    self.obs.save_hdf(filename, path + '/obs', append=True)
    with pd.HDFStore(filename) as store:
        # store = pd.HDFStore(filename)
        attrs = store.get_storer('{}/samples'.format(path)).attrs
        attrs.ic_type = type(self.ic)
        attrs.ic_bands = list(self.ic.bands)
        attrs.use_emcee = self.use_emcee
        if hasattr(self, '_mnest_basename'):
            attrs._mnest_basename = self._mnest_basename
        attrs._bounds = self._bounds
        attrs._priors = self._priors
        attrs.name = self.name
        store.close()
async def _string_data(self, data):
    """This is a private message handler method.

    It is the message handler for String data messages that will be printed
    to the console.

    :param data: message
    :returns: None - message is sent to console
    """
    reply = ''
    data = data[1:-1]
    for x in data:
        reply_data = x
        if reply_data:
            reply += chr(reply_data)
    if self.log_output:
        logging.info(reply)
    else:
        print(reply)
async def delCronJob(self, iden):
    '''
    Delete a cron job

    Args:
        iden (bytes): The iden of the cron job to be deleted
    '''
    cron = self.cell.agenda.appts.get(iden)
    if cron is None:
        raise s_exc.NoSuchIden()
    self._trig_auth_check(cron.useriden)
    await self.cell.agenda.delete(iden)
def rule_from_pattern(pattern, base_path=None, source=None):
    """Take a .gitignore match pattern, such as "*.py[cod]" or "**/*.bak",
    and return an IgnoreRule suitable for matching against files and
    directories. Patterns which do not match files, such as comments and
    blank lines, will return None.

    Because git allows for nested .gitignore files, a base_path value is
    required for correct behavior. The base path should be absolute.
    """
    if base_path and base_path != abspath(base_path):
        raise ValueError('base_path must be absolute')
    # Store the exact pattern for our repr and string functions
    orig_pattern = pattern
    # Early returns follow
    # Discard comments and separators
    if pattern.strip() == '' or pattern[0] == '#':
        return
    # Discard anything with more than two consecutive asterisks
    if pattern.find('***') > -1:
        return
    # Strip leading bang before examining double asterisks
    if pattern[0] == '!':
        negation = True
        pattern = pattern[1:]
    else:
        negation = False
    # Discard anything with invalid double-asterisks -- they can appear
    # at the start or the end, or be surrounded by slashes
    for m in re.finditer(r'\*\*', pattern):
        start_index = m.start()
        if (start_index != 0 and
                start_index != len(pattern) - 2 and
                (pattern[start_index - 1] != '/' or
                 pattern[start_index + 2] != '/')):
            return
    # Special-casing '/', which doesn't match any files or directories
    if pattern.rstrip() == '/':
        return
    directory_only = pattern[-1] == '/'
    # A slash is a sign that we're tied to the base_path of our rule set.
    anchored = '/' in pattern[:-1]
    if pattern[0] == '/':
        pattern = pattern[1:]
    if pattern[0] == '*' and pattern[1] == '*':
        pattern = pattern[2:]
        anchored = False
    if pattern[0] == '/':
        pattern = pattern[1:]
    if pattern[-1] == '/':
        pattern = pattern[:-1]
    regex = fnmatch_pathname_to_regex(pattern)
    if anchored:
        regex = ''.join(['^', regex])
    return IgnoreRule(
        pattern=orig_pattern,
        regex=regex,
        negation=negation,
        directory_only=directory_only,
        anchored=anchored,
        base_path=Path(base_path) if base_path else None,
        source=source
    )
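A few hypothetical patterns showing which inputs the early returns discard (assumes the module's IgnoreRule and fnmatch_pathname_to_regex helpers are importable alongside rule_from_pattern, and that IgnoreRule exposes the fields it is constructed with):

print(rule_from_pattern('# comment', base_path='/repo'))  # None: comments never match files
print(rule_from_pattern('a***b', base_path='/repo'))      # None: three consecutive asterisks are invalid
rule = rule_from_pattern('build/', base_path='/repo')     # directory-only, non-anchored rule
print(rule.directory_only, rule.anchored)                 # True False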
def interrogate(self, platformIdentifier, configuration, libraries, libOverrides={}):
    """Interrogates UnrealBuildTool about the build flags for the specified third-party libraries."""
    # Determine which libraries need their modules parsed by UBT, and which are override-only
    libModules = list([lib for lib in libraries if lib not in libOverrides])
    # Check that we have at least one module to parse
    details = ThirdPartyLibraryDetails()
    if len(libModules) > 0:
        # Retrieve the list of third-party library modules from UnrealBuildTool
        modules = self._getThirdPartyLibs(platformIdentifier, configuration)
        # Filter the list of modules to include only those that were requested
        modules = [m for m in modules if m['Name'] in libModules]
        # Emit a warning if any of the requested modules are not supported
        names = [m['Name'] for m in modules]
        unsupported = ['"' + m + '"' for m in libModules if m not in names]
        if len(unsupported) > 0:
            Utility.printStderr('Warning: unsupported libraries ' + ','.join(unsupported))
        # Some libraries are listed as just the filename without the leading
        # directory (especially prevalent under Windows)
        for module in modules:
            if len(module['PublicAdditionalLibraries']) > 0 and len(module['PublicLibraryPaths']) > 0:
                libPath = (self._absolutePaths(module['PublicLibraryPaths']))[0]
                libs = list([lib.replace('\\', '/') for lib in module['PublicAdditionalLibraries']])
                libs = list([os.path.join(libPath, lib) if '/' not in lib else lib for lib in libs])
                module['PublicAdditionalLibraries'] = libs
        # Flatten the lists of paths
        fields = [
            'Directory',
            'PublicAdditionalLibraries',
            'PublicLibraryPaths',
            'PublicSystemIncludePaths',
            'PublicIncludePaths',
            'PrivateIncludePaths',
            'PublicDefinitions'
        ]
        flattened = {}
        for field in fields:
            transform = (lambda l: self._absolutePaths(l)) if field != 'Definitions' else None
            flattened[field] = self._flatten(field, modules, transform)
        # Compose the prefix directories from the module root directories, the
        # header and library paths, and their direct parent directories
        libraryDirectories = flattened['PublicLibraryPaths']
        headerDirectories = flattened['PublicSystemIncludePaths'] + flattened['PublicIncludePaths'] + flattened['PrivateIncludePaths']
        modulePaths = flattened['Directory']
        prefixDirectories = list(set(
            flattened['Directory'] + headerDirectories + libraryDirectories +
            [os.path.dirname(p) for p in headerDirectories + libraryDirectories]))
        # Wrap the results in a ThirdPartyLibraryDetails instance, converting
        # any relative directory paths into absolute ones
        details = ThirdPartyLibraryDetails(
            prefixDirs=prefixDirectories,
            includeDirs=headerDirectories,
            linkDirs=libraryDirectories,
            definitions=flattened['PublicDefinitions'],
            libs=flattened['PublicAdditionalLibraries']
        )
    # Apply any overrides
    overridesToApply = list([libOverrides[lib] for lib in libraries if lib in libOverrides])
    for override in overridesToApply:
        details.merge(override)
    return details
def get_resources(self):
    """Gets a JSON mapping of server-side resource names to paths.

    :rtype: dict
    """
    status, _, body = self._request('GET', '/', {'Accept': 'application/json'})
    if status == 200:
        tmp, resources = json.loads(bytes_to_str(body)), {}
        for k in tmp:
            # The keys and values returned by json.loads() are unicode,
            # which will cause problems when passed into httplib later
            # (expecting bytes both in Python 2.x and 3.x).
            # We just encode the resource paths into bytes, with an
            # encoding consistent with what the resources module expects.
            resources[k] = tmp[k].encode('utf-8')
        return resources
    else:
        return {}
def _read_preference_for(self, session):
    """Read only access to the read preference of this instance or session."""
    # Override this operation's read preference with the transaction's.
    if session:
        return session._txn_read_preference() or self.__read_preference
    return self.__read_preference
def from_archive(archive_filename, py_interpreter=sys.executable):
    """extract metadata from a given sdist archive file

    :param archive_filename: a sdist archive file
    :param py_interpreter: The full path to the used python interpreter
    :returns: a json blob with metadata
    """
    with _extract_to_tempdir(archive_filename) as root_dir:
        data = _setup_py_run_from_dir(root_dir, py_interpreter)
        return data
def get_clients_groups(self) -> typing.Iterator['Group']:
    """Gets all clients groups

    Returns:
        generator of Groups
    """
    for group in self.groups:
        if group.group_is_client_group:
            yield group
def iso_mesh_line(vertices, tris, vertex_data, levels):
    """Generate an isocurve from vertex data in a surface mesh.

    Parameters
    ----------
    vertices : ndarray, shape (Nv, 3)
        Vertex coordinates.
    tris : ndarray, shape (Nf, 3)
        Indices of triangular element into the vertices array.
    vertex_data : ndarray, shape (Nv,)
        data at vertex.
    levels : ndarray, shape (Nl,)
        Levels at which to generate an isocurve

    Returns
    -------
    lines : ndarray, shape (Nvout, 3)
        Vertex coordinates for lines points
    connects : ndarray, shape (Ne, 2)
        Indices of line element into the vertex array.
    vertex_level : ndarray, shape (Nvout,)
        level for vertex in lines

    Notes
    -----
    Uses a marching squares algorithm to generate the isolines.
    """
    lines = None
    connects = None
    vertex_level = None
    level_index = None
    if not all([isinstance(x, np.ndarray) for x in (vertices, tris, vertex_data, levels)]):
        raise ValueError('all inputs must be numpy arrays')
    if vertices.shape[1] <= 3:
        verts = vertices
    elif vertices.shape[1] == 4:
        verts = vertices[:, :-1]
    else:
        verts = None
    if (verts is not None and tris.shape[1] == 3 and
            vertex_data.shape[0] == verts.shape[0]):
        edges = np.vstack((tris.reshape((-1)),
                           np.roll(tris, -1, axis=1).reshape((-1)))).T
        edge_datas = vertex_data[edges]
        edge_coors = verts[edges].reshape(tris.shape[0] * 3, 2, 3)
        for lev in levels:
            # index for selecting edges whose vertices have only False-True
            # or True-False at the extremities
            index = (edge_datas >= lev)
            index = index[:, 0] ^ index[:, 1]  # xor calculation
            # Select edges
            edge_datas_Ok = edge_datas[index, :]
            xyz = edge_coors[index]
            # Linear interpolation
            ratio = np.array([(lev - edge_datas_Ok[:, 0]) /
                              (edge_datas_Ok[:, 1] - edge_datas_Ok[:, 0])])
            point = xyz[:, 0, :] + ratio.T * (xyz[:, 1, :] - xyz[:, 0, :])
            nbr = point.shape[0] // 2
            if connects is not None:
                connect = np.arange(0, nbr * 2).reshape((nbr, 2)) + len(lines)
                connects = np.append(connects, connect, axis=0)
                lines = np.append(lines, point, axis=0)
                vertex_level = np.append(vertex_level, np.zeros(len(point)) + lev)
                level_index = np.append(level_index, np.array(len(point)))
            else:
                lines = point
                connects = np.arange(0, nbr * 2).reshape((nbr, 2))
                vertex_level = np.zeros(len(point)) + lev
                level_index = np.array(len(point))
        vertex_level = vertex_level.reshape((vertex_level.size, 1))
    return lines, connects, vertex_level, level_index
def send_cmd_recv_rsp(self, cmd_code, cmd_data, timeout, send_idm=True, check_status=True):
    """Send a command and receive a response.

    This low level method sends an arbitrary command with the 8-bit integer
    *cmd_code*, followed by the captured tag identifier (IDm) if *send_idm*
    is :const:`True` and the byte string or bytearray *cmd_data*. It then
    waits *timeout* seconds for a response, verifies that the response is
    correctly formatted and, if *check_status* is :const:`True`, that the
    status flags do not indicate an error.

    All errors raise a :exc:`~nfc.tag.TagCommandError` exception. Errors from
    response status flags produce an :attr:`~nfc.tag.TagCommandError.errno`
    that is greater than 255, all other errors are below 256.
    """
    idm = self.idm if send_idm else bytearray()
    cmd = chr(2 + len(idm) + len(cmd_data)) + chr(cmd_code) + idm + cmd_data
    log.debug(">> {0:02x} {1:02x} {2} {3} ({4}s)".format(
        cmd[0], cmd[1], hexlify(cmd[2:10]), hexlify(cmd[10:]), timeout))
    started = time.time()
    for retry in range(3):
        try:
            rsp = self.clf.exchange(cmd, timeout)
            break
        except nfc.clf.CommunicationError as error:
            reason = error.__class__.__name__
            log.debug("%s after %d retries" % (reason, retry))
    else:
        if type(error) is nfc.clf.TimeoutError:
            raise Type3TagCommandError(nfc.tag.TIMEOUT_ERROR)
        if type(error) is nfc.clf.TransmissionError:
            raise Type3TagCommandError(nfc.tag.RECEIVE_ERROR)
        if type(error) is nfc.clf.ProtocolError:  # pragma: no branch
            raise Type3TagCommandError(nfc.tag.PROTOCOL_ERROR)
    if rsp[0] != len(rsp):
        log.debug("incorrect response length {0:02x}".format(rsp[0]))
        raise Type3TagCommandError(RSP_LENGTH_ERROR)
    if rsp[1] != cmd_code + 1:
        log.debug("incorrect response code {0:02x}".format(rsp[1]))
        raise Type3TagCommandError(RSP_CODE_ERROR)
    if send_idm and rsp[2:10] != self.idm:
        log.debug("wrong tag or transaction id " + hexlify(rsp[2:10]))
        raise Type3TagCommandError(TAG_IDM_ERROR)
    if not send_idm:
        log.debug("<< {0:02x} {1:02x} {2}".format(rsp[0], rsp[1], hexlify(rsp[2:])))
        return rsp[2:]
    if check_status and rsp[10] != 0:
        log.debug("tag returned error status " + hexlify(rsp[10:12]))
        raise Type3TagCommandError(unpack(">H", rsp[10:12])[0])
    if not check_status:
        log.debug("<< {0:02x} {1:02x} {2} {3}".format(
            rsp[0], rsp[1], hexlify(rsp[2:10]), hexlify(rsp[10:])))
        return rsp[10:]
    log.debug("<< {0:02x} {1:02x} {2} {3} {4} ({elapsed:f}s)".format(
        rsp[0], rsp[1], hexlify(rsp[2:10]), hexlify(rsp[10:12]), hexlify(rsp[12:]),
        elapsed=time.time() - started))
    return rsp[12:]
def create_new_csv(samples, args):
    """create csv file that can be used with bcbio -w template"""
    out_fn = os.path.splitext(args.csv)[0] + "-merged.csv"
    logger.info("Preparing new csv: %s" % out_fn)
    with file_transaction(out_fn) as tx_out:
        with open(tx_out, 'w') as handle:
            handle.write(_header(args.csv))
            for s in samples:
                sample_name = s['name'] if isinstance(s['out_file'], list) else os.path.basename(s['out_file'])
                handle.write("%s,%s,%s\n" % (sample_name, s['name'], ",".join(s['anno'])))
def size(self):
    """Returns the size of the cache in bytes."""
    total_size = 0
    for dir_path, dir_names, filenames in os.walk(self.dir):
        for f in filenames:
            fp = os.path.join(dir_path, f)
            total_size += os.path.getsize(fp)
    return total_size
def remove_unsupported_kwargs(module_or_fn, all_kwargs_dict):
    """Removes any kwargs not supported by `module_or_fn` from `all_kwargs_dict`.

    A new dict is returned with shallow copies of keys & values from
    `all_kwargs_dict`, as long as the key is accepted by module_or_fn. The
    returned dict can then be used to connect `module_or_fn` (along with some
    other inputs, ie non-keyword arguments, in general).

    `snt.supports_kwargs` is used to tell whether a given kwarg is supported.
    Note that this method may give false negatives, which would lead to
    extraneous removals in the result of this function. Please read the
    docstring for `snt.supports_kwargs` for details, and manually inspect the
    results from this function if in doubt.

    Args:
        module_or_fn: some callable which can be interrogated by
            `snt.supports_kwargs`. Generally a Sonnet module or a method
            (wrapped in `@reuse_variables`) of a Sonnet module.
        all_kwargs_dict: a dict containing strings as keys, or None.

    Raises:
        ValueError: if `all_kwargs_dict` is not a dict.

    Returns:
        A dict containing some subset of the keys and values in
        `all_kwargs_dict`. This subset may be empty. If `all_kwargs_dict` is
        None, this will be an empty dict.
    """
    if all_kwargs_dict is None:
        all_kwargs_dict = {}
    if not isinstance(all_kwargs_dict, dict):
        raise ValueError("all_kwargs_dict must be a dict with string keys.")
    return {
        kwarg: value for kwarg, value in all_kwargs_dict.items()
        if supports_kwargs(module_or_fn, kwarg) != NOT_SUPPORTED
    }
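A standalone sketch of the same filtering idea using plain inspect.signature instead of snt.supports_kwargs (not Sonnet's implementation; shown only to illustrate the kwarg-filtering pattern):

import inspect

def keep_supported_kwargs(fn, all_kwargs):
    """Return a shallow-copied dict with only the kwargs fn accepts."""
    params = inspect.signature(fn).parameters
    accepts_var_kw = any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values())
    return {k: v for k, v in (all_kwargs or {}).items()
            if accepts_var_kw or k in params}

def connect(inputs, rate=1.0):
    return inputs, rate

print(keep_supported_kwargs(connect, {'rate': 0.5, 'unused': True}))  # {'rate': 0.5}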
def all_schema_names(self, cache=False, cache_timeout=None, force=False):
    """Parameters need to be passed as keyword arguments.

    For unused parameters, they are referenced in cache_util.memoized_func
    decorator.

    :param cache: whether cache is enabled for the function
    :type cache: bool
    :param cache_timeout: timeout in seconds for the cache
    :type cache_timeout: int
    :param force: whether to force refresh the cache
    :type force: bool
    :return: schema list
    :rtype: list
    """
    return self.db_engine_spec.get_schema_names(self.inspector)
def mx_page_trees(self, mx_page):
    """return trees assigned to given MX Page"""
    resp = dict()
    for tree_name, tree in self.scheduler.timetable.trees.items():
        if tree.mx_page == mx_page:
            rest_tree = self._get_tree_details(tree_name)
            resp[tree.tree_name] = rest_tree.document
    return resp
def _capture_original_object(self):
    """Capture the original python object."""
    try:
        self._doubles_target = getattr(self.target, self._name)
    except AttributeError:
        raise VerifyingDoubleError(self.target, self._name)
def file_hash(path, hash_type="md5", block_size=65536, hex_digest=True):
    """Hash a given file with md5, or any other and return the hex digest. You
    can run `hashlib.algorithms_available` to see which are available on your
    system (unless you have an archaic python version, you poor soul).

    This function is designed to be non memory intensive.

    .. code:: python

        reusables.file_hash("test_structure.zip")
        # '61e387de305201a2c915a4f4277d6663'

    :param path: location of the file to hash
    :param hash_type: string name of the hash to use
    :param block_size: amount of bytes to add to hasher at a time
    :param hex_digest: returned as hexdigest, false will return digest
    :return: file's hash
    """
    hashed = hashlib.new(hash_type)
    with open(path, "rb") as infile:
        buf = infile.read(block_size)
        while len(buf) > 0:
            hashed.update(buf)
            buf = infile.read(block_size)
    return hashed.hexdigest() if hex_digest else hashed.digest()
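Hypothetical usage (any readable file path works; sha256 shown only to illustrate the hash_type switch):

print(file_hash("archive.zip", hash_type="sha256"))  # hex digest string
print(file_hash("archive.zip", hex_digest=False))    # raw bytes digest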
def _ttr ( self , k , dist , cache ) : """Three terms recursion coefficients ."""
a , b = evaluation . evaluate_recurrence_coefficients ( dist , k ) return - a , b
def fetch ( bank , key , cachedir ) : '''Fetch information from a file .'''
inkey = False key_file = os . path . join ( cachedir , os . path . normpath ( bank ) , '{0}.p' . format ( key ) ) if not os . path . isfile ( key_file ) : # The bank includes the full filename , and the key is inside the file key_file = os . path . join ( cachedir , os . path . normpath ( bank ) + '.p' ) inkey = True if not os . path . isfile ( key_file ) : log . debug ( 'Cache file "%s" does not exist' , key_file ) return { } try : with salt . utils . files . fopen ( key_file , 'rb' ) as fh_ : if inkey : return __context__ [ 'serial' ] . load ( fh_ ) [ key ] else : return __context__ [ 'serial' ] . load ( fh_ ) except IOError as exc : raise SaltCacheError ( 'There was an error reading the cache file "{0}": {1}' . format ( key_file , exc ) )
async def addNodes ( self , nodedefs ) : '''Add / merge nodes in bulk . The addNodes API is designed for bulk adds which will also set properties and add tags to existing nodes . Nodes are specified as a list of the following tuples : ( ( form , valu ) , { ' props ' : { } , ' tags ' : { } } ) Args : nodedefs ( list ) : A list of nodedef tuples . Returns : ( list ) : A list of xact messages .'''
for ( formname , formvalu ) , forminfo in nodedefs : props = forminfo . get ( 'props' ) # remove any universal created props . . . if props is not None : props . pop ( '.created' , None ) node = await self . addNode ( formname , formvalu , props = props ) if node is not None : tags = forminfo . get ( 'tags' ) if tags is not None : for tag , asof in tags . items ( ) : await node . addTag ( tag , valu = asof ) yield node
def build ( self , builder ) : """Build XML by appending to builder"""
params = dict ( Value = self . value , Status = self . status . value , ProtocolDeviationRepeatKey = self . repeat_key ) if self . code : params [ 'Code' ] = self . code if self . pdclass : params [ 'Class' ] = self . pdclass if self . transaction_type : params [ 'TransactionType' ] = self . transaction_type builder . start ( 'mdsol:ProtocolDeviation' , params ) builder . end ( 'mdsol:ProtocolDeviation' )
def _merge_args ( qCmd , parsed_args , _extra_values , value_specs ) : """Merge arguments from _ extra _ values into parsed _ args . If an argument value is provided in both and it is a list , the values in _ extra _ values will be merged into parsed _ args . @ param parsed _ args : the parsed args from known options @ param _ extra _ values : the other parsed arguments in unknown parts @ param value _ specs : the unparsed unknown parts"""
temp_values = _extra_values . copy ( ) for key , value in six . iteritems ( temp_values ) : if hasattr ( parsed_args , key ) : arg_value = getattr ( parsed_args , key ) if arg_value is not None and value is not None : if isinstance ( arg_value , list ) : if value and isinstance ( value , list ) : if ( not arg_value or isinstance ( arg_value [ 0 ] , type ( value [ 0 ] ) ) ) : arg_value . extend ( value ) _extra_values . pop ( key )
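A minimal sketch of the merge behaviour, assuming ` six ` is importable and using hypothetical option names: list values present on both sides are extended in place on ` parsed _ args ` and removed from ` _ extra _ values ` , while keys unknown to ` parsed _ args ` are left alone.

```python
import argparse

parsed = argparse.Namespace(tag=["a"])
extra = {"tag": ["b", "c"], "unknown": ["x"]}
_merge_args(None, parsed, extra, [])
print(parsed.tag)  # ['a', 'b', 'c']
print(extra)       # {'unknown': ['x']}
```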
def _resample_data ( self , gssha_var ) : """This function resamples the data to match the GSSHA grid IN TESTING MODE"""
self . data = self . data . lsm . resample ( gssha_var , self . gssha_grid )
def not_empty ( message = None ) -> Filter_T : """Validate any object to ensure it ' s not empty ( is None or has no elements ) ."""
def validate ( value ) : if value is None : _raise_failure ( message ) if hasattr ( value , '__len__' ) and value . __len__ ( ) == 0 : _raise_failure ( message ) return value return validate
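Hedged usage sketch; ` _ raise _ failure ` is assumed to be the framework's helper that aborts validation with the given message, so only the passing cases are executed here.

```python
check = not_empty("value must not be empty")
check("hello")    # returns 'hello'
check([1, 2])     # returns [1, 2]
# check("")       # would call _raise_failure("value must not be empty")
# check(None)     # likewise
```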
def findCycle ( self , cycNum ) : '''Method that looks through self . cycles and returns the nearest cycle : Parameters cycNum : int int of the desired cycle .'''
cycNum = int ( cycNum ) i = 0 while i < len ( self . cycles ) : if cycNum < int ( self . cycles [ i ] ) : break i += 1 if i == 0 : return self . cycles [ i ] elif i == len ( self . cycles ) : return self . cycles [ i - 1 ] lower = int ( self . cycles [ i - 1 ] ) higher = int ( self . cycles [ i ] ) if higher - cycNum >= cycNum - lower : return self . cycles [ i - 1 ] else : return self . cycles [ i ]
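A standalone sketch of the same nearest-cycle lookup (not the class method) using ` bisect ` ; ties resolve to the lower cycle, and out-of-range requests clamp to the first or last cycle.

```python
import bisect

def nearest_cycle(cycles, cyc_num):
    values = [int(c) for c in cycles]
    i = bisect.bisect_right(values, cyc_num)
    if i == 0:
        return values[0]
    if i == len(values):
        return values[-1]
    lower, higher = values[i - 1], values[i]
    return lower if higher - cyc_num >= cyc_num - lower else higher

print(nearest_cycle(['100', '200', '300'], 140))  # 100
print(nearest_cycle(['100', '200', '300'], 160))  # 200
print(nearest_cycle(['100', '200', '300'], 999))  # 300
```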
def added ( self , context ) : """Ingredient method called before anything else . Here this method just builds the full command tree and stores it inside the context as the ` ` cmd _ tree ` ` attribute . The structure of the tree is explained by the : func : ` build _ cmd _ tree ( ) ` function ."""
context . cmd_tree = self . _build_cmd_tree ( self . command ) context . cmd_toplevel = context . cmd_tree . cmd_obj # Collect spices from the top - level command for spice in context . cmd_toplevel . get_cmd_spices ( ) : context . bowl . add_spice ( spice )
def connect ( self , host = 'localhost' ) : """Connect to the server and set everything up . Args : host : hostname to connect to"""
# Connect get_logger ( ) . info ( "Connecting to RabbitMQ server..." ) self . _conn = pika . BlockingConnection ( pika . ConnectionParameters ( host = host ) ) self . _channel = self . _conn . channel ( ) # Exchanger get_logger ( ) . info ( "Declaring topic exchanger {}..." . format ( self . exchange ) ) self . _channel . exchange_declare ( exchange = self . exchange , type = 'topic' ) # Create queue get_logger ( ) . info ( "Creating RabbitMQ queue..." ) result = self . _channel . queue_declare ( exclusive = True ) self . _queue_name = result . method . queue # Binding if self . listen_all : get_logger ( ) . info ( "Binding queue to exchanger {} (listen all)..." . format ( self . exchange ) ) self . _channel . queue_bind ( exchange = self . exchange , queue = self . _queue_name , routing_key = '*' ) else : for routing_key in self . topics : get_logger ( ) . info ( "Binding queue to exchanger {} " "with routing key {}..." . format ( self . exchange , routing_key ) ) self . _channel . queue_bind ( exchange = self . exchange , queue = self . _queue_name , routing_key = routing_key ) # Callback get_logger ( ) . info ( "Binding callback..." ) self . _channel . basic_consume ( self . _callback , queue = self . _queue_name , no_ack = True )
def get ( self , key : str , default = None , as_type : type = None , binary_file = False ) : """Get a setting specified by key ` ` key ` ` . Normally , settings are strings , but if you put non - strings into the settings object , you can request unserialization by specifying ` ` as _ type ` ` . If the key does not have a hardcoded default type , omitting ` ` as _ type ` ` will always get you a string . If the setting with the specified name does not exist on this object , any parent object up to the global settings layer ( if configured ) will be queried . If still no value is found , a default value set in the source code will be returned if one exists . If not , the value of the ` ` default ` ` argument of this method will be returned instead . If you receive a ` ` File ` ` object , it will already be opened . You can specify the ` ` binary _ file ` ` flag to indicate that it should be opened in binary mode ."""
if as_type is None and key in self . _h . defaults : as_type = self . _h . defaults [ key ] . type if key in self . _cache ( ) : value = self . _cache ( ) [ key ] else : value = None if self . _parent : value = getattr ( self . _parent , self . _h . attribute_name ) . get ( key , as_type = str ) if value is None and key in self . _h . defaults : value = self . _h . defaults [ key ] . value if value is None and default is not None : value = default return self . _unserialize ( value , as_type , binary_file = binary_file )
def connect ( self , uri , link_quality_callback , link_error_callback ) : """Connect the link driver to a specified URI of the format : radio : / / < dongle nbr > / < radio channel > / [ 250K , 1M , 2M ] The callback for linkQuality can be called at any moment from the driver to report back the link quality in percentage . The callback from linkError will be called when a error occurs with an error message ."""
# check if the URI is a radio URI if not re . search ( '^radio://' , uri ) : raise WrongUriType ( 'Not a radio URI' ) # Open the USB dongle if not re . search ( '^radio://([0-9a-fA-F]+)((/([0-9]+))' '((/(250K|1M|2M))?(/([A-F0-9]+))?)?)?$' , uri ) : raise WrongUriType ( 'Wrong radio URI format!' ) uri_data = re . search ( '^radio://([0-9a-fA-F]+)((/([0-9]+))' '((/(250K|1M|2M))?(/([A-F0-9]+))?)?)?$' , uri ) self . uri = uri if len ( uri_data . group ( 1 ) ) < 10 and uri_data . group ( 1 ) . isdigit ( ) : devid = int ( uri_data . group ( 1 ) ) else : try : devid = crazyradio . get_serials ( ) . index ( uri_data . group ( 1 ) . upper ( ) ) except ValueError : raise Exception ( 'Cannot find radio with serial {}' . format ( uri_data . group ( 1 ) ) ) channel = 2 if uri_data . group ( 4 ) : channel = int ( uri_data . group ( 4 ) ) datarate = Crazyradio . DR_2MPS if uri_data . group ( 7 ) == '250K' : datarate = Crazyradio . DR_250KPS if uri_data . group ( 7 ) == '1M' : datarate = Crazyradio . DR_1MPS if uri_data . group ( 7 ) == '2M' : datarate = Crazyradio . DR_2MPS address = DEFAULT_ADDR_A if uri_data . group ( 9 ) : addr = str ( uri_data . group ( 9 ) ) new_addr = struct . unpack ( '<BBBBB' , binascii . unhexlify ( addr ) ) address = new_addr if self . _radio_manager is None : self . _radio_manager = _RadioManager ( devid , channel , datarate , address ) else : raise Exception ( 'Link already open!' ) with self . _radio_manager as cradio : if cradio . version >= 0.4 : cradio . set_arc ( _nr_of_arc_retries ) else : logger . warning ( 'Radio version <0.4 will be obsoleted soon!' ) # Prepare the inter - thread communication queue self . in_queue = queue . Queue ( ) # Limited size out queue to avoid " ReadBack " effect self . out_queue = queue . Queue ( 1 ) # Launch the comm thread self . _thread = _RadioDriverThread ( self . _radio_manager , self . in_queue , self . out_queue , link_quality_callback , link_error_callback , self ) self . _thread . start ( ) self . link_error_callback = link_error_callback
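For reference, the groups captured by the URI regex above can be checked in isolation; the URI shown is only an example.

```python
import re

pattern = ('^radio://([0-9a-fA-F]+)((/([0-9]+))'
           '((/(250K|1M|2M))?(/([A-F0-9]+))?)?)?$')
m = re.search(pattern, 'radio://0/80/2M/E7E7E7E7E7')
# group 1: dongle number or serial, 4: channel, 7: datarate, 9: address
print(m.group(1), m.group(4), m.group(7), m.group(9))
# -> 0 80 2M E7E7E7E7E7
```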
def seen ( self , event , nickname ) : """Shows the amount of time since the given nickname was last seen in the channel ."""
try : self . joined [ nickname ] except KeyError : pass else : if nickname == self . get_nickname ( event ) : prefix = "you are" else : prefix = "%s is" % nickname return "%s here right now" % prefix try : seen = self . timesince ( self . quit [ nickname ] ) except KeyError : return "%s has never been seen" % nickname else : return "%s was last seen %s ago" % ( nickname , seen )
def parse ( cls , request , default_start = 0 , default_end = 9 , max_end = 50 ) : '''Parse the range headers into a range object . When there are no range headers , check for a page ( ' pagina ' ) parameter , otherwise use the defaults . : param request : a request object : param default _ start : default start for paging ( optional , default is 0) : param default _ end : default end for paging ( optional , default is 9) : param max _ end : maximum end for paging ( optional , default is 50, no limits in case of None ) : return : : class : ' oe _ utils . range _ parser . Range ' '''
settings = request . registry . settings page_param = settings . get ( 'oe.paging.page.queryparam' , 'pagina' ) if 'Range' in request . headers and request . headers [ 'Range' ] != '' : match = re . match ( '^items=([0-9]+)-([0-9]+)$' , request . headers [ 'Range' ] ) if match : start = int ( match . group ( 1 ) ) end = int ( match . group ( 2 ) ) if end < start : end = start if max_end and end > start + max_end : end = start + max_end return cls ( start , end ) else : raise RangeParseException ( 'range header does not match expected format' ) elif page_param in request . params : per_page_param = settings . get ( 'oe.paging.per_page.queryparam' , 'per_pagina' ) page = int ( request . params . get ( page_param ) ) items_per_page = int ( request . params . get ( per_page_param , default_end - default_start + 1 ) ) start = default_start + items_per_page * ( page - 1 ) end = start + items_per_page - 1 return cls ( start , end , page ) else : return cls ( default_start , default_end )
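The Range-header arithmetic in isolation (no request object needed); an inverted range collapses to a single item and, with ` max _ end ` set, an oversized range is capped at ` start + max _ end ` .

```python
import re

header = 'items=10-19'
match = re.match('^items=([0-9]+)-([0-9]+)$', header)
start, end = int(match.group(1)), int(match.group(2))
if end < start:
    end = start
max_end = 50
if max_end and end > start + max_end:
    end = start + max_end
print(start, end)  # 10 19
```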
def logplr ( self ) : """Returns the log of the prior - weighted likelihood ratio at the current parameter values . The logprior is calculated first . If the logprior returns ` ` - inf ` ` ( possibly indicating a non - physical point ) , then ` ` loglr ` ` is not called ."""
logp = self . logprior if logp == - numpy . inf : return logp else : return logp + self . loglr
def get_eidos_bayesian_scorer ( prior_counts = None ) : """Return a BayesianScorer based on Eidos curation counts ."""
table = load_eidos_curation_table ( ) subtype_counts = { 'eidos' : { r : [ c , i ] for r , c , i in zip ( table [ 'RULE' ] , table [ 'Num correct' ] , table [ 'Num incorrect' ] ) } } prior_counts = prior_counts if prior_counts else copy . deepcopy ( default_priors ) scorer = BayesianScorer ( prior_counts = prior_counts , subtype_counts = subtype_counts ) return scorer
def ensure_dir ( path , parents = False ) : """Returns a boolean indicating whether the directory already existed . Will attempt to create parent directories if * parents * is True ."""
if parents : from os . path import dirname parent = dirname ( path ) if len ( parent ) and parent != path : ensure_dir ( parent , True ) try : os . mkdir ( path ) except OSError as e : if e . errno == 17 : # EEXIST return True raise return False
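Hedged usage sketch (paths are illustrative); the return value reports whether the leaf directory already existed.

```python
import os
import tempfile

base = tempfile.mkdtemp()
target = os.path.join(base, "a", "b", "c")
print(ensure_dir(target, parents=True))  # False - just created
print(ensure_dir(target))                # True  - already there
```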
def skip_if ( self , condition : bool , default : Any = None ) -> 'Question' : """Skip the question if flag is set and return the default instead ."""
self . should_skip_question = condition self . default = default return self
def main ( ) : """NAME scalc _ magic . py DESCRIPTION calculates Sb from pmag _ results files SYNTAX scalc _ magic - h [ command line options ] INPUT takes magic formatted pmag _ results ( 2.5 ) or sites ( 3.0 ) table pmag _ result _ name ( 2.5 ) must start with " VGP : Site " must have average _ lat ( 2.5 ) or lat ( 3.0 ) if spin axis is reference OPTIONS - h prints help message and quits - f FILE : specify input results file , default is ' sites . txt ' - c cutoff : specify VGP colatitude cutoff value , default is no cutoff - k cutoff : specify kappa cutoff , default is 0 - crd [ s , g , t ] : specify coordinate system , default is geographic - v : use the VanDammme criterion - a : use antipodes of reverse data : default is to use only normal - r : use reverse data only - p : do relative to principle axis - b : do bootstrap confidence bounds - n : set minimum n for samples ( specimens ) per site - dm : data model [ 3.0 is default , otherwise , 2.5] - mm97 : correct for within site scatter ( McElhinny & McFadden , 1997) NOTES if kappa , N _ site , lat supplied , will consider within site scatter OUTPUT N Sb Sb _ lower Sb _ upper Co - lat . Cutoff OUTPUT : if option - b used : N , S _ B , lower and upper bounds otherwise : N , S _ B , cutoff"""
coord , kappa , cutoff , n = 0 , 0 , 180. , 0 nb , anti , spin , v , boot = 1000 , 0 , 0 , 0 , 0 data_model = 3 rev = 0 if '-dm' in sys . argv : ind = sys . argv . index ( "-dm" ) data_model = int ( sys . argv [ ind + 1 ] ) if data_model == 2 : coord_key = 'tilt_correction' in_file = 'pmag_results.txt' k_key , n_key , lat_key = 'average_k' , 'average_nn' , 'average_lat' else : coord_key = 'dir_tilt_correction' in_file = 'sites.txt' k_key , n_key , lat_key = 'dir_k' , 'dir_n_samples' , 'lat' if '-h' in sys . argv : print ( main . __doc__ ) sys . exit ( ) if '-f' in sys . argv : ind = sys . argv . index ( "-f" ) in_file = sys . argv [ ind + 1 ] vgp_df = pd . read_csv ( in_file , sep = '\t' , header = 1 ) else : vgp_df = pd . read_csv ( sys . stdin , sep = '\t' , header = 1 ) if '-c' in sys . argv : ind = sys . argv . index ( '-c' ) cutoff = float ( sys . argv [ ind + 1 ] ) if '-k' in sys . argv : ind = sys . argv . index ( '-k' ) kappa = float ( sys . argv [ ind + 1 ] ) if '-n' in sys . argv : ind = sys . argv . index ( '-n' ) n = float ( sys . argv [ ind + 1 ] ) if '-crd' in sys . argv : ind = sys . argv . index ( "-crd" ) coord = sys . argv [ ind + 1 ] if coord == 's' : coord = - 1 if coord == 'g' : coord = 0 if coord == 't' : coord = 100 if '-a' in sys . argv : anti = 1 if '-r' in sys . argv : rev = 1 if '-p' in sys . argv : spin = 1 if '-v' in sys . argv : v = 1 if '-b' in sys . argv : boot = 1 if '-mm97' in sys . argv : mm97 = 1 else : mm97 = 0 # find desired vgp lat , lon , kappa , N _ site data : vgp_df . dropna ( subset = [ 'vgp_lat' , 'vgp_lon' ] ) keys = [ coord_key , k_key , n_key , lat_key ] for key in keys : if key not in vgp_df . columns : vgp_df [ key ] = 0 vgp_df = vgp_df [ vgp_df [ coord_key ] == coord ] if data_model != 3 : # convert vgp_df [ 'dir_k' ] = vgp_df [ k_key ] vgp_df [ 'dir_n_samples' ] = vgp_df [ n_key ] vgp_df [ 'lat' ] = vgp_df [ lat_key ] N , S_B , low , high , cutoff = pmag . scalc_vgp_df ( vgp_df , anti = anti , rev = rev , cutoff = cutoff , kappa = kappa , n = n , spin = spin , v = v , boot = boot , mm97 = mm97 ) if high != 0 : print ( N , '%7.1f %7.1f %7.1f %7.1f ' % ( S_B , low , high , cutoff ) ) else : print ( N , '%7.1f %7.1f ' % ( S_B , cutoff ) )
def batlab2sparkle ( experiment_data ) : """Sparkle expects metadata to have a certain hierarchical organization ; reformat batlab experiment data to fit ."""
# This is mostly for convention . . attribute that matters most is samplerate , # since it is used in the GUI to calculate things like duration nsdata = { } for attr in [ 'computername' , 'pst_filename' , 'title' , 'who' , 'date' , 'program_date' ] : nsdata [ attr ] = experiment_data [ attr ] for itest , test in enumerate ( experiment_data [ 'test' ] ) : setname = 'test_{}' . format ( itest + 1 ) nsdata [ setname ] = { } nsdata [ setname ] [ 'samplerate_ad' ] = test [ 'trace' ] [ 0 ] [ 'samplerate_ad' ] nsdata [ setname ] [ 'comment' ] = test [ 'comment' ] nsdata [ setname ] [ 'start' ] = test [ 'time' ] nsdata [ setname ] [ 'mode' ] = 'finite' nsdata [ setname ] [ 'user_tag' ] = '' if test [ 'full_testtype' ] == 'General Auto Test' and test [ 'testtype' ] == 'tone' : nsdata [ setname ] [ 'testtype' ] = 'Tuning Curve' else : nsdata [ setname ] [ 'testtype' ] = test [ 'full_testtype' ] stims = [ ] for itrace , trace in enumerate ( test [ 'trace' ] ) : try : stim = { 'samplerate_da' : trace [ 'samplerate_da' ] , 'overloaded_attenuation' : 0 , } components = [ ] for icomp , component in enumerate ( trace [ 'stimulus' ] ) : # always add in silence component to match batlab ' s delay parameter delay_comp = { 'index' : [ icomp , 0 ] , 'stim_type' : 'silence' , 'intensity' : 0 , 'duration' : component [ 'delay' ] / 1000. , 'start_s' : 0 , 'risefall' : 0 } components . append ( delay_comp ) # FIXME need to pull in speaker calibration to get real intensity comp = { 'risefall' : component [ 'rise_fall' ] / 1000. , 'index' : [ icomp , 1 ] , 'duration' : component [ 'duration' ] / 1000. , 'start_s' : component [ 'delay' ] / 1000. , 'intensity' : 100 - component [ 'attenuation' ] } if component [ 'soundtype_name' ] == 'vocalization' : # print component comp [ 'stim_type' ] = 'Vocalization' comp [ 'filename' ] = component [ 'vocal_call_file' ] comp [ 'browsedir' ] = '' elif component [ 'soundtype_name' ] == 'fmsweep' : comp [ 'stim_type' ] = 'FM Sweep' usweep = 1 if component [ 'usweep' ] else - 1 comp [ 'start_f' ] = component [ 'frequency' ] - ( component [ 'bandwidth' ] / 2 ) * usweep comp [ 'stop_f' ] = component [ 'frequency' ] + ( component [ 'bandwidth' ] / 2 ) * usweep elif component [ 'soundtype_name' ] == 'tone' : comp [ 'stim_type' ] = 'Pure Tone' comp [ 'frequency' ] = component [ 'frequency' ] else : # print ' FOUND UNKNOWN STIM ' , component [ ' soundtype _ name ' ] # raise ValueError comp [ 'stim_type' ] = component [ 'soundtype_name' ] components . append ( comp ) stim [ 'components' ] = components stims . append ( stim ) except TypeError : print 'PROBLEM with' , itest , itrace print 'component' , component continue nsdata [ setname ] [ 'stim' ] = stims return nsdata
def is_first ( self , value ) : """The is _ first property . Args : value ( string ) . the property value ."""
if value == self . _defaults [ 'ai.session.isFirst' ] and 'ai.session.isFirst' in self . _values : del self . _values [ 'ai.session.isFirst' ] else : self . _values [ 'ai.session.isFirst' ] = value
def getURL ( self , CorpNum , UserID , ToGo ) : """: param CorpNum : Popbill member business registration number : param UserID : Popbill member user ID : param ToGo : [ PLUSFRIEND - Plus Friend account management , SENDER - sender number management , TEMPLATE - AlimTalk template management , BOX - KakaoTalk message contents ] : return : Popbill URL"""
if ToGo == None or ToGo == '' : raise PopbillException ( - 99999999 , "TOGO값이 입력되지 않았습니다." ) if ToGo == 'SENDER' : result = self . _httpget ( '/Message/?TG=' + ToGo , CorpNum , UserID ) else : result = self . _httpget ( '/KakaoTalk/?TG=' + ToGo , CorpNum , UserID ) return result . url
def discretize ( value , factor = 100 ) : """Discretize the given value , pre - multiplying by the given factor"""
if not isinstance ( value , Iterable ) : return int ( value * factor ) int_value = list ( deepcopy ( value ) ) for i in range ( len ( int_value ) ) : int_value [ i ] = int ( int_value [ i ] * factor ) return int_value
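Usage sketch: scalars and iterables are scaled by the factor and truncated toward zero by ` int ( ) ` .

```python
print(discretize(0.1234))           # 12   (0.1234 * 100, truncated)
print(discretize([0.5, 1.25], 10))  # [5, 12]
```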
def reqMktData ( self , contract : Contract , genericTickList : str = '' , snapshot : bool = False , regulatorySnapshot : bool = False , mktDataOptions : List [ TagValue ] = None ) -> Ticker : """Subscribe to tick data or request a snapshot . Returns the Ticker that holds the market data . The ticker will initially be empty and gradually ( after a couple of seconds ) be filled . https : / / interactivebrokers . github . io / tws - api / md _ request . html Args : contract : Contract of interest . genericTickList : Comma separated IDs of desired generic ticks that will cause corresponding Ticker fields to be filled : ID Ticker fields 100 ` ` putVolume ` ` , ` ` callVolume ` ` ( for options ) 101 ` ` putOpenInterest ` ` , ` ` callOpenInterest ` ` ( for options ) 104 ` ` histVolatility ` ` ( for options ) 105 ` ` avOptionVolume ` ` ( for options ) 106 ` ` impliedVolatility ` ` ( for options ) 162 ` ` indexFuturePremium ` ` 165 ` ` low13week ` ` , ` ` high13week ` ` , ` ` low26week ` ` , ` ` high26week ` ` , ` ` low52week ` ` , ` ` high52week ` ` , ` ` avVolume ` ` 221 ` ` markPrice ` ` 233 ` ` last ` ` , ` ` lastSize ` ` , ` ` rtVolume ` ` , ` ` vwap ` ` ( Time & Sales ) 236 ` ` shortableShares ` ` 258 ` ` fundamentalRatios ` ` ( of type : class : ` ib _ insync . objects . FundamentalRatios ` ) 293 ` ` tradeCount ` ` 294 ` ` tradeRate ` ` 295 ` ` volumeRate ` ` 411 ` ` rtHistVolatility ` ` 456 ` ` dividends ` ` ( of type : class : ` ib _ insync . objects . Dividends ` ) 588 ` ` futuresOpenInterest ` ` snapshot : If True then request a one - time snapshot , otherwise subscribe to a stream of realtime tick data . regulatorySnapshot : Request NBBO snapshot ( may incur a fee ) . mktDataOptions : Unknown"""
reqId = self . client . getReqId ( ) ticker = self . wrapper . startTicker ( reqId , contract , 'mktData' ) self . client . reqMktData ( reqId , contract , genericTickList , snapshot , regulatorySnapshot , mktDataOptions ) return ticker
def serialize_date ( value ) : """Attempts to convert ` value ` into an ` ` xs : date ` ` string . If ` value ` is ` ` None ` ` , ` ` None ` ` will be returned . Args : value : A date value . This can be a string , datetime . date , or datetime . datetime object . Returns : An ` ` xs : date ` ` formatted timestamp string ."""
if not value : return None elif isinstance ( value , datetime . datetime ) : return value . date ( ) . isoformat ( ) elif isinstance ( value , datetime . date ) : return value . isoformat ( ) else : return parse_date ( value ) . isoformat ( )
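Hedged usage sketch; ` parse _ date ` is assumed to come from the surrounding module and to handle ISO-style strings.

```python
import datetime

print(serialize_date(datetime.datetime(2020, 5, 17, 12, 30)))  # '2020-05-17'
print(serialize_date(datetime.date(2020, 5, 17)))              # '2020-05-17'
print(serialize_date("2020-05-17T12:30:00"))  # '2020-05-17' (assuming parse_date yields a date)
print(serialize_date(None))                                    # None
```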
def transition_to_execute_complete ( self ) : """Transition to execute complete"""
assert self . state in [ AQStateMachineStates . execute ] self . state = AQStateMachineStates . execute_complete
def _should_wrap ( self , node , child , is_left ) : """Wrap child if : - it has lower precedence - same precedence with position opposite to associativity direction"""
node_precedence = node . op_precedence ( ) child_precedence = child . op_precedence ( ) if node_precedence > child_precedence : # 3 * ( 4 + 5) return True if ( node_precedence == child_precedence and is_left != node . op_left_associative ( ) ) : # 3 - ( 4 - 5) # (2 * * 3 ) * * 4 return True return False
def dict_merge ( base , addition , append_lists = False ) : """Merge one dictionary with another , recursively . Fields present in addition will be added to base if not present or merged if both values are dictionaries or lists ( with append _ lists = True ) . If the values are different data types , the value in addition will be discarded . No data from base is deleted or overwritten . This function does not modify either dictionary . Dictionaries inside of other container types ( list , etc . ) are not merged , as the rules for merging would be ambiguous . If values from base and addition are of differing types , the value in addition is discarded . This utility could be expanded to merge Mapping and Container types in the future , but currently works only with dict and list . Arguments : base ( dict ) : The dictionary being added to . addition ( dict ) : The dictionary with additional data . append _ lists ( bool ) : When ` ` True ` ` , fields present in base and addition that are lists will also be merged . Extra values from addition will be appended to the list in base . Returns : dict : The merged base ."""
if not isinstance ( base , dict ) or not isinstance ( addition , dict ) : raise TypeError ( "dict_merge only works with dicts." ) new_base = deepcopy ( base ) for key , value in addition . items ( ) : # Simplest case : Key not in base , so add value to base if key not in new_base . keys ( ) : new_base [ key ] = value # If the value is a dict , and base ' s value is also a dict , merge # If there is a type disagreement , merging cannot and should not happen if isinstance ( value , dict ) and isinstance ( new_base [ key ] , dict ) : new_base [ key ] = dict_merge ( new_base [ key ] , value ) # If value is a list , lists should be merged , and base is compatible elif append_lists and isinstance ( value , list ) and isinstance ( new_base [ key ] , list ) : new_list = deepcopy ( new_base [ key ] ) [ new_list . append ( item ) for item in value if item not in new_list ] new_base [ key ] = new_list # If none of these trigger , discard value from addition implicitly return new_base
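Usage sketch: nested dicts merge recursively, lists merge only when ` append _ lists = True ` , and values from ` addition ` that conflict with base's type are discarded.

```python
base = {"a": 1, "nested": {"x": 1}, "tags": ["red"]}
addition = {"a": "ignored", "nested": {"y": 2}, "tags": ["blue"], "new": True}

print(dict_merge(base, addition))
# {'a': 1, 'nested': {'x': 1, 'y': 2}, 'tags': ['red'], 'new': True}
print(dict_merge(base, addition, append_lists=True)["tags"])
# ['red', 'blue']
```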
def is_supported ( cls , desc ) : """Determines if the given label descriptor is supported . Args : desc ( : class : ` endpoints _ management . gen . servicemanagement _ v1 _ messages . LabelDescriptor ` ) : the label descriptor to test Return : ` True ` if desc is supported , otherwise ` False `"""
for l in cls : if l . matches ( desc ) : return True return False
def calculate_gamma_matrix ( magnetic_states , Omega = 1 ) : r"""Calculate the matrix of decay between states . This function calculates the matrix $ \ gamma _ { ij } $ of decay rates between states | i > and | j > ( in the units specified by the Omega argument ) . > > > g = State ( " Rb " , 87,5,0,1 / Integer ( 2 ) ) > > > e = State ( " Rb " , 87,5,1,3 / Integer ( 2 ) ) > > > magnetic _ states = make _ list _ of _ states ( [ g , e ] , " magnetic " ) To return the rates in rad / s : > > > print calculate _ gamma _ matrix ( magnetic _ states ) [ [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 12702506.296014734 , - 15878132.870018415 , - 15878132.870018415 , - 0.0 , - 19053759.4440221 , - 9526879.72201105 , - 3175626.5740036834 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 12702506.296014734 , - 15878132.870018415 , - 0.0 , - 15878132.870018415 , - 0.0 , - 9526879.72201105 , - 12702506.296014734 , - 9526879.72201105 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 12702506.296014734 , - 0.0 , - 15878132.870018415 , - 15878132.870018415 , - 0.0 , - 0.0 , - 3175626.5740036834 , - 9526879.72201105 , - 19053759.4440221 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 3810751.8888044204 , - 0.0 , - 0.0 , - 12702506.296014734 , - 6351253.148007367 , - 0.0 , - 0.0 , - 0.0 , - 38107518.8880442 , - 12702506.296014734 , - 2540501.2592029474 , - 0.0 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 1905375.9444022102 , - 1905375.9444022102 , - 0.0 , - 6351253.148007367 , - 3175626.5740036834 , - 9526879.72201105 , - 0.0 , - 0.0 , - 0.0 , - 25405012.592029467 , - 20324010.07362358 , - 7621503.77760884 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 635125.3148007367 , - 2540501.259202947 , - 635125.3148007367 , - 0.0 , - 9526879.72201105 , - 0.0 , - 9526879.72201105 , - 0.0 , - 0.0 , - 0.0 , - 15243007.55521768 , - 22864511.33282652 , - 15243007.55521768 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 0.0 , - 1905375.9444022102 , - 1905375.9444022102 , - 0.0 , - 0.0 , - 9526879.72201105 , - 3175626.5740036834 , - 6351253.148007367 , - 0.0 , - 0.0 , - 0.0 , - 7621503.77760884 , - 20324010.07362358 , - 25405012.592029467 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 0.0 , - 0.0 , - 3810751.8888044204 , - 0.0 , - 0.0 , - 0.0 , - 6351253.148007367 , - 12702506.296014734 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 2540501.2592029474 , - 12702506.296014734 , - 38107518.8880442 ] , [ 12702506.296014734 , 12702506.296014734 , 12702506.296014734 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 15878132.870018415 , 15878132.870018415 , 0.0 , 3810751.8888044204 , 1905375.9444022102 , 635125.3148007367 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 15878132.870018415 , 0.0 , 15878132.870018415 , 0.0 , 1905375.9444022102 , 2540501.259202947 , 1905375.9444022102 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 15878132.870018415 , 15878132.870018415 , 0.0 , 0.0 , 635125.3148007367 , 1905375.9444022102 , 3810751.8888044204 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 
0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 19053759.4440221 , 0.0 , 0.0 , 12702506.296014734 , 6351253.148007367 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 9526879.72201105 , 9526879.72201105 , 0.0 , 6351253.148007367 , 3175626.5740036834 , 9526879.72201105 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 3175626.5740036834 , 12702506.296014734 , 3175626.5740036834 , 0.0 , 9526879.72201105 , 0.0 , 9526879.72201105 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 9526879.72201105 , 9526879.72201105 , 0.0 , 0.0 , 9526879.72201105 , 3175626.5740036834 , 6351253.148007367 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 19053759.4440221 , 0.0 , 0.0 , 0.0 , 6351253.148007367 , 12702506.296014734 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 38107518.8880442 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 12702506.296014734 , 25405012.592029467 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 2540501.2592029474 , 20324010.07362358 , 15243007.55521768 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 7621503.77760884 , 22864511.33282652 , 7621503.77760884 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 15243007.55521768 , 20324010.07362358 , 2540501.2592029474 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 25405012.592029467 , 12702506.296014734 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 38107518.8880442 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] ] To return the rates in 10 ^ 6 rad / s : > > > gamma = calculate _ gamma _ matrix ( magnetic _ states , Omega = 1e6) > > > gamma [ [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 12.702506296014734 , - 15.878132870018417 , - 15.878132870018417 , - 0.0 , - 19.053759444022102 , - 9.526879722011051 , - 3.1756265740036835 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 12.702506296014734 , - 15.878132870018417 , - 0.0 , - 15.878132870018417 , - 0.0 , - 9.526879722011051 , - 12.702506296014734 , - 9.526879722011051 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 12.702506296014734 , - 0.0 , - 15.878132870018417 , - 15.878132870018417 , - 0.0 , - 0.0 , - 3.1756265740036835 , - 9.526879722011051 , - 19.053759444022102 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 3.8107518888044205 , - 0.0 , - 0.0 , - 12.702506296014734 , - 6.351253148007367 , - 0.0 , - 0.0 , - 0.0 , - 38.107518888044204 , - 12.702506296014734 , - 2.5405012592029474 , - 0.0 , - 0.0 , - 0.0 , - 0.0 ] , [ 
0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 1.9053759444022103 , - 1.9053759444022103 , - 0.0 , - 6.351253148007367 , - 3.1756265740036835 , - 9.526879722011051 , - 0.0 , - 0.0 , - 0.0 , - 25.40501259202947 , - 20.32401007362358 , - 7.62150377760884 , - 0.0 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 0.6351253148007368 , - 2.540501259202947 , - 0.6351253148007368 , - 0.0 , - 9.526879722011051 , - 0.0 , - 9.526879722011051 , - 0.0 , - 0.0 , - 0.0 , - 15.24300755521768 , - 22.86451133282652 , - 15.24300755521768 , - 0.0 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 0.0 , - 1.9053759444022103 , - 1.9053759444022103 , - 0.0 , - 0.0 , - 9.526879722011051 , - 3.1756265740036835 , - 6.351253148007367 , - 0.0 , - 0.0 , - 0.0 , - 7.62150377760884 , - 20.32401007362358 , - 25.40501259202947 , - 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , - 0.0 , - 0.0 , - 0.0 , - 3.8107518888044205 , - 0.0 , - 0.0 , - 0.0 , - 6.351253148007367 , - 12.702506296014734 , - 0.0 , - 0.0 , - 0.0 , - 0.0 , - 2.5405012592029474 , - 12.702506296014734 , - 38.107518888044204 ] , [ 12.702506296014734 , 12.702506296014734 , 12.702506296014734 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 15.878132870018417 , 15.878132870018417 , 0.0 , 3.8107518888044205 , 1.9053759444022103 , 0.6351253148007368 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 15.878132870018417 , 0.0 , 15.878132870018417 , 0.0 , 1.9053759444022103 , 2.540501259202947 , 1.9053759444022103 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 15.878132870018417 , 15.878132870018417 , 0.0 , 0.0 , 0.6351253148007368 , 1.9053759444022103 , 3.8107518888044205 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 19.053759444022102 , 0.0 , 0.0 , 12.702506296014734 , 6.351253148007367 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 9.526879722011051 , 9.526879722011051 , 0.0 , 6.351253148007367 , 3.1756265740036835 , 9.526879722011051 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 3.1756265740036835 , 12.702506296014734 , 3.1756265740036835 , 0.0 , 9.526879722011051 , 0.0 , 9.526879722011051 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 9.526879722011051 , 9.526879722011051 , 0.0 , 0.0 , 9.526879722011051 , 3.1756265740036835 , 6.351253148007367 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 19.053759444022102 , 0.0 , 0.0 , 0.0 , 6.351253148007367 , 12.702506296014734 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 38.107518888044204 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 12.702506296014734 , 25.40501259202947 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 2.5405012592029474 , 20.32401007362358 , 15.24300755521768 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 
0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 7.62150377760884 , 22.86451133282652 , 7.62150377760884 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 15.24300755521768 , 20.32401007362358 , 2.5405012592029474 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 25.40501259202947 , 12.702506296014734 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] , [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 38.107518888044204 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ] ] Let us test if all D2 lines decay at the expected rate ( 6.065 MHz ) : > > > Gamma = [ sum ( [ gamma [ i ] [ j ] for j in range ( i ) ] ) / 2 / pi for i in range ( len ( magnetic _ states ) ) ] [ 8 : ] > > > for Gammai in Gamma : print Gammai 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065 6.065"""
Ne = len ( magnetic_states ) II = magnetic_states [ 0 ] . i gamma = [ [ 0.0 for j in range ( Ne ) ] for i in range ( Ne ) ] for i in range ( Ne ) : for j in range ( i ) : ei = magnetic_states [ i ] ej = magnetic_states [ j ] einsteinAij = Transition ( ei , ej ) . einsteinA if einsteinAij != 0 : ji = ei . j ; jj = ej . j fi = ei . f ; fj = ej . f mi = ei . m ; mj = ej . m gammaij = ( 2.0 * ji + 1 ) gammaij *= ( 2.0 * fi + 1 ) gammaij *= ( 2.0 * fj + 1 ) gammaij *= float ( wigner_6j ( ji , fi , II , fj , jj , 1 ) ** 2 ) gammaij *= sum ( [ float ( wigner_3j ( fj , 1 , fi , - mj , q , mi ) ** 2 ) for q in [ - 1 , 0 , 1 ] ] ) gammaij *= einsteinAij / Omega gammaij = float ( gammaij ) gamma [ i ] [ j ] = gammaij gamma [ j ] [ i ] = - gammaij return gamma
def connect ( self , host , port ) : '''Connect to a host and port .'''
# Clear the connect state immediately since we ' re no longer connected # at this point . self . _connected = False # Only after the socket has connected do we clear this state ; closed # must be False so that writes can be buffered in writePacket ( ) . The # closed state might have been set to True due to a socket error or a # redirect . self . _host = "%s:%d" % ( host , port ) self . _closed = False self . _close_info = { 'reply_code' : 0 , 'reply_text' : 'failed to connect to %s' % ( self . _host ) , 'class_id' : 0 , 'method_id' : 0 } self . _transport . connect ( ( host , port ) ) self . _transport . write ( PROTOCOL_HEADER ) self . _last_octet_time = time . time ( ) if self . _synchronous_connect : # Have to queue this callback just after connect , it can ' t go # into the constructor because the channel needs to be # " always there " for frame processing , but the synchronous # callback can ' t be added until after the protocol header has # been written . This SHOULD be registered before the protocol # header is written , in the case where the header bytes are # written , but this thread / greenlet / context does not return until # after another thread / greenlet / context has read and processed the # recv _ start frame . Without more re - write to add _ sync _ cb though , # it will block on reading responses that will never arrive # because the protocol header isn ' t written yet . TBD if needs # refactoring . Could encapsulate entirely here , wherein # read _ frames exits if protocol header not yet written . Like other # synchronous behaviors , adding this callback will result in a # blocking frame read and process loop until _ recv _ start and any # subsequent synchronous callbacks have been processed . In the # event that this is / not / a synchronous transport , but the # caller wants the connect to be synchronous so as to ensure that # the connection is ready , then do a read frame loop here . self . _channels [ 0 ] . add_synchronous_cb ( self . _channels [ 0 ] . _recv_start ) while not self . _connected : self . read_frames ( )