def visit_addresses(frame, return_addresses):
    """Visits all of the addresses, returns a new dict which contains just
    the addressed elements.

    Parameters
    ----------
    frame
    return_addresses : dict
        Keys will be column names of the resulting dataframe, and are what
        the user passed in as 'return_columns'. Values are a tuple:
        (py_name, {coords dictionary}) which tells us where to look for the
        value to put in that specific column.

    Returns
    -------
    outdict : dictionary
    """
    outdict = dict()
    for real_name, (pyname, address) in return_addresses.items():
        if address:
            xrval = frame[pyname].loc[address]
            if xrval.size > 1:
                outdict[real_name] = xrval
            else:
                outdict[real_name] = float(np.squeeze(xrval.values))
        else:
            outdict[real_name] = frame[pyname]
    return outdict
def interruptWrite(self, endpoint, buffer, timeout=100):
    r"""Perform an interrupt write request to the endpoint specified.

    Arguments:
        endpoint: endpoint number.
        buffer: sequence data buffer to write. This parameter can be any
            sequence type.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns the number of bytes written.
    """
    return self.dev.write(endpoint, buffer, timeout)
def favorite_dashboard ( self , id , ** kwargs ) : # noqa : E501 """Mark a dashboard as favorite # noqa : E501 # noqa : E501 This method makes a synchronous HTTP request by default . To make an asynchronous HTTP request , please pass async _ req = True > > > thread = api . favorite _ dashboard ( id , async _ req = True ) > > > result = thread . get ( ) : param async _ req bool : param str id : ( required ) : return : ResponseContainer If the method is called asynchronously , returns the request thread ."""
kwargs [ '_return_http_data_only' ] = True if kwargs . get ( 'async_req' ) : return self . favorite_dashboard_with_http_info ( id , ** kwargs ) # noqa : E501 else : ( data ) = self . favorite_dashboard_with_http_info ( id , ** kwargs ) # noqa : E501 return data
def set_sns(style="white", context="paper", font_scale=1.5, color_codes=True, rc={}):
    """Set default plot style using seaborn.

    Font size is set to match the size of the tick labels, rather than the
    axes labels.
    """
    rcd = {"lines.markersize": 8, "lines.markeredgewidth": 1.25,
           "legend.fontsize": "small", "font.size": 12 / 1.5 * font_scale,
           "legend.frameon": True, "axes.formatter.limits": (-5, 5),
           "axes.grid": True}
    rcd.update(rc)
    import seaborn as sns
    sns.set(style=style, context=context, font_scale=font_scale,
            color_codes=color_codes, rc=rcd)
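A minimal usage sketch (assuming seaborn and matplotlib are installed; the data values are made up). Because set_sns forwards everything to sns.set, any seaborn rc key can be overridden through the rc argument:

    import matplotlib.pyplot as plt

    # Hypothetical figure: enlarge fonts for a paper and override one rc key.
    set_sns(context="paper", font_scale=2.0, rc={"lines.markersize": 6})
    plt.plot([0, 1, 2], [0, 1, 4], "o-")
    plt.show()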
def unpack_frame(message):
    """Called to unpack a STOMP message into a dictionary.

    returned = {
        # STOMP Command:
        'cmd': '...',
        # Headers e.g.
        'headers': {'destination': 'xyz', 'message-id': 'some event', etc},
        # Body:
        'body': '...1234...\x00',
    }
    """
    body = []
    returned = dict(cmd='', headers={}, body='')

    breakdown = message.split('\n')

    # Get the message command:
    returned['cmd'] = breakdown[0]
    breakdown = breakdown[1:]

    def headD(field):
        # Find the first ':'; everything to the left of this is a
        # header, everything to the right is data:
        index = field.find(':')
        if index > 0:  # find() returns -1 when no colon is present
            header = field[:index].strip()
            data = field[index + 1:].strip()
            returned['headers'][header.strip()] = data.strip()

    def bodyD(field):
        field = field.strip()
        if field:
            body.append(field)

    # Recover the header fields and body data
    handler = headD
    for field in breakdown:
        if field.strip() == '':
            # End of headers, its body data next.
            handler = bodyD
            continue
        handler(field)

    # Stitch the body data together:
    body = "".join(body)
    returned['body'] = body.replace('\x00', '')

    return returned
def getcellslice(self, columnname, rownr, blc, trc, inc=[]):
    """Get a slice from a column cell holding an array.

    The columnname and (0-relative) rownr indicate the table cell.
    The slice to get is defined by the blc, trc, and optional inc arguments
    (blc = bottom-left corner, trc = top-right corner, inc = stride). Not all
    axes have to be filled in for blc, trc, and inc. Missing axes default to
    begin, end, and 1. A negative blc or trc defaults to begin or end.
    Note that trc is inclusive (unlike python indexing).
    """
    return self._getcellslice(columnname, rownr, blc, trc, inc)
def badEntries(self):
    """Creates a new collection of the same type with only the bad entries.

    # Returns

    `CollectionWithIDs`
        A collection of only the bad entries
    """
    badEntries = set()
    for i in self:
        if i.bad:
            badEntries.add(i)
    return type(self)(badEntries, quietStart=True)
def evaluate(self, env):
    """Evaluate the entire expression in the environment, returning
    a Unicode string.
    """
    out = []
    for part in self.parts:
        if isinstance(part, str):
            out.append(part)
        else:
            out.append(part.evaluate(env))
    return u''.join(map(str, out))
def wait_for_next_completion(self, runtime_context):
    # type: (RuntimeContext) -> None
    """Wait for jobs to finish."""
    if runtime_context.workflow_eval_lock is not None:
        runtime_context.workflow_eval_lock.wait()
    if self.exceptions:
        raise self.exceptions[0]
def allowed_values(self):
    """A tuple containing the allowed values for this Slot.

    The Python equivalent of the CLIPS slot-allowed-values function.
    """
    data = clips.data.DataObject(self._env)
    lib.EnvDeftemplateSlotAllowedValues(self._env, self._tpl, self._name, data.byref)
    return tuple(data.value) if isinstance(data.value, list) else ()
def run ( self ) : """Run consumer"""
if KSER_METRICS_ENABLED == "yes" : from prometheus_client import start_http_server logger . info ( "Metric.Starting..." ) start_http_server ( os . getenv ( "KSER_METRICS_PORT" , 8888 ) , os . getenv ( "KSER_METRICS_ADDRESS" , "0.0.0.0" ) ) logger . info ( "{}.Starting..." . format ( self . __class__ . __name__ ) ) while True : if self . is_active ( ) is True : msg = next ( self . client ) data = msg . value . decode ( 'utf-8' ) if self . client . config [ 'enable_auto_commit' ] is False : self . client . commit ( ) logger . debug ( "{}: Manual commit done." . format ( self . __class__ . __name__ ) ) self . REGISTRY . run ( data ) else : logger . warning ( "Consumer is paused" ) time . sleep ( 60 )
def eff_request_host(request):
    """Return a tuple (request-host, effective request-host name).

    As defined by RFC 2965, except both are lowercased.
    """
    erhn = req_host = request_host(request)
    if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
        erhn = req_host + ".local"
    return req_host, erhn
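A hedged illustration of the RFC 2965 rule implemented above (the host strings are hand-picked; request_host() normally derives them from the request object):

    # A dotless hostname gets ".local" appended for the effective host name,
    # while dotted names and IPv4 literals are returned unchanged:
    #   "example"         -> ("example", "example.local")
    #   "www.example.com" -> ("www.example.com", "www.example.com")
    #   "192.168.0.1"     -> ("192.168.0.1", "192.168.0.1")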
def get_tables(self, models):
    '''Extract all peewee models from the passed in module'''
    return {obj._meta.db_table: obj
            for obj in models.__dict__.itervalues()
            if isinstance(obj, peewee.BaseModel) and len(obj._meta.fields) > 1}
def _init_transforms(self, subjs, voxels, features, random_state):
    """Initialize the mappings (Wi) with random orthogonal matrices.

    Parameters
    ----------
    subjs : int
        The number of subjects.
    voxels : list of int
        A list with the number of voxels per subject.
    features : int
        The number of features in the model.
    random_state : `RandomState`
        A random state to draw the mappings.

    Returns
    -------
    W : list of array, element i has shape=[voxels_i, features]
        The initialized orthogonal transforms (mappings) :math:`W_i` for
        each subject.

    Note
    ----
    Not thread safe.
    """
    # Init the random seed generator
    np.random.seed(self.rand_seed)

    # Draw a random W for each subject
    W = [random_state.random_sample((voxels[i], features))
         for i in range(subjs)]
    # Make it orthogonal with QR decomposition
    for i in range(subjs):
        W[i], _ = np.linalg.qr(W[i])

    return W
def folder2db ( folder_name , debug , energy_limit , skip_folders , goto_reaction ) : """Read folder and collect data in local sqlite3 database"""
folder_name = folder_name . rstrip ( '/' ) skip = [ ] for s in skip_folders . split ( ', ' ) : for sk in s . split ( ',' ) : skip . append ( sk ) pub_id = _folder2db . main ( folder_name , debug , energy_limit , skip , goto_reaction ) if pub_id : print ( '' ) print ( '' ) print ( 'Ready to release the data?' ) print ( " Send it to the Catalysis-Hub server with 'cathub db2server {folder_name}/{pub_id}.db'." . format ( ** locals ( ) ) ) print ( " Then log in at www.catalysis-hub.org/upload/ to verify and release. " )
def get_asm_uids ( taxon_uid ) : """Returns a set of NCBI UIDs associated with the passed taxon . This query at NCBI returns all assemblies for the taxon subtree rooted at the passed taxon _ uid ."""
query = "txid%s[Organism:exp]" % taxon_uid logger . info ( "Entrez ESearch with query: %s" , query ) # Perform initial search for assembly UIDs with taxon ID as query . # Use NCBI history for the search . handle = entrez_retry ( Entrez . esearch , db = "assembly" , term = query , format = "xml" , usehistory = "y" ) record = Entrez . read ( handle , validate = False ) result_count = int ( record [ 'Count' ] ) logger . info ( "Entrez ESearch returns %d assembly IDs" , result_count ) # Recover assembly UIDs from the web history asm_ids = entrez_batch_webhistory ( record , result_count , 250 , db = "assembly" , retmode = "xml" ) logger . info ( "Identified %d unique assemblies" , len ( asm_ids ) ) return asm_ids
def find_item ( self , fq_name ) : """Find an item in the specification by fully qualified name . Args : fq _ name ( str ) : Fully - qualified name of the item . Returns : The item if it is in the specification . None otherwise"""
names = fq_name . split ( self . _separator ) current = self . _yapconf_items for name in names : if isinstance ( current , ( YapconfDictItem , YapconfListItem ) ) : current = current . children if name not in current : return None current = current [ name ] return current
def _get_delta(self, now, then):
    """Internal helper which will return a ``datetime.timedelta``
    representing the time between ``now`` and ``then``. Assumes ``now`` is a
    ``datetime.date`` or ``datetime.datetime`` later than ``then``.

    If ``now`` and ``then`` are not of the same type due to one of them being
    a ``datetime.date`` and the other being a ``datetime.datetime``, both will
    be coerced to ``datetime.date`` before calculating the delta.
    """
    if now.__class__ is not then.__class__:
        now = datetime.date(now.year, now.month, now.day)
        then = datetime.date(then.year, then.month, then.day)
    if now < then:
        raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
    return now - then
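A small worked example of the coercion described in the docstring (dates chosen arbitrarily): mixing a datetime and a date drops the time component before subtracting.

    import datetime

    now = datetime.datetime(2020, 3, 10, 15, 30)   # datetime
    then = datetime.date(2020, 3, 1)               # date
    # Types differ, so both are coerced to datetime.date before subtraction:
    # date(2020, 3, 10) - date(2020, 3, 1) == datetime.timedelta(days=9)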
def get(self, name: str) -> Union[None, str, List[str]]:
    """Get a header."""
    name = name.casefold()
    if name == "referer" or name == "referrer":
        if "referrer" in self._headers:
            return self._headers["referrer"]
        elif "referer" in self._headers:
            return self._headers["referer"]
        else:
            return None
    elif name in self._headers:
        return self._headers[name]
    else:
        return None
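A usage sketch for the referer/referrer aliasing (the header contents below are hypothetical, and _headers is assumed to be a plain dict filled in elsewhere):

    # headers._headers = {"referer": "https://example.com", "accept": "text/html"}
    # headers.get("Referrer")   -> "https://example.com"   (alias resolved, case-insensitive)
    # headers.get("accept")     -> "text/html"
    # headers.get("x-missing")  -> None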
def unassign(pid_type, pid_value):
    """Unassign persistent identifier."""
    from .models import PersistentIdentifier
    obj = PersistentIdentifier.get(pid_type, pid_value)
    obj.unassign()
    db.session.commit()
    click.echo(obj.status)
def reverse_cipher(message):
    """Reverse cipher: "encrypt" a string by reversing it.

    :param message: string to encrypt
    :return: encrypted string
    """
    translated = ''
    i = len(message) - 1
    while i >= 0:
        translated = translated + message[i]
        i = i - 1
    return translated
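Usage example: since the "encryption" is plain string reversal, applying it twice restores the original text.

    assert reverse_cipher("Hello, world!") == "!dlrow ,olleH"
    assert reverse_cipher(reverse_cipher("secret")) == "secret"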
def from_css ( Class , csstext , encoding = None , href = None , media = None , title = None , validate = None ) : """parse CSS text into a Styles object , using cssutils"""
styles = Class ( ) cssStyleSheet = cssutils . parseString ( csstext , encoding = encoding , href = href , media = media , title = title , validate = validate ) for rule in cssStyleSheet . cssRules : if rule . type == cssutils . css . CSSRule . FONT_FACE_RULE : if styles . get ( '@font-face' ) is None : styles [ '@font-face' ] = [ ] styles [ '@font-face' ] . append ( Class . styleProperties ( rule . style ) ) elif rule . type == cssutils . css . CSSRule . IMPORT_RULE : if styles . get ( '@import' ) is None : styles [ '@import' ] = [ ] styles [ '@import' ] . append ( "url(%s)" % rule . href ) elif rule . type == cssutils . css . CSSRule . NAMESPACE_RULE : if styles . get ( '@namespace' ) is None : styles [ '@namespace' ] = { } styles [ '@namespace' ] [ rule . prefix ] = rule . namespaceURI elif rule . type == cssutils . css . CSSRule . MEDIA_RULE : if styles . get ( '@media' ) is None : styles [ '@media' ] = [ ] styles [ '@media' ] . append ( rule . cssText ) elif rule . type == cssutils . css . CSSRule . PAGE_RULE : if styles . get ( '@page' ) is None : styles [ '@page' ] = [ ] styles [ '@page' ] . append ( rule . cssText ) elif rule . type == cssutils . css . CSSRule . STYLE_RULE : for selector in rule . selectorList : sel = selector . selectorText if sel not in styles : styles [ sel ] = Class . styleProperties ( rule . style ) elif rule . type == cssutils . css . CSSRule . CHARSET_RULE : styles [ '@charset' ] = rule . encoding elif rule . type == cssutils . css . CSSRule . COMMENT : # comments are thrown away pass elif rule . type == cssutils . css . CSSRule . VARIABLES_RULE : pass else : log . warning ( "Unknown rule type: %r" % rule . cssText ) return styles
def get_data(latitude=52.091579, longitude=5.119734, usexml=False):
    """Get buienradar XML or JSON data and return results."""
    if usexml:
        log.info("Getting buienradar XML data for latitude=%s, longitude=%s",
                 latitude, longitude)
        return get_xml_data(latitude, longitude)
    else:
        log.info("Getting buienradar JSON data for latitude=%s, longitude=%s",
                 latitude, longitude)
        return get_json_data(latitude, longitude)
def parse_cache ( self , full_df ) : """Format the cached data model into a dictionary of DataFrames and a criteria map DataFrame . Parameters full _ df : DataFrame result of self . get _ dm _ offline ( ) Returns data _ model : dictionary of DataFrames crit _ map : DataFrame"""
data_model = { } levels = [ 'specimens' , 'samples' , 'sites' , 'locations' , 'ages' , 'measurements' , 'criteria' , 'contribution' , 'images' ] criteria_map = pd . DataFrame ( full_df [ 'criteria_map' ] ) for level in levels : df = pd . DataFrame ( full_df [ 'tables' ] [ level ] [ 'columns' ] ) data_model [ level ] = df . transpose ( ) # replace np . nan with None data_model [ level ] = data_model [ level ] . where ( ( pd . notnull ( data_model [ level ] ) ) , None ) return data_model , criteria_map
def send_raw_packet(self, packet: str):
    """Encode and put packet string onto write buffer."""
    data = packet + '\r\n'
    log.debug('writing data: %s', repr(data))
    self.transport.write(data.encode())
def variant(case_id, variant_id):
    """Show a single variant."""
    case_obj = app.db.case(case_id)
    variant = app.db.variant(case_id, variant_id)
    if variant is None:
        return abort(404, "variant not found")
    comments = app.db.comments(variant_id=variant.md5)
    template = 'sv_variant.html' if app.db.variant_type == 'sv' else 'variant.html'
    return render_template(template, variant=variant, case_id=case_id,
                           comments=comments, case=case_obj)
def rank(self, item):
    '''Return the rank (index) of ``item`` in this :class:`zset`.'''
    score = self._dict.get(item)
    if score is not None:
        return self._sl.rank(score)
def _get_free_words ( self , blockAllowed , isRead ) : """Return the number of words free in the transmit packet"""
if blockAllowed : # DAP _ TransferBlock request packet : # BYTE | BYTE * * * * * | SHORT * * * * * | BYTE * * * * * | WORD * * * * * | # > 0x06 | DAP Index | Transfer Count | Transfer Request | Transfer Data | send = self . _size - 5 - 4 * self . _write_count # DAP _ TransferBlock response packet : # BYTE | SHORT * * * * * | BYTE * * * * * | WORD * * * * * | # < 0x06 | Transfer Count | Transfer Response | Transfer Data | recv = self . _size - 4 - 4 * self . _read_count if isRead : return recv // 4 else : return send // 4 else : # DAP _ Transfer request packet : # BYTE | BYTE * * * * * | BYTE * * * * * | BYTE * * * * * | WORD * * * * * | # > 0x05 | DAP Index | Transfer Count | Transfer Request | Transfer Data | send = self . _size - 3 - 1 * self . _read_count - 5 * self . _write_count # DAP _ Transfer response packet : # BYTE | BYTE * * * * * | BYTE * * * * * | WORD * * * * * | # < 0x05 | Transfer Count | Transfer Response | Transfer Data | recv = self . _size - 3 - 4 * self . _read_count if isRead : # 1 request byte in request packet , 4 data bytes in response packet return min ( send , recv // 4 ) else : # 1 request byte + 4 data bytes return send // 5
def _mutate(self):
    '''Mutate enclosed fields'''
    for i in range(self._field_idx, len(self._fields)):
        self._field_idx = i
        if self._current_field().mutate():
            return True
        self._current_field().reset()
    return False
def _relativePath(self, fullPath):
    """Return fullPath relative to Store directory.

    Return fullPath if fullPath is not inside directory.
    Return None if fullPath is outside our scope.
    """
    if fullPath is None:
        return None
    assert fullPath.startswith("/"), fullPath
    path = os.path.relpath(fullPath, self.userPath)
    if not path.startswith("../"):
        return path
    elif self.ignoreExtraVolumes:
        return None
    else:
        return fullPath
def handle_prepared_selection_of_core_class_elements ( self , core_class , models ) : """Handles the selection for TreeStore widgets maintaining lists of a specific ` core _ class ` elements If widgets hold a TreeStore with elements of a specific ` core _ class ` , the local selection of that element type is handled by that widget . This method is called to integrate the local selection with the overall selection of the state machine . If no modifier key ( indicating to extend the selection ) is pressed , the state machine selection is set to the passed selection . If the selection is to be extended , the state machine collection will consist of the widget selection plus all previously selected elements not having the core class ` core _ class ` . : param State | StateElement core _ class : The core class of the elements the widget handles : param models : The list of models that are currently being selected locally"""
if extend_selection ( ) : self . _selected . difference_update ( self . get_selected_elements_of_core_class ( core_class ) ) else : self . _selected . clear ( ) models = self . _check_model_types ( models ) if len ( models ) > 1 : models = reduce_to_parent_states ( models ) self . _selected . update ( models )
def extend ( self , http_api , route = "" , base_url = "" , ** kwargs ) : """Adds handlers from a different Hug API to this one - to create a single API"""
self . versions . update ( http_api . versions ) base_url = base_url or self . base_url for router_base_url , routes in http_api . routes . items ( ) : self . routes . setdefault ( base_url , OrderedDict ( ) ) for item_route , handler in routes . items ( ) : for method , versions in handler . items ( ) : for version , function in versions . items ( ) : function . interface . api = self . api self . routes [ base_url ] . setdefault ( route + item_route , { } ) . update ( handler ) for sink_base_url , sinks in http_api . sinks . items ( ) : for url , sink in sinks . items ( ) : self . add_sink ( sink , route + url , base_url = base_url ) for middleware in ( http_api . middleware or ( ) ) : self . add_middleware ( middleware ) for version , handler in getattr ( http_api , '_exception_handlers' , { } ) . items ( ) : for exception_type , exception_handlers in handler . items ( ) : target_exception_handlers = self . exception_handlers ( version ) or { } for exception_handler in exception_handlers : if exception_type not in target_exception_handlers : self . add_exception_handler ( exception_type , exception_handler , version ) for input_format , input_format_handler in getattr ( http_api , '_input_format' , { } ) . items ( ) : if not input_format in getattr ( self , '_input_format' , { } ) : self . set_input_format ( input_format , input_format_handler ) for version , handler in http_api . not_found_handlers . items ( ) : if version not in self . not_found_handlers : self . set_not_found_handler ( handler , version )
def derivative(self, z, x, y, fase):
    """Wrapper derivative for custom derived properties,
    where x, y, z can be: P, T, v, rho, u, h, s, g, a
    """
    return deriv_H(self, z, x, y, fase)
def smooth_gaps ( self , min_intron ) : """any gaps smaller than min _ intron are joined , andreturns a new mapping with gaps smoothed : param min _ intron : the smallest an intron can be , smaller gaps will be sealed : type min _ intron : int : return : a mapping with small gaps closed : rtype : MappingGeneric"""
rngs = [ self . _rngs [ 0 ] . copy ( ) ] for i in range ( len ( self . _rngs ) - 1 ) : dist = - 1 if self . _rngs [ i + 1 ] . chr == rngs [ - 1 ] . chr : dist = self . _rngs [ i + 1 ] . start - rngs [ - 1 ] . end - 1 if dist >= min_intron or dist < 0 : rngs . append ( self . _rngs [ i + 1 ] . copy ( ) ) else : rngs [ - 1 ] . end = self . _rngs [ i + 1 ] . end return type ( self ) ( rngs , self . _options )
def _log_control(self, s):
    """Write control characters to the appropriate log files"""
    if self.encoding is not None:
        s = s.decode(self.encoding, 'replace')
    self._log(s, 'send')
def slackbuild(self, name, sbo_file):
    """Read SlackBuild file"""
    return URL(self.sbo_url + name + sbo_file).reading()
def _RunUserDefinedFunctions_ ( config , data , histObj , position , namespace = __name__ ) : """Return a single updated data record and history object after running user - defined functions : param dict config : DWM configuration ( see DataDictionary ) : param dict data : single record ( dictionary ) to which user - defined functions should be applied : param dict histObj : History object to which changes should be appended : param string position : position name of which function set from config should be run : param namespace : namespace of current working script ; must be passed if using user - defined functions"""
udfConfig = config [ 'userDefinedFunctions' ] if position in udfConfig : posConfig = udfConfig [ position ] for udf in posConfig . keys ( ) : posConfigUDF = posConfig [ udf ] data , histObj = getattr ( sys . modules [ namespace ] , posConfigUDF ) ( data = data , histObj = histObj ) return data , histObj
def _send_paginated_message(self, endpoint, params=None):
    """Send API message that results in a paginated response.

    The paginated responses are abstracted away by making API requests on
    demand as the response is iterated over.

    Paginated API messages support 3 additional parameters: `before`,
    `after`, and `limit`. `before` and `after` are mutually exclusive. To
    use them, supply an index value for that endpoint (the field used for
    indexing varies by endpoint - get_fills() uses 'trade_id', for example).

        `before`: Only get data that occurs more recently than index
        `after`: Only get data that occurs further in the past than index
        `limit`: Set amount of data per HTTP response. Default (and
            maximum) of 100.

    Args:
        endpoint (str): Endpoint (to be added to base URL)
        params (Optional[dict]): HTTP request parameters

    Yields:
        dict: API response objects
    """
    if params is None:
        params = dict()
    url = self.url + endpoint
    while True:
        r = self.session.get(url, params=params, auth=self.auth, timeout=30)
        results = r.json()
        for result in results:
            yield result
        # If there are no more pages, we're done. Otherwise update `after`
        # param to get next page.
        # If this request included `before` don't get any more pages - the
        # cbpro API doesn't support multiple pages in that case.
        if not r.headers.get('cb-after') or params.get('before') is not None:
            break
        else:
            params['after'] = r.headers['cb-after']
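A hedged sketch of how the generator is typically consumed; the endpoint and parameters below are illustrative only (the docstring names get_fills() as one endpoint that uses this mechanism):

    # fills = client._send_paginated_message('/fills', params={'product_id': 'BTC-USD'})
    # for fill in fills:        # additional pages are requested lazily during iteration
    #     print(fill['trade_id'], fill['price'])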
def list_files ( tag = None , sat_id = None , data_path = None , format_str = None ) : """Return a Pandas Series of every file for chosen satellite data Parameters tag : ( string or NoneType ) Denotes type of file to load . Accepted types are ' ' and ' ascii ' . If ' ' is specified , the primary data type ( ascii ) is loaded . ( default = None ) sat _ id : ( string or NoneType ) Specifies the satellite ID for a constellation . Not used . ( default = None ) data _ path : ( string or NoneType ) Path to data directory . If None is specified , the value previously set in Instrument . files . data _ path is used . ( default = None ) format _ str : ( NoneType ) User specified file format not supported . ( default = None ) Returns pysat . Files . from _ os : ( pysat . _ files . Files ) A class containing the verified available files"""
import sys # if tag = = ' ionprf ' : # # from _ os constructor currently doesn ' t work because of the variable # # filename components at the end of each string . . . . . # ion _ fmt = ' * / ionPrf _ * . { year : 04d } . { day : 03d } . { hour : 02d } . { min : 02d } * _ nc ' # return pysat . Files . from _ os ( dir _ path = os . path . join ( ' cosmic ' , ' ionprf ' ) , # format _ str = ion _ fmt ) estr = 'Building a list of COSMIC files, which can possibly take time. ' estr = '{:s}~1s per 100K files' . format ( estr ) print ( estr ) sys . stdout . flush ( ) # number of files may be large # only select file that are the cosmic data files and end with _ nc cosmicFiles = glob . glob ( os . path . join ( data_path , '*/*_nc' ) ) # need to get date and time from filename to generate index num = len ( cosmicFiles ) if num != 0 : print ( 'Estimated time:' , num * 1.E-5 , 'seconds' ) sys . stdout . flush ( ) # preallocate lists year = [ None ] * num ; days = [ None ] * num ; hours = [ None ] * num ; minutes = [ None ] * num ; microseconds = [ None ] * num ; for i , f in enumerate ( cosmicFiles ) : f2 = f . split ( '.' ) year [ i ] = f2 [ - 6 ] days [ i ] = f2 [ - 5 ] hours [ i ] = f2 [ - 4 ] minutes [ i ] = f2 [ - 3 ] microseconds [ i ] = i year = np . array ( year ) . astype ( int ) days = np . array ( days ) . astype ( int ) uts = np . array ( hours ) . astype ( int ) * 3600. + np . array ( minutes ) . astype ( int ) * 60. # adding microseconds to ensure each time is unique , not allowed to # pass 1 . E - 3 s uts += np . mod ( np . array ( microseconds ) . astype ( int ) * 4 , 8000 ) * 1.E-5 index = pysat . utils . create_datetime_index ( year = year , day = days , uts = uts ) file_list = pysat . Series ( cosmicFiles , index = index ) return file_list else : print ( 'Found no files, check your path or download them.' ) return pysat . Series ( None )
def close ( self ) : """Close connection to an MS SQL Server . This function tries to close the connection and free all memory used . It can be called more than once in a row . No exception is raised in this case ."""
if self . _conn : if self . _pooling : _connection_pool . add ( self . _key , ( self . _conn , self . _main_cursor . _session ) ) else : self . _conn . close ( ) self . _active_cursor = None self . _main_cursor = None self . _conn = None self . _closed = True
def get_penalty_model ( specification ) : """Factory function for penaltymodel _ maxgap . Args : specification ( penaltymodel . Specification ) : The specification for the desired penalty model . Returns : : class : ` penaltymodel . PenaltyModel ` : Penalty model with the given specification . Raises : : class : ` penaltymodel . ImpossiblePenaltyModel ` : If the penalty cannot be built . Parameters : priority ( int ) : - 100"""
# check that the feasible _ configurations are spin feasible_configurations = specification . feasible_configurations if specification . vartype is dimod . BINARY : feasible_configurations = { tuple ( 2 * v - 1 for v in config ) : en for config , en in feasible_configurations . items ( ) } # convert ising _ quadratic _ ranges to the form we expect ising_quadratic_ranges = specification . ising_quadratic_ranges quadratic_ranges = { ( u , v ) : ising_quadratic_ranges [ u ] [ v ] for u , v in specification . graph . edges } bqm , gap = generate ( specification . graph , feasible_configurations , specification . decision_variables , specification . ising_linear_ranges , quadratic_ranges , specification . min_classical_gap , None ) # unspecified smt solver try : ground = max ( feasible_configurations . values ( ) ) except ValueError : ground = 0.0 # if empty return pm . PenaltyModel . from_specification ( specification , bqm , gap , ground )
def queryURL ( self , xri , service_type = None ) : """Build a URL to query the proxy resolver . @ param xri : An XRI to resolve . @ type xri : unicode @ param service _ type : The service type to resolve , if you desire service endpoint selection . A service type is a URI . @ type service _ type : str @ returns : a URL @ returntype : str"""
# Trim off the xri : / / prefix . The proxy resolver didn ' t accept it # when this code was written , but that may ( or may not ) change for # XRI Resolution 2.0 Working Draft 11. qxri = toURINormal ( xri ) [ 6 : ] hxri = self . proxy_url + qxri args = { # XXX : If the proxy resolver will ensure that it doesn ' t return # bogus CanonicalIDs ( as per Steve ' s message of 15 Aug 2006 # 11:13:42 ) , then we could ask for application / xrd + xml instead , # which would give us a bit less to process . '_xrd_r' : 'application/xrds+xml' , } if service_type : args [ '_xrd_t' ] = service_type else : # Don ' t perform service endpoint selection . args [ '_xrd_r' ] += ';sep=false' query = _appendArgs ( hxri , args ) return query
def from_file ( cls , fname , form = None ) : """Read an orthography profile from a metadata file or a default tab - separated profile file ."""
try : tg = TableGroup . from_file ( fname ) opfname = None except JSONDecodeError : tg = TableGroup . fromvalue ( cls . MD ) opfname = fname if len ( tg . tables ) != 1 : raise ValueError ( 'profile description must contain exactly one table' ) metadata = tg . common_props metadata . update ( fname = Path ( fname ) , form = form ) return cls ( * [ { k : None if ( k != cls . GRAPHEME_COL and v == cls . NULL ) else v for k , v in d . items ( ) } for d in tg . tables [ 0 ] . iterdicts ( fname = opfname ) ] , ** metadata )
def get(self, key):
    """Get a key's value from redis"""
    res = self.connection.get(key)
    print(res)
    return res
def initialize_segment_register_x64(self, state, concrete_target):
    """Set the gs register in the angr state to the value of the fs register
    in the concrete process.

    :param state: state which will be modified
    :param concrete_target: concrete target that will be used to read the fs register
    :return: None
    """
    _l.debug("Synchronizing gs segment register")
    state.regs.gs = self._read_gs_register_x64(concrete_target)
def decode ( self ) : """Decode this report from a msgpack encoded binary blob ."""
report_dict = msgpack . unpackb ( self . raw_report , raw = False ) events = [ IOTileEvent . FromDict ( x ) for x in report_dict . get ( 'events' , [ ] ) ] readings = [ IOTileReading . FromDict ( x ) for x in report_dict . get ( 'data' , [ ] ) ] if 'device' not in report_dict : raise DataError ( "Invalid encoded FlexibleDictionaryReport that did not " "have a device key set with the device uuid" ) self . origin = report_dict [ 'device' ] self . report_id = report_dict . get ( "incremental_id" , IOTileReading . InvalidReadingID ) self . sent_timestamp = report_dict . get ( "device_sent_timestamp" , 0 ) self . origin_streamer = report_dict . get ( "streamer_index" ) self . streamer_selector = report_dict . get ( "streamer_selector" ) self . lowest_id = report_dict . get ( 'lowest_id' ) self . highest_id = report_dict . get ( 'highest_id' ) return readings , events
def connection_factory_absent ( name , both = True , server = None ) : '''Ensures the transaction factory is absent . name Name of the connection factory both Delete both the pool and the resource , defaults to ` ` true ` `'''
ret = { 'name' : name , 'result' : None , 'comment' : None , 'changes' : { } } pool_name = '{0}-Connection-Pool' . format ( name ) pool_ret = _do_element_absent ( pool_name , 'connector_c_pool' , { 'cascade' : both } , server ) if not pool_ret [ 'error' ] : if __opts__ [ 'test' ] and pool_ret [ 'delete' ] : ret [ 'comment' ] = 'Connection Factory set to be deleted' elif pool_ret [ 'delete' ] : ret [ 'result' ] = True ret [ 'comment' ] = 'Connection Factory deleted' else : ret [ 'result' ] = True ret [ 'comment' ] = 'Connection Factory doesn\'t exist' else : ret [ 'result' ] = False ret [ 'comment' ] = 'Error: {0}' . format ( pool_ret [ 'error' ] ) return ret
def request_url(self, request, proxies):
    """Obtain the url to use when making the final request.

    If the message is being sent through a HTTP proxy, the full URL has to
    be used. Otherwise, we should only use the path portion of the URL.

    This should not be called from user code, and is only exposed for use
    when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param proxies: A dictionary of schemes to proxy URLs.
    """
    proxies = proxies or {}
    scheme = urlparse(request.url).scheme
    proxy = proxies.get(scheme)

    if proxy and scheme != 'https':
        url = urldefragauth(request.url)
    else:
        url = request.path_url

    return url
def get_max_val_indices(input_list):
    """This function determines all indices of the maximum values in the supplied list.

    Examples:
        >>> get_max_val_indices([12, 33, 23, 10, 67, 89, 45, 667, 23, 12, 11, 10, 54])
        [7]
        >>> get_max_val_indices([1, 2, 2, 2, 4, 4, 4, 5, 5, 5, 5])
        [7, 8, 9, 10]
        >>> get_max_val_indices([2, 1, 5, 6, 8, 3, 4, 9, 10, 11, 8, 12])
        [11]

    :param input_list: A list to process.
    :return: Returns the indices of maximum values in the list.
    """
    max_value = max(input_list)
    indices = [index for index, value in enumerate(input_list) if value == max_value]
    return indices
def load_completions ( self ) : """Load completions from the completion index . Updates the following attributes : * commands * subcommands * global _ opts * args _ opts"""
try : index_str = self . load_index ( utils . AWSCLI_VERSION ) except IndexLoadError : return index_str = self . load_index ( utils . AWSCLI_VERSION ) index_data = json . loads ( index_str ) index_root = index_data [ 'aws' ] # ec2 , s3 , elb . . . self . commands = index_root [ 'commands' ] # - - profile , - - region , - - output . . . self . global_opts = index_root [ 'arguments' ] for command in self . commands : # ec2 : start - instances , stop - instances , terminate - instances . . . subcommands_current = index_root [ 'children' ] . get ( command ) [ 'commands' ] self . subcommands . extend ( subcommands_current ) for subcommand_current in subcommands_current : # start - instances : - - instance - ids , - - dry - run . . . args_opts_current = index_root [ 'children' ] . get ( command ) [ 'children' ] . get ( subcommand_current ) [ 'arguments' ] self . args_opts . update ( args_opts_current )
def _parse_family_pb ( family_pb ) : """Parses a Family protobuf into a dictionary . : type family _ pb : : class : ` . _ generated . data _ pb2 . Family ` : param family _ pb : A protobuf : rtype : tuple : returns : A string and dictionary . The string is the name of the column family and the dictionary has column names ( within the family ) as keys and cell lists as values . Each cell is represented with a two - tuple with the value ( in bytes ) and the timestamp for the cell . For example : . . code : : python b ' col - name1 ' : [ ( b ' cell - val ' , datetime . datetime ( . . . ) ) , ( b ' cell - val - newer ' , datetime . datetime ( . . . ) ) , b ' col - name2 ' : [ ( b ' altcol - cell - val ' , datetime . datetime ( . . . ) ) ,"""
result = { } for column in family_pb . columns : result [ column . qualifier ] = cells = [ ] for cell in column . cells : val_pair = ( cell . value , _datetime_from_microseconds ( cell . timestamp_micros ) ) cells . append ( val_pair ) return family_pb . name , result
def switch_showfilter_icon(self, toggled):
    """Switch the icon on the showfilter_tb

    :param toggled: the state of the button
    :type toggled: :class:`bool`
    :returns: None
    :rtype: None
    :raises: None
    """
    at = QtCore.Qt.DownArrow if toggled else QtCore.Qt.RightArrow
    self.showfilter_tb.setArrowType(at)
def _make_verb_helper ( verb_func , add_groups = False ) : """Create function that prepares verb for the verb function The functions created add expressions to be evaluated to the verb , then call the core verb function Parameters verb _ func : function Core verb function . This is the function called after expressions created and added to the verb . The core function should be one of those that implement verbs that evaluate expressions . add _ groups : bool If True , a groups attribute is added to the verb . The groups are the columns created after evaluating the expressions . Returns out : function A function that implements a helper verb ."""
@ wraps ( verb_func ) def _verb_func ( verb ) : verb . expressions , new_columns = build_expressions ( verb ) if add_groups : verb . groups = new_columns return verb_func ( verb ) return _verb_func
def mock_stream ( hamiltonian , prog_orbit , prog_mass , k_mean , k_disp , release_every = 1 , Integrator = DOPRI853Integrator , Integrator_kwargs = dict ( ) , snapshot_filename = None , output_every = 1 , seed = None ) : """Generate a mock stellar stream in the specified potential with a progenitor system that ends up at the specified position . Parameters hamiltonian : ` ~ gala . potential . Hamiltonian ` The system Hamiltonian . prog _ orbit : ` ~ gala . dynamics . Orbit ` The orbit of the progenitor system . prog _ mass : numeric , array _ like A single mass or an array of masses if the progenitor mass evolves with time . k _ mean : ` numpy . ndarray ` Array of mean : math : ` k ` values ( see Fardal et al . 2015 ) . These are used to determine the exact prescription for generating the mock stream . The components are for : : math : ` ( R , \ phi , z , v _ R , v _ \ phi , v _ z ) ` . If 1D , assumed constant in time . If 2D , time axis is axis 0. k _ disp : ` numpy . ndarray ` Array of : math : ` k ` value dispersions ( see Fardal et al . 2015 ) . These are used to determine the exact prescription for generating the mock stream . The components are for : : math : ` ( R , \ phi , z , v _ R , v _ \ phi , v _ z ) ` . If 1D , assumed constant in time . If 2D , time axis is axis 0. release _ every : int ( optional ) Release particles at the Lagrange points every X timesteps . Integrator : ` ~ gala . integrate . Integrator ` ( optional ) Integrator to use . Integrator _ kwargs : dict ( optional ) Any extra keyword argumets to pass to the integrator function . snapshot _ filename : str ( optional ) Filename to save all incremental snapshots of particle positions and velocities . Warning : this can make very large files if you are not careful ! output _ every : int ( optional ) If outputing snapshots ( i . e . , if snapshot _ filename is specified ) , this controls how often to output a snapshot . seed : int ( optional ) A random number seed for initializing the particle positions . Returns stream : ` ~ gala . dynamics . PhaseSpacePosition `"""
if isinstance ( hamiltonian , CPotentialBase ) : warnings . warn ( "This function now expects a `Hamiltonian` instance " "instead of a `PotentialBase` subclass instance. If you " "are using a static reference frame, you just need to " "pass your potential object in to the Hamiltonian " "constructor to use, e.g., Hamiltonian(potential)." , DeprecationWarning ) hamiltonian = Hamiltonian ( hamiltonian ) # Some initial checks to short - circuit if input is bad if Integrator not in [ LeapfrogIntegrator , DOPRI853Integrator ] : raise ValueError ( "Only Leapfrog and dop853 integration is supported for" " generating mock streams." ) if not isinstance ( hamiltonian , Hamiltonian ) or not hamiltonian . c_enabled : raise TypeError ( "Input potential must be a CPotentialBase subclass." ) if not isinstance ( prog_orbit , Orbit ) : raise TypeError ( "Progenitor orbit must be an Orbit subclass." ) if snapshot_filename is not None and Integrator != DOPRI853Integrator : raise ValueError ( "If saving snapshots, must use the DOP853Integrator." ) k_mean = np . atleast_1d ( k_mean ) k_disp = np . atleast_1d ( k_disp ) if k_mean . ndim > 1 : assert k_mean . shape [ 0 ] == prog_orbit . t . size assert k_disp . shape [ 0 ] == prog_orbit . t . size if prog_orbit . t [ 1 ] < prog_orbit . t [ 0 ] : raise ValueError ( "Progenitor orbit goes backwards in time. Streams can " "only be generated on orbits that run forwards. Hint: " "you can reverse the orbit with prog_orbit[::-1], but " "make sure the array of k_mean values is ordered " "correctly." ) c_w = np . squeeze ( prog_orbit . w ( hamiltonian . units ) ) . T # transpose for Cython funcs prog_w = np . ascontiguousarray ( c_w ) prog_t = np . ascontiguousarray ( prog_orbit . t . decompose ( hamiltonian . units ) . value ) if not hasattr ( prog_mass , 'unit' ) : prog_mass = prog_mass * hamiltonian . units [ 'mass' ] if not prog_mass . isscalar : if len ( prog_mass ) != prog_orbit . ntimes : raise ValueError ( "If passing in an array of progenitor masses, it " "must have the same length as the number of " "timesteps in the input orbit." ) prog_mass = prog_mass . decompose ( hamiltonian . units ) . value if Integrator == LeapfrogIntegrator : stream_w = _mock_stream_leapfrog ( hamiltonian , t = prog_t , prog_w = prog_w , release_every = release_every , _k_mean = k_mean , _k_disp = k_disp , G = hamiltonian . potential . G , _prog_mass = prog_mass , seed = seed , ** Integrator_kwargs ) elif Integrator == DOPRI853Integrator : if snapshot_filename is not None : if os . path . exists ( snapshot_filename ) : raise IOError ( "Mockstream save file '{}' already exists." ) import h5py _mock_stream_animate ( snapshot_filename , hamiltonian , t = prog_t , prog_w = prog_w , release_every = release_every , output_every = output_every , _k_mean = k_mean , _k_disp = k_disp , G = hamiltonian . potential . G , _prog_mass = prog_mass , seed = seed , ** Integrator_kwargs ) with h5py . File ( str ( snapshot_filename ) , 'a' ) as h5f : h5f [ 'pos' ] . attrs [ 'unit' ] = str ( hamiltonian . units [ 'length' ] ) h5f [ 'vel' ] . attrs [ 'unit' ] = str ( hamiltonian . units [ 'length' ] / hamiltonian . units [ 'time' ] ) h5f [ 't' ] . attrs [ 'unit' ] = str ( hamiltonian . units [ 'time' ] ) return None else : stream_w = _mock_stream_dop853 ( hamiltonian , t = prog_t , prog_w = prog_w , release_every = release_every , _k_mean = k_mean , _k_disp = k_disp , G = hamiltonian . potential . 
G , _prog_mass = prog_mass , seed = seed , ** Integrator_kwargs ) else : raise RuntimeError ( "Should never get here..." ) return PhaseSpacePosition . from_w ( w = stream_w . T , units = hamiltonian . units )
def select_between_exonic_splice_site_and_alternate_effect(effect):
    """If the given effect is an ExonicSpliceSite then it might contain
    an alternate effect of higher priority. In that case, return the
    alternate effect. Otherwise, this acts as an identity function.
    """
    if effect.__class__ is not ExonicSpliceSite:
        return effect
    if effect.alternate_effect is None:
        return effect
    splice_priority = effect_priority(effect)
    alternate_priority = effect_priority(effect.alternate_effect)
    if splice_priority > alternate_priority:
        return effect
    else:
        return effect.alternate_effect
def _chglog ( amend : bool = False , stage : bool = False , next_version : str = None , auto_next_version : bool = False ) : """Writes the changelog Args : amend : amend last commit with changes stage : stage changes"""
if config . CHANGELOG_DISABLE ( ) : LOGGER . info ( 'skipping changelog update as per config' ) else : epab . utils . ensure_exe ( 'git' ) epab . utils . ensure_exe ( 'gitchangelog' ) LOGGER . info ( 'writing changelog' ) if auto_next_version : next_version = epab . utils . get_next_version ( ) with gitchangelog_config ( ) : with temporary_tag ( next_version ) : changelog , _ = elib_run . run ( 'gitchangelog' , mute = True ) # changelog = changelog . encode ( ' utf8 ' ) . replace ( b ' \ r \ n ' , b ' \ n ' ) . decode ( ' utf8 ' ) changelog = re . sub ( BOGUS_LINE_PATTERN , '\\1\n' , changelog ) Path ( config . CHANGELOG_FILE_PATH ( ) ) . write_text ( changelog , encoding = 'utf8' ) if amend : CTX . repo . amend_commit ( append_to_msg = 'update changelog [auto]' , files_to_add = str ( config . CHANGELOG_FILE_PATH ( ) ) ) elif stage : CTX . repo . stage_subset ( str ( config . CHANGELOG_FILE_PATH ( ) ) )
def has_true ( self , e , extra_constraints = ( ) , solver = None , model_callback = None ) : # pylint : disable = unused - argument """Should return True if ` e ` can possible be True . : param e : The AST . : param extra _ constraints : Extra constraints ( as ASTs ) to add to the solver for this solve . : param solver : A solver , for backends that require it . : param model _ callback : a function that will be executed with recovered models ( if any ) : return : A boolean"""
# if self . _ solver _ required and solver is None : # raise BackendError ( " % s requires a solver for evaluation " % self . _ _ class _ _ . _ _ name _ _ ) return self . _has_true ( self . convert ( e ) , extra_constraints = extra_constraints , solver = solver , model_callback = model_callback )
def _call(self, x, out=None):
    """Implement ``self(x[, out])``."""
    if out is None:
        return self.vector * self.functional(x)
    else:
        scalar = self.functional(x)
        out.lincomb(scalar, self.vector)
def read_csv_with_different_type(csv_name, column_types_dict, usecols=None):
    """Returns a DataFrame from a .csv file stored in /data/raw/.
    Reads the CSV as string.
    """
    csv_path = os.path.join(DATA_FOLDER, csv_name)
    csv = pd.read_csv(
        csv_path,
        usecols=usecols,
        encoding="utf-8",
        dtype=column_types_dict,
        engine="python",
    )
    for key_column, val_type in column_types_dict.items():
        if val_type == str:
            csv[key_column] = csv[key_column].str.strip()
    return csv
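Hypothetical usage (file and column names are made up): forcing identifier-like columns to stay strings preserves leading zeros that pandas would otherwise strip when inferring integer dtypes.

    column_types = {"account_id": str, "postal_code": str}
    df = read_csv_with_different_type("accounts.csv", column_types)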
def get_apex(self, lat, height=None):
    """Calculate apex height

    Parameters
    ----------
    lat : (float)
        Latitude in degrees
    height : (float or NoneType)
        Height above the surface of the earth in km, or NoneType to use
        reference height (default=None)

    Returns
    -------
    apex_height : (float)
        Height of the field line apex in km
    """
    lat = helpers.checklat(lat, name='alat')
    if height is None:
        height = self.refh

    cos_lat_squared = np.cos(np.radians(lat)) ** 2
    apex_height = (self.RE + height) / cos_lat_squared - self.RE

    return apex_height
def setFixedHeight(self, height):
    """Sets the maximum height value to the inputted height and emits the
    sizeConstraintChanged signal.

    :param height | <int>
    """
    super(XView, self).setFixedHeight(height)

    if not self.signalsBlocked():
        self.sizeConstraintChanged.emit()
def one(self):
    """Return a single row of the results or None if empty. This is basically
    a shortcut to `result_set.current_rows[0]` and should only be used when
    you know a query returns a single row. Consider using an iterator if the
    ResultSet contains more than one row.
    """
    row = None
    if self._current_rows:
        try:
            row = self._current_rows[0]
        except TypeError:  # generator object is not subscriptable, PYTHON-1026
            row = next(iter(self._current_rows))
    return row
def make_header ( self , locale , catalog ) : """Populate header with correct data from top - most locale file ."""
return { "po-revision-date" : self . get_catalogue_header_value ( catalog , 'PO-Revision-Date' ) , "mime-version" : self . get_catalogue_header_value ( catalog , 'MIME-Version' ) , "last-translator" : 'Automatic <hi@thorgate.eu>' , "x-generator" : "Python" , "language" : self . get_catalogue_header_value ( catalog , 'Language' ) or locale , "lang" : locale , "content-transfer-encoding" : self . get_catalogue_header_value ( catalog , 'Content-Transfer-Encoding' ) , "project-id-version" : self . get_catalogue_header_value ( catalog , 'Project-Id-Version' ) , "pot-creation-date" : self . get_catalogue_header_value ( catalog , 'POT-Creation-Date' ) , "domain" : self . domain , "report-msgid-bugs-to" : self . get_catalogue_header_value ( catalog , 'Report-Msgid-Bugs-To' ) , "content-type" : self . get_catalogue_header_value ( catalog , 'Content-Type' ) , "plural-forms" : self . get_plural ( catalog ) , "language-team" : self . get_catalogue_header_value ( catalog , 'Language-Team' ) }
def estimateBackgroundLevel ( img , image_is_artefact_free = False , min_rel_size = 0.05 , max_abs_size = 11 ) : '''estimate background level through finding the most homogeneous area and take its average min _ size - relative size of the examined area'''
s0 , s1 = img . shape [ : 2 ] s = min ( max_abs_size , int ( max ( s0 , s1 ) * min_rel_size ) ) arr = np . zeros ( shape = ( s0 - 2 * s , s1 - 2 * s ) , dtype = img . dtype ) # fill arr : _spatialStd ( img , arr , s ) # most homogeneous area : i , j = np . unravel_index ( arr . argmin ( ) , arr . shape ) sub = img [ int ( i + 0.5 * s ) : int ( i + s * 1.5 ) , int ( j + s * 0.5 ) : int ( j + s * 1.5 ) ] return np . median ( sub )
def registered ( self , driver , frameworkId , masterInfo ) : """Invoked when the scheduler successfully registers with a Mesos master"""
log . debug ( "Registered with framework ID %s" , frameworkId . value ) # Save the framework ID self . frameworkId = frameworkId . value
def find ( self , path , resolved = True ) : """Get the definition object for the schema type located at the specified path . The path may contain ( . ) dot notation to specify nested types . Actually , the path separator is usually a ( . ) but can be redefined during contruction . @ param path : A ( . ) separated path to a schema type . @ type path : basestring @ param resolved : A flag indicating that the fully resolved type should be returned . @ type resolved : boolean @ return : The found schema I { type } @ rtype : L { xsd . sxbase . SchemaObject }"""
result = None parts = self . split ( path ) try : result = self . root ( parts ) if len ( parts ) > 1 : result = result . resolve ( nobuiltin = True ) result = self . branch ( result , parts ) result = self . leaf ( result , parts ) if resolved : result = result . resolve ( nobuiltin = True ) except PathResolver . BadPath : log . error ( 'path: "%s", not-found' % path ) return result
def read_file(filename):
    """Read contents of the specified file.

    Parameters:
        filename : str
            The name of the file to be read

    Returns:
        lines : list of str
            The contents of the file, split by line
    """
    infile = open(filename, 'r')
    lines = infile.readlines()
    infile.close()
    return lines
def hooks ( ctx ) : """List registered hooks ( in the order they run ) ."""
from . . hooks . run_hooks_hook import RunHooksHook bundles = _get_bundles ( ctx . obj . data [ 'env' ] ) hooks = RunHooksHook ( None ) . collect_from_bundles ( bundles ) print_table ( ( 'Hook Name' , 'Default Bundle Module' , 'Bundle Module Override Attr' , 'Description' ) , [ ( hook . name , hook . bundle_module_name or '(None)' , hook . bundle_override_module_name_attr or '(None)' , format_docstring ( hook . __doc__ ) or '(None)' ) for hook in hooks ] )
def import_string ( import_name , silent = False ) : """Imports an object based on a string . This is useful if you want to use import paths as endpoints or something similar . An import path can be specified either in dotted notation ( ` ` xml . sax . saxutils . escape ` ` ) or with a colon as object delimiter ( ` ` xml . sax . saxutils : escape ` ` ) . If ` silent ` is True the return value will be ` None ` if the import fails . : param import _ name : the dotted name for the object to import . : param silent : if set to ` True ` import errors are ignored and ` None ` is returned instead . : return : imported object"""
# XXX : py3 review needed assert isinstance ( import_name , string_types ) # force the import name to automatically convert to strings import_name = str ( import_name ) try : if ':' in import_name : module , obj = import_name . split ( ':' , 1 ) elif '.' in import_name : module , obj = import_name . rsplit ( '.' , 1 ) else : return __import__ ( import_name ) # _ _ import _ _ is not able to handle unicode strings in the fromlist # if the module is a package if PY2 and isinstance ( obj , unicode ) : obj = obj . encode ( 'utf-8' ) try : return getattr ( __import__ ( module , None , None , [ obj ] ) , obj ) except ( ImportError , AttributeError ) : # support importing modules not yet set up by the parent module # ( or package for that matter ) modname = module + '.' + obj __import__ ( modname ) return sys . modules [ modname ] except ImportError as e : if not silent : reraise ( ImportStringError , ImportStringError ( import_name , e ) , sys . exc_info ( ) [ 2 ] )
def download_page(url, data=None):
    '''Returns the response for the given url. The optional data argument is
    passed directly to urlopen.'''
    conn = urllib2.urlopen(url, data)
    resp = conn.read()
    conn.close()
    return resp
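Usage sketch (urllib2 implies Python 2; on Python 3 the equivalent call would be urllib.request.urlopen):

    html = download_page("http://example.com")
    posted = download_page("http://example.com/submit", data="key=value")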
def recursive_copy(source, destination):
    """A wrapper around distutils.dir_util.copy_tree that won't throw any
    exception when the source directory does not exist.

    Args:
        source (str): source path
        destination (str): destination path
    """
    if os.path.isdir(source):
        copy_tree(source, destination)
def encrypt_state_m ( self , plaintext_in , key_in , reset ) : """Builds a multiple cycle AES Encryption state machine circuit : param reset : a one bit signal telling the state machine to reset and accept the current plaintext and key : return ready , cipher _ text : ready is a one bit signal showing that the encryption result ( cipher _ text ) has been calculated ."""
if len ( key_in ) != len ( plaintext_in ) : raise pyrtl . PyrtlError ( "AES key and plaintext should be the same length" ) plain_text , key = ( pyrtl . Register ( len ( plaintext_in ) ) for i in range ( 2 ) ) key_exp_in , add_round_in = ( pyrtl . WireVector ( len ( plaintext_in ) ) for i in range ( 2 ) ) counter = pyrtl . Register ( 4 , 'counter' ) round = pyrtl . WireVector ( 4 , 'round' ) counter . next <<= round sub_out = self . _sub_bytes ( plain_text ) shift_out = self . _shift_rows ( sub_out ) mix_out = self . _mix_columns ( shift_out ) key_out = self . _key_expansion ( key , counter ) add_round_out = self . _add_round_key ( add_round_in , key_exp_in ) with pyrtl . conditional_assignment : with reset == 1 : round |= 0 key_exp_in |= key_in # to lower the number of cycles plain_text . next |= add_round_out key . next |= key_in add_round_in |= plaintext_in with counter == 10 : # keep everything the same round |= counter plain_text . next |= plain_text with pyrtl . otherwise : # running through AES round |= counter + 1 key_exp_in |= key_out plain_text . next |= add_round_out key . next |= key_out with counter == 9 : add_round_in |= shift_out with pyrtl . otherwise : add_round_in |= mix_out ready = ( counter == 10 ) return ready , plain_text
def gcp(main=False):
    """Get the current project

    Parameters
    ----------
    main : bool
        If True, the current main project is returned, otherwise the current
        subproject is returned.

    See Also
    --------
    scp : Sets the current project
    project : Creates a new project
    """
    if main:
        return project() if _current_project is None else _current_project
    else:
        return gcp(True) if _current_subproject is None else _current_subproject
def cmd_devop ( self , args ) : '''device operations'''
usage = "Usage: devop <read|write> <spi|i2c> name bus address" if len ( args ) < 5 : print ( usage ) return if args [ 1 ] == 'spi' : bustype = mavutil . mavlink . DEVICE_OP_BUSTYPE_SPI elif args [ 1 ] == 'i2c' : bustype = mavutil . mavlink . DEVICE_OP_BUSTYPE_I2C else : print ( usage ) if args [ 0 ] == 'read' : self . devop_read ( args [ 2 : ] , bustype ) elif args [ 0 ] == 'write' : self . devop_write ( args [ 2 : ] , bustype ) else : print ( usage )
def complete_invoice ( self , invoice_id , complete_dict ) : """Completes an invoice : param complete _ dict : the complete dict with the template id : param invoice _ id : the invoice id : return : Response"""
return self . _create_put_request ( resource = INVOICES , billomat_id = invoice_id , command = COMPLETE , send_data = complete_dict )
def _get_run_input_fields_for_applet ( executable_input , ** kwargs ) : '''Takes the same arguments as the run method . Creates an input hash for the / applet - xxxx / run method .'''
# Although it says " for _ applet " , this is factored out of # DXApplet because apps currently use the same mechanism for unsupported_arg in [ 'stage_instance_types' , 'stage_folders' , 'rerun_stages' , 'ignore_reuse_stages' ] : if kwargs . get ( unsupported_arg ) : raise DXError ( unsupported_arg + ' is not supported for applets (only workflows)' ) return DXExecutable . _get_run_input_common_fields ( executable_input , ** kwargs )
def cbpdnmd_ystep ( k ) : """Do the Y step of the cbpdn stage . The only parameter is the slice index ` k ` and there are no return values ; all inputs and outputs are from and to global variables ."""
if mp_W . shape [ 0 ] > 1 : W = mp_W [ k ] else : W = mp_W AXU0 = mp_DX [ k ] - mp_S [ k ] + mp_Z_U0 [ k ] AXU1 = mp_Z_X [ k ] + mp_Z_U1 [ k ] mp_Z_Y0 [ k ] = mp_xrho * AXU0 / ( W ** 2 + mp_xrho ) mp_Z_Y1 [ k ] = sp . prox_l1 ( AXU1 , ( mp_lmbda / mp_xrho ) )
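As a reading aid, the two assignments above are the weighted data-fidelity update and the soft-thresholding (l1 proximal) update of the ADMM Y step; in the notation of the code (an interpretation, matching the variable names):

Y_0 = \frac{\rho\,(D X - S + U_0)}{W^2 + \rho},
\qquad
Y_1 = \operatorname{prox}_{(\lambda/\rho)\,\|\cdot\|_1}(X + U_1)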
def grouper_dict(d, n): """Evenly divide a dictionary into chunks of ``n`` items; the final chunk may hold fewer items and is not padded. Usage:: >>> list(grouper_dict({1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E', 6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J'}, 3)) [{1: 'A', 2: 'B', 3: 'C'}, {4: 'D', 5: 'E', 6: 'F'}, {7: 'G', 8: 'H', 9: 'I'}, {10: 'J'}]"""
chunk = dict()
counter = 0
for k, v in d.items():
    counter += 1
    chunk[k] = v
    if counter == n:
        yield chunk
        chunk = dict()
        counter = 0
if len(chunk) > 0:
    yield chunk
def size ( ctx , dataset , kwargs ) : "Show dataset size"
kwargs = parse_kwargs(kwargs)
print(data(dataset, **ctx.obj).get(**kwargs).complete_set.size)
def get_last_weekday_in_month ( year , month , weekday ) : """Get the last weekday in a given month . e . g : > > > # the last monday in Jan 2013 > > > Calendar . get _ last _ weekday _ in _ month ( 2013 , 1 , MON ) datetime . date ( 2013 , 1 , 28)"""
day = date ( year , month , monthrange ( year , month ) [ 1 ] ) while True : if day . weekday ( ) == weekday : break day = day - timedelta ( days = 1 ) return day
def get_upload_content ( pmid , force_fulltext_lookup = False ) : """Get full text and / or abstract for paper and upload to S3."""
# Make sure that the PMID doesn ' t start with PMID so that it doesn ' t # screw up the literature clients if pmid . startswith ( 'PMID' ) : pmid = pmid [ 4 : ] # First , check S3: ( ft_content_s3 , ft_content_type_s3 ) = get_full_text ( pmid ) # The abstract is on S3 but there is no full text ; if we ' re not forcing # fulltext lookup , then we ' re done if ft_content_type_s3 == 'abstract' and not force_fulltext_lookup : return ( ft_content_s3 , ft_content_type_s3 ) # If there ' s nothing ( even an abstract on S3 ) , or if there ' s an abstract # and we ' re forcing fulltext lookup , do the lookup elif ft_content_type_s3 is None or ( ft_content_type_s3 == 'abstract' and force_fulltext_lookup ) or ( ft_content_type_s3 == 'elsevier_xml' and not elsevier_client . extract_text ( ft_content_s3 ) ) : if ft_content_type_s3 == 'elsevier_xml' : logger . info ( 'PMID%s: elsevier_xml cached on S3 is missing full ' 'text element, getting again.' % pmid ) # Try to retrieve from literature client logger . info ( "PMID%s: getting content using literature client" % pmid ) ( ft_content , ft_content_type ) = lit . get_full_text ( pmid , 'pmid' ) assert ft_content_type in ( 'pmc_oa_xml' , 'elsevier_xml' , 'abstract' , None ) # If we tried to get the full text and didn ' t even get the abstract , # then there was probably a problem with the web service . Try to # get the abstract instead : if ft_content_type is None : return ( None , None ) # If we got the abstract , and we already had the abstract on S3 , then # do nothing elif ft_content_type == 'abstract' and ft_content_type_s3 == 'abstract' : logger . info ( "PMID%s: found abstract but already had it on " "S3; skipping" % pmid ) return ( ft_content , ft_content_type ) # If we got the abstract , and we had nothing on S3 , then upload elif ft_content_type == 'abstract' and ft_content_type_s3 is None : logger . info ( "PMID%s: found abstract, uploading to S3" % pmid ) put_abstract ( pmid , ft_content ) return ( ft_content , ft_content_type ) # If we got elsevier _ xml , but cannot get a full text element , then # get and put the abstract elif ft_content_type == 'elsevier_xml' and not elsevier_client . extract_text ( ft_content ) : logger . info ( "PMID%s: Couldn't get a full text element for " "the elsevier_xml content; getting abstract " % pmid ) abstract = pubmed_client . get_abstract ( pmid ) # Abstract is None , so return None if abstract is None : logger . info ( "PMID%s: Unable to get abstract, returning None" % pmid ) return ( None , None ) # Otherwise , upload and return the abstract else : logger . info ( "PMID%s: Uploading and returning abstract " % pmid ) put_abstract ( pmid , abstract ) return ( abstract , 'abstract' ) # We got a viable full text # ( or something other than None or abstract . . . ) else : logger . info ( "PMID%s: uploading and returning %s" % ( pmid , ft_content_type ) ) put_full_text ( pmid , ft_content , full_text_type = ft_content_type ) return ( ft_content , ft_content_type ) # Some form of full text is already on S3 else : # TODO # In future , could check for abstract even if full text is found , and # upload it just to have it return ( ft_content_s3 , ft_content_type_s3 ) # We should always return before we get here assert False
def iter_sources ( self ) : """Iterates over all source names and IDs ."""
for src_id in xrange ( self . get_source_count ( ) ) : yield src_id , self . get_source_name ( src_id )
def items ( self ) : """Get all list items"""
query = self.get_queryset()
fields = self.get_model_config().get_list_fields()
for item in query.iterator():
    row = OrderedDict()
    for field_name in self.get_current_fields():
        field = fields.get(field_name)
        if not field:
            # unknown field, leave the cell empty
            row[field_name] = ''
        elif hasattr(item, field['field']):
            row[field_name] = getattr(item, field['field'])
        else:
            row[field_name] = ''
        # TODO Maybe render field and strip html?
    yield row
def inet_ntoa ( address ) : """Convert a network format IPv6 address into text . @ param address : the binary address @ type address : string @ rtype : string @ raises ValueError : the address isn ' t 16 bytes long"""
if len ( address ) != 16 : raise ValueError ( "IPv6 addresses are 16 bytes long" ) hex = address . encode ( 'hex_codec' ) chunks = [ ] i = 0 l = len ( hex ) while i < l : chunk = hex [ i : i + 4 ] # strip leading zeros . we do this with an re instead of # with lstrip ( ) because lstrip ( ) didn ' t support chars until # python 2.2.2 m = _leading_zero . match ( chunk ) if not m is None : chunk = m . group ( 1 ) chunks . append ( chunk ) i += 4 # Compress the longest subsequence of 0 - value chunks to : : best_start = 0 best_len = 0 start = - 1 last_was_zero = False for i in xrange ( 8 ) : if chunks [ i ] != '0' : if last_was_zero : end = i current_len = end - start if current_len > best_len : best_start = start best_len = current_len last_was_zero = False elif not last_was_zero : start = i last_was_zero = True if last_was_zero : end = 8 current_len = end - start if current_len > best_len : best_start = start best_len = current_len if best_len > 0 : if best_start == 0 and ( best_len == 6 or best_len == 5 and chunks [ 5 ] == 'ffff' ) : # We have an embedded IPv4 address if best_len == 6 : prefix = '::' else : prefix = '::ffff:' hex = prefix + dns . ipv4 . inet_ntoa ( address [ 12 : ] ) else : hex = ':' . join ( chunks [ : best_start ] ) + '::' + ':' . join ( chunks [ best_start + best_len : ] ) else : hex = ':' . join ( chunks ) return hex
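As a cross-check (not part of the original module), the standard library produces the same compressed text form on Python 3; the addresses below are illustrative.

import socket

# socket.inet_ntop applies the same "compress the longest zero run" rule.
packed = bytes([0] * 15 + [1])
print(socket.inet_ntop(socket.AF_INET6, packed))    # '::1'

packed4 = bytes([0] * 10 + [0xff, 0xff, 192, 0, 2, 1])
print(socket.inet_ntop(socket.AF_INET6, packed4))   # typically '::ffff:192.0.2.1'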
def parameters ( self , value ) : """Used to set all of the model ' s parameters to new values . * * Parameters : * * value : array _ like New values for the model parameters . Must be of length ` ` self . n _ parameters ` ` ."""
if len ( value ) != self . n_parameters : raise ValueError ( "Incorrect length of parameter vector. " "Model has %d parameters, but got %d" % ( self . n_parameters , len ( value ) ) ) i = 0 for hl in self . hidden_layers : hl . parameters = value [ i : i + hl . n_parameters ] i += hl . n_parameters self . top_layer . parameters = value [ - self . top_layer . n_parameters : ]
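A small illustration of the slicing convention the setter uses: hidden layers are filled first, in order, then the top layer. The zero vector and the model variable are stand-ins, not part of the source.

import numpy as np

# Build one flat vector whose length matches model.n_parameters and assign it.
flat = np.zeros(sum(hl.n_parameters for hl in model.hidden_layers)
                + model.top_layer.n_parameters)
model.parameters = flat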
def inverse_transform(t): """Return the inverse transformation of t :param t: A transform [[x, y, z], [x, y, z, w]] :return: t2 such that multiply_transform(t, t2) = [[0, 0, 0], [0, 0, 0, 1]]"""
return [ quat_rotate ( tf . transformations . quaternion_inverse ( t [ 1 ] ) , [ - t [ 0 ] [ 0 ] , - t [ 0 ] [ 1 ] , - t [ 0 ] [ 2 ] ] ) , tf . transformations . quaternion_inverse ( t [ 1 ] ) ]
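Written out, with p the translation and q the quaternion of t, the value returned above is (an interpretative note, matching the code rather than quoted from the source):

t^{-1} = \bigl(\,R(q^{-1})\,(-p),\; q^{-1}\,\bigr),
\qquad
t \cdot t^{-1} = \bigl([0, 0, 0],\,[0, 0, 0, 1]\bigr)

where R(q^{-1}) denotes rotation by the inverse quaternion, i.e. the quat_rotate call in the body.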
def delete_all ( self , * args , ** kwargs ) : """Deletes objects that match a set of conditions supplied . This method forwards filters directly to the repository . It does not instantiate entities and it does not trigger Entity callbacks or validations . Returns the number of objects matched and deleted ."""
deleted_item_count = 0 repository = repo_factory . get_repository ( self . _entity_cls ) try : deleted_item_count = repository . delete_all ( self . _criteria ) except Exception : # FIXME Log Exception raise return deleted_item_count
def _assert_is_dictlike ( maybe_dictlike , valid_keys ) : """Raises a TypeError iff ` maybe _ dictlike ` is not a dictlike object ."""
# This covers a common mistake when people use incorrect dictionary nesting # for initializers / partitioners etc . The previous error message was quite # opaque , this should be much clearer . if not hasattr ( maybe_dictlike , "__getitem__" ) : raise TypeError ( "Expected a dict-like object with possible keys %s, received %s" % ( str ( valid_keys ) , str ( maybe_dictlike ) ) )
def _gwf_channel_segments ( path , channel , warn = True ) : """Yields the segments containing data for ` ` channel ` ` in this GWF path"""
stream = open_gwf ( path ) # get segments for frames toc = stream . GetTOC ( ) secs = toc . GetGTimeS ( ) nano = toc . GetGTimeN ( ) dur = toc . GetDt ( ) readers = [ getattr ( stream , 'ReadFr{0}Data' . format ( type_ . title ( ) ) ) for type_ in ( "proc" , "sim" , "adc" ) ] # for each segment , try and read the data for this channel for i , ( s , ns , dt ) in enumerate ( zip ( secs , nano , dur ) ) : for read in readers : try : read ( i , channel ) except ( IndexError , ValueError ) : continue readers = [ read ] # use this one from now on epoch = LIGOTimeGPS ( s , ns ) yield Segment ( epoch , epoch + dt ) break else : # none of the readers worked for this channel , warn if warn : warnings . warn ( "{0!r} not found in frame {1} of {2}" . format ( channel , i , path ) , )
def tx_extract(payload, senders, inputs, outputs, block_id, vtxindex, txid): """Extract and return a dict of fields from the underlying blockchain transaction data that are useful to this operation. Structure: the input side carries the sender scriptsig + scriptPubkey; the output side carries, in order, an OP_RETURN with the token transfer payload, the recipient script (DUST_MINIMUM), and the sender's change address. The recipient script identifies the recipient address. This is its own output to ensure that the underlying blockchain can and will enforce signatures from the recipient on future spend transactions. Also, it makes it straightforward to track blockstack transactions in existing block explorers. Any other inputs and outputs are allowed."""
sender_script = None sender_address = None recipient_script = None recipient_address = None try : # first two outputs matter to us assert check_tx_output_types ( outputs [ : 2 ] , block_id ) assert len ( senders ) > 0 assert 'script_pubkey' in senders [ 0 ] . keys ( ) assert 'addresses' in senders [ 0 ] . keys ( ) sender_script = str ( senders [ 0 ] [ 'script_pubkey' ] ) sender_address = str ( senders [ 0 ] [ 'addresses' ] [ 0 ] ) assert sender_script is not None assert sender_address is not None recipient_script = get_token_transfer_recipient_from_outputs ( outputs ) recipient_address = virtualchain . script_hex_to_address ( recipient_script ) assert recipient_script is not None assert recipient_address is not None except Exception , e : log . exception ( e ) raise Exception ( "Failed to extract" ) parsed_payload = parse ( payload , block_id ) assert parsed_payload is not None ret = { } ret . update ( parsed_payload ) ret . update ( { 'address' : sender_address , 'sender' : sender_script , 'recipient_address' : recipient_address , 'recipient' : recipient_script , 'op' : TOKEN_TRANSFER , 'block_id' : block_id , 'txid' : txid , 'vtxindex' : vtxindex } ) return ret
async def _flowupdater ( self ) : """Coroutine calling ` updateflow ( ) `"""
lastresult = set ( v for v in self . _savedresult if v is not None and not v . isdeleted ( ) ) flowupdate = FlowUpdaterNotification . createMatcher ( self , FlowUpdaterNotification . FLOWUPDATE ) while True : currentresult = [ v for v in self . _savedresult if v is not None and not v . isdeleted ( ) ] # Calculating differences additems = [ ] updateditems = [ ] updatedset2 = self . _updatedset2 for v in currentresult : if v not in lastresult : additems . append ( v ) else : lastresult . remove ( v ) if v in updatedset2 : # Updated updateditems . append ( v ) removeitems = lastresult self . _updatedset2 . clear ( ) # Save current result for next difference lastresult = set ( currentresult ) if not additems and not removeitems and not updateditems : await flowupdate continue await self . updateflow ( self . _connection , set ( additems ) , removeitems , set ( updateditems ) )
def cmd ( send , msg , args ) : """Translate something . Syntax : { command } [ - - from < language code > ] [ - - to < language code > ] < text > See https : / / cloud . google . com / translate / v2 / translate - reference # supported _ languages for a list of valid language codes"""
parser = arguments . ArgParser ( args [ 'config' ] ) parser . add_argument ( '--lang' , '--from' , default = None ) parser . add_argument ( '--to' , default = 'en' ) parser . add_argument ( 'msg' , nargs = '+' ) try : cmdargs = parser . parse_args ( msg ) except arguments . ArgumentException as e : send ( str ( e ) ) return send ( gen_translate ( ' ' . join ( cmdargs . msg ) , cmdargs . lang , cmdargs . to ) )
async def complete(self, code: str, opts: dict = None) -> Iterable[str]: '''Gets the auto-completion candidates from the given code string, as if a user has pressed the tab key just after the code in an IDE. Depending on the language of the compute session, this feature may not be supported; unsupported sessions return an empty list. :param code: An (incomplete) code text. :param opts: Additional information about the current cursor position, such as row, col, line and the remainder text. :returns: An ordered list of strings.'''
opts = { } if opts is None else opts params = { } if self . owner_access_key : params [ 'owner_access_key' ] = self . owner_access_key rqst = Request ( self . session , 'POST' , '/kernel/{}/complete' . format ( self . kernel_id ) , params = params ) rqst . set_json ( { 'code' : code , 'options' : { 'row' : int ( opts . get ( 'row' , 0 ) ) , 'col' : int ( opts . get ( 'col' , 0 ) ) , 'line' : opts . get ( 'line' , '' ) , 'post' : opts . get ( 'post' , '' ) , } , } ) async with rqst . fetch ( ) as resp : return await resp . json ( )
def evaluate ( self , x ) : '''Evaluate this polynomial at value x , returning the result ( which is the sum of all evaluations at each term ) .'''
# Straightforward term-by-term evaluation (kept for reference):
#     c = 0                                # holds the sum over each term in the polynomial
#     p = 1                                # current power of x, multiplied by x after each term
#     for term in self.coefficients[::-1]:
#         c = c + term * p
#         p = p * x
#     return c
# Faster alternative using Horner's scheme
y = self[0]
for i in _range(1, len(self)):
    y = y * x + self.coefficients[i]
return y
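Horner's scheme performs one multiply and one add per coefficient; with the coefficients stored highest power first, as the loop implies, the evaluation is:

p(x) = c_0 x^{n} + c_1 x^{n-1} + \dots + c_n
     = \bigl(\cdots\bigl((c_0 x + c_1)\,x + c_2\bigr)\,x + \cdots\bigr)\,x + c_n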
def _right_zero_blocks ( self , r ) : """Number of blocks with zeros from the right in block row ` r ` ."""
if not self . _include_off_diagonal : return self . _block_rows - r - 1 elif self . _upper : return 0 elif self . _include_diagonal : return self . _block_rows - r - 1 else : return self . _block_rows - r
def sent_folder ( self ) : """Shortcut to get SentItems Folder instance : rtype : mailbox . Folder"""
return self . folder_constructor ( parent = self , name = 'SentItems' , folder_id = OutlookWellKnowFolderNames . SENT . value )
def SLICE_0 ( self , instr ) : 'obj [ : ]'
value = self . ast_stack . pop ( ) kw = dict ( lineno = instr . lineno , col_offset = 0 ) slice = _ast . Slice ( lower = None , step = None , upper = None , ** kw ) subscr = _ast . Subscript ( value = value , slice = slice , ctx = _ast . Load ( ) , ** kw ) self . ast_stack . append ( subscr )
def parse ( self , line ) : """Parse a line of the Nginx error log"""
csv_list = line . split ( "," ) date_time_message = csv_list . pop ( 0 ) . split ( " " , 2 ) otherinfo = dict ( ) for item in csv_list : key_value_pair = item . split ( ":" , 1 ) key = key_value_pair [ 0 ] . strip ( ) if len ( key_value_pair ) > 1 : value = key_value_pair [ 1 ] . strip ( ) if not value : value = "-" else : value = "-" otherinfo [ key ] = value self . message = '%s\n' 'Date: %s\n' 'Time: %s\n' 'Request: %s\n' 'Referrer: %s\n' 'Server: %s\n' 'Client: %s\n' 'Host: %s\n' 'Upstream: %s\n' self . params = [ date_time_message [ 2 ] , date_time_message [ 0 ] , date_time_message [ 1 ] , otherinfo . get ( "request" , "-" ) , otherinfo . get ( "referrer" , "-" ) , otherinfo . get ( "server" , "-" ) , otherinfo . get ( "client" , "-" ) , otherinfo . get ( "host" , "-" ) , otherinfo . get ( "upstream" , "-" ) , ] self . site = otherinfo . get ( "referrer" , "-" )
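A hypothetical input in the stock nginx error-log format and the fields the splits above would yield; the class name Parser and the log line itself are illustrative assumptions, not taken from the source.

line = ('2019/03/01 10:15:02 [error] 1234#0: *5 open() failed (2: No such file), '
        'client: 203.0.113.7, server: example.com, '
        'request: "GET /missing HTTP/1.1", host: "example.com"')
p = Parser()  # hypothetical class exposing the parse() method above
p.parse(line)
# date_time_message -> ['2019/03/01', '10:15:02',
#                       '[error] 1234#0: *5 open() failed (2: No such file)']
# otherinfo -> {'client': '203.0.113.7', 'server': 'example.com',
#               'request': '"GET /missing HTTP/1.1"', 'host': '"example.com"'}
# keys missing from the line, such as "referrer" and "upstream", fall back to "-"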
def shutdown ( self ) : """Shutdown the application and exit : returns : No return value"""
task = asyncio . ensure_future ( self . core . shutdown ( ) ) self . loop . run_until_complete ( task )
def infer_object_subtype ( self , api , pid = None , create = False , default_pidspace = None ) : """Construct a DigitalObject or appropriate subclass , inferring the appropriate subtype using : meth : ` best _ subtype _ for _ object ` . Note that this method signature has been selected to match the : class : ` ~ eulfedora . models . DigitalObject ` constructor so that this method might be passed directly to : meth : ` get _ object ` as a ` type ` : : > > > obj = repo . get _ object ( pid , type = repo . infer _ object _ subtype ) See also : : class : ` TypeInferringRepository `"""
obj = DigitalObject ( api , pid , create , default_pidspace ) if create : return obj if not obj . exists : return obj match_type = self . best_subtype_for_object ( obj ) return match_type ( api , pid )