def _escape_str(value):
    """Escape a value into a TS3 compatible string.

    @param value: Value
    @type value: string/int
    """
    if isinstance(value, int):
        return "%d" % value
    value = value.replace("\\", r'\\')
    for i, j in ts3_escape.items():
        value = value.replace(i, j)
    return value
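A minimal usage sketch. The ``ts3_escape`` mapping used below is a made-up stand-in; the real module defines its own table, so the exact replacements are illustrative only.

# Hypothetical escape table; the real ts3_escape mapping may differ.
ts3_escape = {" ": r"\s", "/": r"\/", "\n": r"\n"}

print(_escape_str(42))         # -> 42
print(_escape_str("a b/c"))    # -> a\sb\/c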
def _ScheduleVariableHunt(hunt_obj):
    """Schedules flows for a variable hunt."""
    if hunt_obj.client_rate != 0:
        raise VariableHuntCanNotHaveClientRateError(hunt_obj.hunt_id,
                                                    hunt_obj.client_rate)

    seen_clients = set()
    for flow_group in hunt_obj.args.variable.flow_groups:
        for client_id in flow_group.client_ids:
            if client_id in seen_clients:
                raise CanStartAtMostOneFlowPerClientError(hunt_obj.hunt_id,
                                                          client_id)
            seen_clients.add(client_id)

    now = rdfvalue.RDFDatetime.Now()
    for flow_group in hunt_obj.args.variable.flow_groups:
        flow_cls = registry.FlowRegistry.FlowClassByName(flow_group.flow_name)
        flow_args = flow_group.flow_args if flow_group.HasField("flow_args") else None

        for client_id in flow_group.client_ids:
            flow.StartFlow(
                client_id=client_id,
                creator=hunt_obj.creator,
                cpu_limit=hunt_obj.per_client_cpu_limit,
                network_bytes_limit=hunt_obj.per_client_network_bytes_limit,
                flow_cls=flow_cls,
                flow_args=flow_args,
                # Setting start_at explicitly ensures that flow.StartFlow won't
                # process the flow's Start state right away. Only the flow
                # request will be scheduled.
                start_at=now,
                parent_hunt_id=hunt_obj.hunt_id)
def to_html(self, classes=None, notebook=False):
    """Render a DataFrame to an HTML table.

    Parameters
    ----------
    notebook : {True, False}, optional, default False
        Whether the generated HTML is for IPython Notebook.
    """
    html_renderer = HTMLFormatter(self, classes=classes,
                                  max_rows=self.max_rows,
                                  max_cols=self.max_cols,
                                  notebook=notebook)
    if hasattr(self.buf, 'write'):
        html_renderer.write_result(self.buf)
    elif isinstance(self.buf, six.string_types):
        with open(self.buf, 'w') as f:
            html_renderer.write_result(f)
    else:
        raise TypeError('buf is not a file name and it has no write method')
def clearData(self, type: str = '') -> None:
    """Remove data of the given type format. If the type argument is
    omitted, remove all data."""
    type = normalize_type(type)
    if not type:
        self.__data.clear()
    elif type in self.__data:
        del self.__data[type]
def _find_scalac_plugins ( self , scalac_plugins , classpath ) : """Returns a map from plugin name to list of plugin classpath entries . The first entry in each list is the classpath entry containing the plugin metadata . The rest are the internal transitive deps of the plugin . This allows us to have in - repo plugins with dependencies ( unlike javac , scalac doesn ' t load plugins or their deps from the regular classpath , so we have to provide these entries separately , in the - Xplugin : flag ) . Note that we don ' t currently support external plugins with dependencies , as we can ' t know which external classpath elements are required , and we ' d have to put the entire external classpath on each - Xplugin : flag , which seems excessive . Instead , external plugins should be published as " fat jars " ( which appears to be the norm , since SBT doesn ' t support plugins with dependencies anyway ) ."""
# Allow multiple flags and also comma - separated values in a single flag . plugin_names = { p for val in scalac_plugins for p in val . split ( ',' ) } if not plugin_names : return { } active_plugins = { } buildroot = get_buildroot ( ) cp_product = self . context . products . get_data ( 'runtime_classpath' ) for classpath_element in classpath : name = self . _maybe_get_plugin_name ( classpath_element ) if name in plugin_names : plugin_target_closure = self . _plugin_targets ( 'scalac' ) . get ( name , [ ] ) # It ' s important to use relative paths , as the compiler flags get embedded in the zinc # analysis file , and we port those between systems via the artifact cache . rel_classpath_elements = [ os . path . relpath ( cpe , buildroot ) for cpe in ClasspathUtil . internal_classpath ( plugin_target_closure , cp_product , self . _confs ) ] # If the plugin is external then rel _ classpath _ elements will be empty , so we take # just the external jar itself . rel_classpath_elements = rel_classpath_elements or [ classpath_element ] # Some classpath elements may be repeated , so we allow for that here . if active_plugins . get ( name , rel_classpath_elements ) != rel_classpath_elements : raise TaskError ( 'Plugin {} defined in {} and in {}' . format ( name , active_plugins [ name ] , classpath_element ) ) active_plugins [ name ] = rel_classpath_elements if len ( active_plugins ) == len ( plugin_names ) : # We ' ve found all the plugins , so return now to spare us from processing # of the rest of the classpath for no reason . return active_plugins # If we get here we must have unresolved plugins . unresolved_plugins = plugin_names - set ( active_plugins . keys ( ) ) raise TaskError ( 'Could not find requested plugins: {}' . format ( list ( unresolved_plugins ) ) )
def split_key(key):
    """Splits a node key."""
    if key == KEY_SEP:
        return ()
    key_chunks = tuple(key.strip(KEY_SEP).split(KEY_SEP))
    if key_chunks[0].startswith(KEY_SEP):
        return (key_chunks[0][len(KEY_SEP):],) + key_chunks[1:]
    else:
        return key_chunks
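A short usage sketch, assuming ``KEY_SEP`` is the module's path separator (``'/'`` is an assumption here, not a documented value).

KEY_SEP = '/'

split_key('/')         # -> ()
split_key('/a/b/c')    # -> ('a', 'b', 'c')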
def add_curves_from_las(self, fname, remap=None, funcs=None):
    """Given a LAS file, add curves from it to the current well instance.
    Essentially just wraps ``add_curves_from_lasio()``.

    Args:
        fname (str): The path of the LAS file to read curves from.
        remap (dict): Optional. A dict of 'old': 'new' LAS field names.
        funcs (dict): Optional. A dict of 'las field': function() for
            implementing a transform before loading. Can be a lambda.

    Returns:
        None. Works in place.
    """
    try:
        # Treat fname as a single file.
        self.add_curves_from_lasio(lasio.read(fname), remap=remap, funcs=funcs)
    except Exception:
        # It's a list of files.
        for f in fname:
            self.add_curves_from_lasio(lasio.read(f), remap=remap, funcs=funcs)
    return None
async def reset_user_password(self, username):
    """Reset user password.

    :param str username: Username
    :returns: A :class:`~juju.user.User` instance
    """
    user_facade = client.UserManagerFacade.from_connection(self.connection())
    entity = client.Entity(tag.user(username))
    results = await user_facade.ResetPassword([entity])
    secret_key = results.results[0].secret_key
    return await self.get_user(username, secret_key=secret_key)
def remove_by_example(cls, collection, example_data, wait_for_sync=None, limit=None):
    """This will find all documents in the collection that match the
    specified example object.

    Note: the limit attribute is not supported on sharded collections.
    Using it will result in an error.

    The options attributes waitForSync and limit can be given without
    encapsulation in a JSON object, but this may be deprecated in future
    versions of ArangoDB.

    Returns result dict of the request.

    :param collection Collection instance
    :param example_data An example document that all collection documents
        are compared against.
    :param wait_for_sync if set to true, then all removal operations will
        instantly be synchronised to disk. If this is not specified, then
        the collection's default sync behavior will be applied.
    :param limit an optional value that determines how many documents to
        replace at most. If limit is specified but is less than the number
        of documents in the collection, it is undefined which of the
        documents will be replaced.
    """
    kwargs = {
        'options': {
            'waitForSync': wait_for_sync,
            'limit': limit,
        }
    }
    return cls._construct_query(name='remove-by-example',
                                collection=collection,
                                example=example_data,
                                result=False,
                                **kwargs)
def get_environment(id=None, name=None):
    """Get a specific Environment by name or ID."""
    data = get_environment_raw(id, name)
    if data:
        return utils.format_json(data)
def get_scan_log_lines(self, source_id, scan_id):
    """Get the log text for a Scan.

    :rtype: Iterator over log lines.
    """
    return self.client.get_manager(Scan).get_log_lines(source_id=source_id,
                                                       scan_id=scan_id)
def compute_file_metrics(processors, language, key, token_list):
    """Use processors to compute file metrics."""
    # Multiply the iterator so each processor gets its own copy.
    tli = itertools.tee(token_list, len(processors))

    metrics = OrderedDict()

    # Reset all processors.
    for p in processors:
        p.reset()

    # Process all tokens.
    for p, tl in zip(processors, tli):
        p.process_file(language, key, tl)

    # Collect metrics from all processors.
    for p in processors:
        metrics.update(p.metrics)

    return metrics
def find_field_by_name(browser, field_type, name):
    """Locate the control input with the given ``name``.

    :param browser: ``world.browser``
    :param string field_type: a field type (i.e. `button`)
    :param string name: ``name`` attribute

    Returns: an :class:`ElementSelector`
    """
    return ElementSelector(
        browser,
        field_xpath(field_type, 'name') % string_literal(name),
        filter_displayed=True,
    )
def convert_type(self, type):
    """Convert type to BigQuery."""
    # Mapping
    mapping = {
        'any': 'STRING',
        'array': None,
        'boolean': 'BOOLEAN',
        'date': 'DATE',
        'datetime': 'DATETIME',
        'duration': None,
        'geojson': None,
        'geopoint': None,
        'integer': 'INTEGER',
        'number': 'FLOAT',
        'object': None,
        'string': 'STRING',
        'time': 'TIME',
        'year': 'INTEGER',
        'yearmonth': None,
    }

    # Not supported type
    if type not in mapping:
        message = 'Type %s is not supported' % type
        raise tableschema.exceptions.StorageError(message)

    return mapping[type]
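A short usage sketch; ``mapper`` stands for an instance of the class this method belongs to (an assumption for illustration). The behaviour for unknown names and for names mapped to None follows directly from the table above.

mapper.convert_type('integer')    # -> 'INTEGER'
mapper.convert_type('geojson')    # -> None (known, but has no BigQuery equivalent)

try:
    mapper.convert_type('uuid')   # not in the mapping at all
except tableschema.exceptions.StorageError as exc:
    print(exc)                    # Type uuid is not supported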
def ReadChildFlowObjects(self, client_id, flow_id):
    """Reads flows that were started by a given flow from the database."""
    res = []
    for flow in itervalues(self.flows):
        if flow.client_id == client_id and flow.parent_flow_id == flow_id:
            res.append(flow)
    return res
def calc_zscale ( self , data , contrast = 0.25 , num_points = 1000 , num_per_row = None ) : """From the IRAF documentation : The zscale algorithm is designed to display the image values near the median image value without the time consuming process of computing a full image histogram . This is particularly useful for astronomical images which generally have a very peaked histogram corresponding to the background sky in direct imaging or the continuum in a two dimensional spectrum . The sample of pixels , specified by values greater than zero in the sample mask zmask or by an image section , is selected up to a maximum of nsample pixels . If a bad pixel mask is specified by the bpmask parameter then any pixels with mask values which are greater than zero are not counted in the sample . Only the first pixels up to the limit are selected where the order is by line beginning from the first line . If no mask is specified then a grid of pixels with even spacing along lines and columns that make up a number less than or equal to the maximum sample size is used . If a contrast of zero is specified ( or the zrange flag is used and the image does not have a valid minimum / maximum value ) then the minimum and maximum of the sample is used for the intensity mapping range . If the contrast is not zero the sample pixels are ranked in brightness to form the function I ( i ) , where i is the rank of the pixel and I is its value . Generally the midpoint of this function ( the median ) is very near the peak of the image histogram and there is a well defined slope about the midpoint which is related to the width of the histogram . At the ends of the I ( i ) function there are a few very bright and dark pixels due to objects and defects in the field . To determine the slope a linear function is fit with iterative rejection ; I ( i ) = intercept + slope * ( i - midpoint ) If more than half of the points are rejected then there is no well defined slope and the full range of the sample defines z1 and z2. Otherwise the endpoints of the linear function are used ( provided they are within the original range of the sample ) : z1 = I ( midpoint ) + ( slope / contrast ) * ( 1 - midpoint ) z2 = I ( midpoint ) + ( slope / contrast ) * ( npoints - midpoint ) As can be seen , the parameter contrast may be used to adjust the contrast produced by this algorithm ."""
assert len ( data . shape ) >= 2 , AutoCutsError ( "input data should be 2D or greater" ) ht , wd = data . shape [ : 2 ] assert ( 0.0 < contrast <= 1.0 ) , AutoCutsError ( "contrast (%.2f) not in range 0 < c <= 1" % ( contrast ) ) # calculate num _ points parameter , if omitted total_points = np . size ( data ) if num_points is None : num_points = max ( int ( total_points * 0.0002 ) , 600 ) num_points = min ( num_points , total_points ) assert ( 0 < num_points <= total_points ) , AutoCutsError ( "num_points not in range 0-%d" % ( total_points ) ) # calculate num _ per _ row parameter , if omitted if num_per_row is None : num_per_row = max ( int ( 0.015 * num_points ) , 1 ) self . logger . debug ( "contrast=%.4f num_points=%d num_per_row=%d" % ( contrast , num_points , num_per_row ) ) # sample the data num_rows = num_points // num_per_row xmax = wd - 1 xskip = max ( xmax // num_per_row , 1 ) ymax = ht - 1 yskip = max ( ymax // num_rows , 1 ) # evenly spaced sampling over rows and cols # # xskip = int ( max ( 1.0 , np . sqrt ( xmax * ymax / float ( num _ points ) ) ) ) # # yskip = xskip cutout = data [ 0 : ymax : yskip , 0 : xmax : xskip ] # flatten and trim off excess cutout = cutout . flat [ 0 : num_points ] # actual number of points selected num_pix = len ( cutout ) assert num_pix <= num_points , AutoCutsError ( "Actual number of points (%d) exceeds calculated " "number (%d)" % ( num_pix , num_points ) ) # sort the data by value cutout = np . sort ( cutout ) # flat distribution ? data_min = np . nanmin ( cutout ) data_max = np . nanmax ( cutout ) if ( data_min == data_max ) or ( contrast == 0.0 ) : return ( data_min , data_max ) # compute the midpoint and median midpoint = ( num_pix // 2 ) if num_pix % 2 != 0 : median = cutout [ midpoint ] else : median = 0.5 * ( cutout [ midpoint - 1 ] + cutout [ midpoint ] ) self . logger . debug ( "num_pix=%d midpoint=%d median=%.4f" % ( num_pix , midpoint , median ) ) # # # Remove outliers to aid fitting # # threshold = np . std ( cutout ) * 2.5 # # cutout = cutout [ np . where ( np . fabs ( cutout - median ) > threshold ) ] # # num _ pix = len ( cutout ) # zscale fitting function : # I ( x ) = slope * ( x - midpoint ) + intercept def fitting ( x , slope , intercept ) : y = slope * ( x - midpoint ) + intercept return y # compute a least squares fit X = np . arange ( num_pix ) Y = cutout sigma = np . array ( [ 1.0 ] * num_pix ) guess = np . array ( [ 0.0 , 0.0 ] ) # Curve fit with _lock : # NOTE : without this mutex , optimize . curvefit causes a fatal error # sometimes - - it appears not to be thread safe . # The error is : # " SystemError : null argument to internal routine " # " Fatal Python error : GC object already tracked " try : p , cov = optimize . curve_fit ( fitting , X , Y , guess , sigma ) except Exception as e : self . logger . debug ( "curve fitting failed: %s" % ( str ( e ) ) ) cov = None if cov is None : self . logger . debug ( "curve fitting failed" ) return ( float ( data_min ) , float ( data_max ) ) slope , intercept = p # # num _ chosen = 0 self . logger . debug ( "intercept=%f slope=%f" % ( intercept , slope ) ) # # if num _ chosen < ( num _ pix / / 2 ) : # # self . logger . 
debug ( " more than half pixels rejected - - falling back to min / max of sample " ) # # return ( data _ min , data _ max ) # finally , compute the range falloff = slope / contrast z1 = median - midpoint * falloff z2 = median + ( num_pix - midpoint ) * falloff # final sanity check on cut levels locut = max ( z1 , data_min ) hicut = min ( z2 , data_max ) if locut >= hicut : locut = data_min hicut = data_max return ( float ( locut ) , float ( hicut ) )
def getSlicesString(self):
    """Returns a string representation of the slices that are used to get
    the sliced array. For example returns '[:, 5]' if the combo box selects
    dimension 0 and the spin box 5.
    """
    if not self.rtiIsSliceable:
        return ''

    # The dimensions that are selected in the combo boxes will be set to
    # slice(None); the values from the spin boxes will be set as a single
    # integer value.
    nDims = self.rti.nDims
    sliceList = [':'] * nDims

    for spinBox in self._spinBoxes:
        dimNr = spinBox.property("dim_nr")
        sliceList[dimNr] = str(spinBox.value())

    # No need to shuffle combobox dimensions like in getSlicedArray; all
    # combobox dimensions yield a colon.
    return "[" + ", ".join(sliceList) + "]"
def evaluate(self, data):
    """Evaluate the code needed to compute a given Data object."""
    try:
        inputs = copy.deepcopy(data.input)
        hydrate_input_references(inputs, data.process.input_schema)
        hydrate_input_uploads(inputs, data.process.input_schema)

        # Include special 'proc' variable in the context.
        inputs['proc'] = {
            'data_id': data.id,
            'data_dir': self.manager.get_executor().resolve_data_path(),
        }

        # Include special 'requirements' variable in the context.
        inputs['requirements'] = data.process.requirements
        # Inject default values and change resources according to
        # the current Django configuration.
        inputs['requirements']['resources'] = data.process.get_resource_limits()

        script_template = data.process.run.get('program', '')

        # Get the appropriate expression engine. If none is defined, do not
        # evaluate any expressions.
        expression_engine = data.process.requirements.get('expression-engine', None)
        if not expression_engine:
            return script_template

        return self.get_expression_engine(expression_engine).evaluate_block(
            script_template,
            inputs,
            escape=self._escape,
            safe_wrapper=SafeString,
        )
    except EvaluationError as error:
        raise ExecutionError('{}'.format(error))
def markers(data, marker, f_tooltip=None, marker_preferred_size=32):
    """Draw markers.

    :param data: data access object
    :param marker: full filename of the marker image
    :param f_tooltip: function to generate a tooltip on mouseover
    :param marker_preferred_size: size in pixels for the marker images
    """
    from geoplotlib.layers import MarkersLayer
    _global_config.layers.append(
        MarkersLayer(data, marker, f_tooltip, marker_preferred_size))
def add_options ( self ) : """Add program options ."""
super ( RtorrentControl , self ) . add_options ( ) # basic options self . add_bool_option ( "--help-fields" , help = "show available fields and their description" ) self . add_bool_option ( "-n" , "--dry-run" , help = "don't commit changes, just tell what would happen" ) self . add_bool_option ( "--detach" , help = "run the process in the background" ) self . prompt . add_options ( ) # output control self . add_bool_option ( "-S" , "--shell" , help = "escape output following shell rules" ) self . add_bool_option ( "-0" , "--nul" , "--print0" , help = "use a NUL character instead of a linebreak after items" ) self . add_bool_option ( "-c" , "--column-headers" , help = "print column headers" ) self . add_bool_option ( "-+" , "--stats" , help = "add sum / avg / median of numerical fields" ) self . add_bool_option ( "--summary" , help = "print only statistical summary, without the items" ) # self . add _ bool _ option ( " - f " , " - - full " , # help = " print full torrent details " ) self . add_bool_option ( "--json" , help = "dump all items as JSON (use '-o f1,f2,...' to specify fields)" ) self . add_value_option ( "-o" , "--output-format" , "FORMAT" , help = "specify display format (use '-o-' to disable item display)" ) self . add_value_option ( "-O" , "--output-template" , "FILE" , help = "pass control of output formatting to the specified template" ) self . add_value_option ( "-s" , "--sort-fields" , "[-]FIELD[,...] [-s...]" , action = 'append' , default = [ ] , help = "fields used for sorting, descending if prefixed with a '-'; '-s*' uses output field list" ) self . add_bool_option ( "-r" , "--reverse-sort" , help = "reverse the sort order" ) self . add_value_option ( "-A" , "--anneal" , "MODE [-A...]" , type = 'choice' , action = 'append' , default = [ ] , choices = ( 'dupes+' , 'dupes-' , 'dupes=' , 'invert' , 'unique' ) , help = "modify result set using some pre-defined methods" ) self . add_value_option ( "-/" , "--select" , "[N-]M" , help = "select result subset by item position (counting from 1)" ) self . add_bool_option ( "-V" , "--view-only" , help = "show search result only in default ncurses view" ) self . add_value_option ( "--to-view" , "--to" , "NAME" , help = "show search result only in named ncurses view" ) self . add_bool_option ( "--append-view" , "--append" , help = "DEPRECATED: use '--alter append' instead" ) self . add_value_option ( "--alter-view" , "--alter" , "MODE" , type = 'choice' , default = None , choices = self . ALTER_MODES , help = "alter view according to mode: {} (modifies -V and --to behaviour)" . format ( ', ' . join ( self . ALTER_MODES ) ) ) self . add_bool_option ( "--tee-view" , "--tee" , help = "ADDITIONALLY show search results in ncurses view (modifies -V and --to behaviour)" ) self . add_value_option ( "--from-view" , "--from" , "NAME" , help = "select only items that are on view NAME (NAME can be an info hash to quickly select a single item)" ) self . add_value_option ( "-M" , "--modify-view" , "NAME" , help = "get items from given view and write result back to it (short-cut to combine --from-view and --to-view)" ) self . add_value_option ( "-Q" , "--fast-query" , "LEVEL" , type = 'choice' , default = '=' , choices = ( '=' , '0' , '1' , '2' ) , help = "enable query optimization (=: use config; 0: off; 1: safe; 2: danger seeker)" ) self . add_value_option ( "--call" , "CMD" , help = "call an OS command pattern in the shell" ) self . 
add_value_option ( "--spawn" , "CMD [--spawn ...]" , action = "append" , default = [ ] , help = "execute OS command pattern(s) directly" ) # TODO : implement - S # self . add _ bool _ option ( " - S " , " - - summary " , # help = " print statistics " ) # torrent state change ( actions ) for action in self . ACTION_MODES : action . setdefault ( "label" , action . name . upper ( ) ) action . setdefault ( "method" , action . name ) action . setdefault ( "interactive" , False ) action . setdefault ( "argshelp" , "" ) action . setdefault ( "args" , ( ) ) if action . argshelp : self . add_value_option ( * action . options + ( action . argshelp , ) , ** { "help" : action . help + ( " (implies -i)" if action . interactive else "" ) } ) else : self . add_bool_option ( * action . options , ** { "help" : action . help + ( " (implies -i)" if action . interactive else "" ) } ) self . add_value_option ( "--ignore" , "|" . join ( self . IGNORE_OPTIONS ) , type = "choice" , choices = self . IGNORE_OPTIONS , help = "set 'ignore commands' status on torrent" ) self . add_value_option ( "--prio" , "|" . join ( self . PRIO_OPTIONS ) , type = "choice" , choices = self . PRIO_OPTIONS , help = "set priority of torrent" ) self . add_bool_option ( "-F" , "--flush" , help = "flush changes immediately (save session data)" )
def validate_ids(ctx, param, value):
    """Validate a list of IDs and convert them to a list."""
    if not value:
        return None

    ids = [x.strip() for x in value.split(',')]
    for id_item in ids:
        if not id_item.isdigit():
            raise click.BadParameter(
                'Non-numeric value "{0}" provided for an ID.'.format(id_item))

    return ids
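A sketch of how such a callback is typically wired into a click option; the command and option names below are made up for illustration.

import click

@click.command()
@click.option('--ids', callback=validate_ids,
              help='Comma-separated list of numeric IDs.')
def delete(ids):
    # ids is either None or a list of digit strings, e.g. ['1', '2', '3'].
    click.echo(ids)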
def visitValueSetValue(self, ctx: ShExDocParser.ValueSetValueContext):
    """valueSetValue: iriRange | literalRange | languageRange |
    '.' (iriExclusion+ | literalExclusion+ | languageExclusion+)
    """
    if ctx.iriRange() or ctx.literalRange() or ctx.languageRange():
        self.visitChildren(ctx)
    else:
        # '.' branch - wild card with exclusions
        if ctx.iriExclusion():
            vs_value = IriStemRange(Wildcard(), [])
            self._iri_exclusions(vs_value, ctx.iriExclusion())
        elif ctx.literalExclusion():
            vs_value = LiteralStemRange(Wildcard(), [])
            self._literal_exclusions(vs_value, ctx.literalExclusion())
        else:
            vs_value = LanguageStemRange(Wildcard(), [])
            self._language_exclusions(vs_value, ctx.languageExclusion())
        self.nodeconstraint.values.append(vs_value)
def register(self, token, regexp):
    """Register a token.

    Args:
        token (Token): the token class to register
        regexp (str): the regexp for that token
    """
    self._tokens.append((token, re.compile(regexp)))
def tryDynMod(name):
    '''Dynamically import a python module or raise an exception.'''
    try:
        return importlib.import_module(name)
    except ModuleNotFoundError:
        raise s_exc.NoSuchDyn(name=name)
def add_periodic_callback(self, callback, period_milliseconds, callback_id=None):
    """Adds a callback to be run every period_milliseconds until it is removed.

    Returns an ID that can be used with remove_periodic_callback.
    """
    cb = _AsyncPeriodic(callback, period_milliseconds, io_loop=self._loop)
    callback_id = self._assign_remover(callback, callback_id,
                                       self._periodic_callback_removers,
                                       cb.stop)
    cb.start()
    return callback_id
def my_on_connect(client):
    """Example on_connect handler."""
    client.send('You connected from %s\n' % client.addrport())
    if CLIENTS:
        client.send('Also connected are:\n')
        for neighbor in CLIENTS:
            client.send('%s\n' % neighbor.addrport())
    else:
        client.send('Sadly, you are alone.\n')
    CLIENTS.append(client)
def init(plugin_manager, _, _2, config):
    """Init the plugin.

    Available configuration:
    plugins:
        - plugin_module: inginious.frontend.plugins.git_repo
          repo_directory: "./repo_submissions"
    """
    submission_git_saver = SubmissionGitSaver(plugin_manager, config)
    submission_git_saver.daemon = True
    submission_git_saver.start()
def compile(*files, exe_name=None, cc=CC, **cflags):
    """Compile C source files.

    :param files: filenames to be compiled
    :param exe_name: name of resulting executable
    :param cc: compiler to use (:data:`check50.c.CC` by default)
    :param cflags: additional flags to pass to the compiler
    :raises check50.Failure: if compilation failed (i.e., if the compiler
        returns a non-zero exit status).
    :raises RuntimeError: if no filenames are specified

    If ``exe_name`` is None, :func:`check50.c.compile` will default to the
    first file specified sans the ``.c`` extension::

        check50.c.compile("foo.c", "bar.c")
        # clang foo.c bar.c -o foo -std=c11 -ggdb -lm

    Additional CFLAGS may be passed as keyword arguments like so::

        check50.c.compile("foo.c", "bar.c", lcs50=True)
        # clang foo.c bar.c -o foo -std=c11 -ggdb -lm -lcs50

    In the same vein, the default CFLAGS may be overridden via keyword
    arguments::

        check50.c.compile("foo.c", "bar.c", std="c99", lm=False)
        # clang foo.c bar.c -o foo -std=c99 -ggdb
    """
    if not files:
        raise RuntimeError(_("compile requires at least one file"))

    if exe_name is None and files[0].endswith(".c"):
        exe_name = Path(files[0]).stem

    files = " ".join(files)

    flags = CFLAGS.copy()
    flags.update(cflags)
    flags = " ".join(
        (f"-{flag}" + (f"={value}" if value is not True else "")).replace("_", "-")
        for flag, value in flags.items() if value)

    out_flag = f" -o {exe_name} " if exe_name is not None else " "

    run(f"{cc} {files}{out_flag}{flags}").exit(0)
def _wait_on_metadata(self, topic, max_wait):
    """Wait for cluster metadata, including partitions for the given topic,
    to be available.

    Arguments:
        topic (str): topic we want metadata for
        max_wait (float): maximum time in secs for waiting on the metadata

    Returns:
        set: partition ids for the topic

    Raises:
        KafkaTimeoutError: if partitions for topic were not obtained before
            the specified max_wait timeout
    """
    # Add topic to metadata topic list if it is not there already.
    self._sender.add_topic(topic)
    begin = time.time()
    elapsed = 0.0
    metadata_event = None
    while True:
        partitions = self._metadata.partitions_for_topic(topic)
        if partitions is not None:
            return partitions

        if not metadata_event:
            metadata_event = threading.Event()

        log.debug("Requesting metadata update for topic %s", topic)

        metadata_event.clear()
        future = self._metadata.request_update()
        future.add_both(lambda e, *args: e.set(), metadata_event)
        self._sender.wakeup()
        metadata_event.wait(max_wait - elapsed)
        elapsed = time.time() - begin
        if not metadata_event.is_set():
            raise Errors.KafkaTimeoutError(
                "Failed to update metadata after %.1f secs." % (max_wait,))
        elif topic in self._metadata.unauthorized_topics:
            raise Errors.TopicAuthorizationFailedError(topic)
        else:
            log.debug("_wait_on_metadata woke after %s secs.", elapsed)
def clone(self, **data):
    '''Utility method for cloning the instance as a new object.

    :parameter data: additional data which overrides field data.
    :rtype: a new instance of this class.
    '''
    meta = self._meta
    session = self.session
    pkname = meta.pkname()
    pkvalue = data.pop(pkname, None)
    fields = self.todict(exclude_cache=True)
    fields.update(data)
    fields.pop('__dbdata__', None)
    obj = self._meta.make_object((pkvalue, None, fields))
    obj.session = session
    return obj
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               command):
    '''Execute a command and read the output as YAMLEX.'''
    try:
        command = command.replace('%s', minion_id)
        return deserialize(__salt__['cmd.run']('{0}'.format(command)))
    except Exception:
        log.critical('YAML data from %s failed to parse', command)
        return {}
def to_representation(self, value):
    """Transform the *outgoing* native value into primitive data."""
    raise NotImplementedError(
        '{cls}.to_representation() must be implemented for field '
        '{field_name}. If you do not need to support write operations '
        'you probably want to subclass `ReadOnlyField` instead.'.format(
            cls=self.__class__.__name__,
            field_name=self.field_name,
        )
    )
def assertrepr_compare ( config , op , left , right ) : """Return specialised explanations for some operators / operands"""
width = 80 - 15 - len ( op ) - 2 # 15 chars indentation , 1 space around op left_repr = py . io . saferepr ( left , maxsize = int ( width // 2 ) ) right_repr = py . io . saferepr ( right , maxsize = width - len ( left_repr ) ) summary = u ( '%s %s %s' ) % ( ecu ( left_repr ) , op , ecu ( right_repr ) ) issequence = lambda x : ( isinstance ( x , ( list , tuple , Sequence ) ) and not isinstance ( x , basestring ) ) istext = lambda x : isinstance ( x , basestring ) isdict = lambda x : isinstance ( x , dict ) isset = lambda x : isinstance ( x , ( set , frozenset ) ) def isiterable ( obj ) : try : iter ( obj ) return not istext ( obj ) except TypeError : return False verbose = config . getoption ( 'verbose' ) explanation = None try : if op == '==' : if istext ( left ) and istext ( right ) : explanation = _diff_text ( left , right , verbose ) else : if issequence ( left ) and issequence ( right ) : explanation = _compare_eq_sequence ( left , right , verbose ) elif isset ( left ) and isset ( right ) : explanation = _compare_eq_set ( left , right , verbose ) elif isdict ( left ) and isdict ( right ) : explanation = _compare_eq_dict ( left , right , verbose ) if isiterable ( left ) and isiterable ( right ) : expl = _compare_eq_iterable ( left , right , verbose ) if explanation is not None : explanation . extend ( expl ) else : explanation = expl elif op == 'not in' : if istext ( left ) and istext ( right ) : explanation = _notin_text ( left , right , verbose ) except Exception : _logger . exception ( "dessert: representation of details failed. " "Probably an object has a faulty __repr__." ) if not explanation : return None return [ summary ] + explanation
def _generate_filename(cls, writer_spec, name, job_id, num,
                       attempt=None, seg_index=None):
    """Generates a filename for a particular output.

    Args:
        writer_spec: specification dictionary for the output writer.
        name: name of the job.
        job_id: the ID number assigned to the job.
        num: shard number.
        attempt: the shard attempt number.
        seg_index: index of the seg. None means the final output.

    Returns:
        a string containing the filename.

    Raises:
        BadWriterParamsError: if the template contains any errors such as
            invalid syntax or contains unknown substitution placeholders.
    """
    naming_format = cls._TMP_FILE_NAMING_FORMAT
    if seg_index is None:
        naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM,
                                        cls._DEFAULT_NAMING_FORMAT)

    template = string.Template(naming_format)
    try:
        # Check that template doesn't use undefined mappings and is formatted well.
        if seg_index is None:
            return template.substitute(name=name, id=job_id, num=num)
        else:
            return template.substitute(name=name, id=job_id, num=num,
                                       attempt=attempt, seg=seg_index)
    except ValueError as error:
        raise errors.BadWriterParamsError("Naming template is bad, %s" % (error))
    except KeyError as error:
        raise errors.BadWriterParamsError("Naming template '%s' has extra "
                                          "mappings, %s" % (naming_format, error))
def WriteEventBody(self, event):
    """Writes the body of an event object to the output.

    Args:
        event (EventObject): event.

    Raises:
        NoFormatterFound: if no event formatter can be found to match the
            data type in the event object.
    """
    output_values = self._GetOutputValues(event)

    output_values[3] = self._output_mediator.GetMACBRepresentation(event)
    output_values[6] = event.timestamp_desc or '-'

    self._WriteOutputValues(output_values)
def maybe_load_model(savedir, container):
    """Load model if present at the specified path."""
    if savedir is None:
        return

    state_path = os.path.join(savedir, 'training_state.pkl.zip')
    if container is not None:
        logger.log("Attempting to download model from Azure")
        found_model = container.get(savedir, 'training_state.pkl.zip')
    else:
        found_model = os.path.exists(state_path)

    if found_model:
        state = pickle_load(state_path, compression=True)
        model_dir = "model-{}".format(state["num_iters"])
        if container is not None:
            container.get(savedir, model_dir)
        U.load_state(os.path.join(savedir, model_dir, "saved"))
        logger.log("Loaded models checkpoint at {} iterations".format(
            state["num_iters"]))
        return state
def create_zip(self, clean=True, increment_version=True, register=True):
    """Creates a GenePattern module zip file for upload and installation
    on a GenePattern server.

    :param clean: boolean
    :return:
    """
    # First validate the attributes.
    self.validate()

    # Check whether an existing manifest interferes with module creation.
    if os.path.exists(MANIFEST_FILE_NAME):
        raise OSError("existing manifest blocks manifest file creation")

    # Write the manifest.
    self.write_manifest()

    # Create the zip.
    self._zip_files()

    # Increment the version of the module.
    if increment_version:
        self.version += 1

    # Register the module with the LSID authority.
    if register and self.lsid_authority:
        self.lsid_authority.register(self)

    # Clean up the manifest.
    if clean:
        os.remove(MANIFEST_FILE_NAME)
def configure_logger(logger, filename, folder, log_level):
    '''Configure logging behavior for the simulations.'''
    fmt = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    if folder is not None:
        log_file = os.path.join(folder, filename)
        hdl = logging.FileHandler(log_file)
        hdl.setFormatter(fmt)
        hdl.setLevel(log_level)
        logger.addHandler(hdl)
    shdl = logging.StreamHandler()
    shdl.setLevel(log_level)
    shdl.setFormatter(fmt)
    logger.addHandler(shdl)
    logger.setLevel(log_level)
def visitAnnotation(self, ctx: ShExDocParser.AnnotationContext):
    """annotation: '//' predicate (iri | literal)"""
    # Annotations apply to the expression, NOT the shape (!)
    annot = Annotation(self.context.predicate_to_IRI(ctx.predicate()))
    if ctx.iri():
        annot.object = self.context.iri_to_iriref(ctx.iri())
    else:
        annot.object = self.context.literal_to_ObjectLiteral(ctx.literal())
    self.annotations.append(annot)
def get_user_deliveryserver(self, domainid, serverid):
    """Get a user delivery server."""
    return self.api_call(
        ENDPOINTS['userdeliveryservers']['get'],
        dict(domainid=domainid, serverid=serverid))
def R(self, value):
    """Measurement uncertainty."""
    self._R = value
    self._R1_2 = cholesky(self._R, lower=True)
def solve ( self ) : """Runs a power flow @ rtype : dict @ return : Solution dictionary with the following keys : - C { V } - final complex voltages - C { converged } - boolean value indicating if the solver converged or not - C { iterations } - the number of iterations performed"""
# Zero result attributes . self . case . reset ( ) # Retrieve the contents of the case . b , l , g , _ , _ , _ , _ = self . _unpack_case ( self . case ) # Update bus indexes . self . case . index_buses ( b ) # Index buses accoding to type . # try : # _ , pq , pv , pvpq = self . _ index _ buses ( b ) # except SlackBusError : # logger . error ( " Swing bus required for DCPF . " ) # return { " converged " : False } refs , pq , pv , pvpq = self . _index_buses ( b ) if len ( refs ) != 1 : logger . error ( "Swing bus required for DCPF." ) return { "converged" : False } # Start the clock . t0 = time ( ) # Build the vector of initial complex bus voltages . V0 = self . _initial_voltage ( b , g ) # Save index and angle of original reference bus . # if self . qlimit : # ref0 = ref # Varef0 = b [ ref0 ] . Va # # List of buses at Q limits . # limits = [ ] # # Qg of generators at Q limits . # fixedQg = matrix ( 0.0 , ( g . size [ 0 ] , 1 ) ) repeat = True while repeat : # Build admittance matrices . Ybus , Yf , Yt = self . case . getYbus ( b , l ) # Compute complex bus power injections ( generation - load ) . Sbus = self . case . getSbus ( b ) # Run the power flow . V , converged , i = self . _run_power_flow ( Ybus , Sbus , V0 , pv , pq , pvpq ) # Update case with solution . self . case . pf_solution ( Ybus , Yf , Yt , V ) # Enforce generator Q limits . if self . qlimit : raise NotImplementedError else : repeat = False elapsed = time ( ) - t0 if converged and self . verbose : logger . info ( "AC power flow converged in %.3fs" % elapsed ) return { "converged" : converged , "elapsed" : elapsed , "iterations" : i , "V" : V }
def _http_put(self, url, data, **kwargs):
    """Performs the HTTP PUT request."""
    kwargs.update({'data': json.dumps(data)})
    return self._http_request('put', url, kwargs)
def parse(fileobject, schema=None):
    """Parses a file object.

    This function parses a KML file object, and optionally validates it
    against a provided schema.
    """
    if schema:
        # With validation.
        parser = objectify.makeparser(schema=schema.schema, strip_cdata=False)
        return objectify.parse(fileobject, parser=parser)
    else:
        # Without validation.
        return objectify.parse(fileobject)
def get_path(self, dir=None):
    """Return path relative to the current working directory of the
    Node.FS.Base object that owns us."""
    if not dir:
        dir = self.fs.getcwd()
    if self == dir:
        return '.'
    path_elems = self.get_path_elements()
    pathname = ''
    try:
        i = path_elems.index(dir)
    except ValueError:
        for p in path_elems[:-1]:
            pathname += p.dirname
    else:
        for p in path_elems[i + 1:-1]:
            pathname += p.dirname
    return pathname + path_elems[-1].name
def cleanup_custom_options(id, weakref=None):
    """Cleans up unused custom trees if all objects referencing the custom
    id have been garbage collected or the tree is otherwise unreferenced.
    """
    try:
        if Store._options_context:
            return
        weakrefs = Store._weakrefs.get(id, [])
        if weakref in weakrefs:
            weakrefs.remove(weakref)
        refs = []
        for wr in list(weakrefs):
            r = wr()
            if r is None or r.id != id:
                weakrefs.remove(wr)
            else:
                refs.append(r)
        if not refs:
            for bk in Store.loaded_backends():
                if id in Store._custom_options[bk]:
                    Store._custom_options[bk].pop(id)
        if not weakrefs:
            Store._weakrefs.pop(id, None)
    except Exception as e:
        raise Exception('Cleanup of custom options tree with id %s '
                        'failed with the following exception: %s, '
                        'an unreferenced orphan tree may persist in '
                        'memory' % (id, e))
def _string_parser(strip_whitespace):
    """Return a parser function for parsing string values."""
    def _parse_string_value(element_text, _state):
        if element_text is None:
            value = ''
        elif strip_whitespace:
            value = element_text.strip()
        else:
            value = element_text
        return value

    return _parse_string_value
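A quick illustration of the closure's behaviour; the ``_state`` argument is ignored by this parser, so None is passed here.

strict = _string_parser(strip_whitespace=True)
loose = _string_parser(strip_whitespace=False)

strict('  hello  ', None)    # -> 'hello'
loose('  hello  ', None)     # -> '  hello  '
strict(None, None)           # -> ''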
def generate_is_role_functions(cls, roles):
    """Generate `class.is_{role}()` methods for a class.

    :param class cls: The python class to be modified.
    :param dict roles: The roles to use for generation.

    This method is intended to be used by an inheriting class to generate
    `is_{role}()` methods based on the roles provided during generation.

    :class:`RaxKeystone` uses this to add these methods to a dynamically
    generated class which inherits from this class.

    Note that as this is an Anonymous user, these functions will always
    return `False`.
    """
    for access_role in roles.keys():
        setattr(cls, "is_" + access_role, lambda x: False)
def peek_16(library, session, address):
    """Read a 16-bit value from the specified address.

    Corresponds to the viPeek16 function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param address: Source address to read the value.
    :return: Data read from bus, return value of the library call.
    :rtype: bytes, :class:`pyvisa.constants.StatusCode`
    """
    value_16 = ViUInt16()
    ret = library.viPeek16(session, address, byref(value_16))
    return value_16.value, ret
def delete_translations(self, language=None):
    """Deletes related translations."""
    from .models import Translation
    return Translation.objects.delete_translations(obj=self, language=language)
def update_user(self, user_is_artist="", artist_level="", artist_specialty="",
                real_name="", tagline="", countryid="", website="", bio=""):
    """Update the user's profile information.

    :param user_is_artist: Is the user an artist?
    :param artist_level: If the user is an artist, what level are they
    :param artist_specialty: If the user is an artist, what is their specialty
    :param real_name: The user's real name
    :param tagline: The user's tagline
    :param countryid: The user's location
    :param website: The user's personal website
    :param bio: The user's bio
    """
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError(
            "Authentication through Authorization Code (Grant Type) is "
            "required in order to connect to this endpoint.")

    post_data = {}

    if user_is_artist:
        post_data["user_is_artist"] = user_is_artist
    if artist_level:
        post_data["artist_level"] = artist_level
    if artist_specialty:
        post_data["artist_specialty"] = artist_specialty
    if real_name:
        post_data["real_name"] = real_name
    if tagline:
        post_data["tagline"] = tagline
    if countryid:
        post_data["countryid"] = countryid
    if website:
        post_data["website"] = website
    if bio:
        post_data["bio"] = bio

    response = self._req('/user/profile/update', post_data=post_data)
    return response['success']
def kill_process(procname, scriptname):
    """Kill WSGI processes that may be running in development."""
    # from http://stackoverflow.com/a/2940878
    import signal
    import subprocess

    p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
    out, err = p.communicate()

    for line in out.decode().splitlines():
        if procname in line and scriptname in line:
            pid = int(line.split()[1])
            info('Stopping %s %s %d' % (procname, scriptname, pid))
            os.kill(pid, signal.SIGKILL)
def results(self):
    """All metrics.

    Returns:
        dict: results in a dictionary format
    """
    results = {
        'overall': self.results_overall_metrics(),
        'class_wise': self.results_class_wise_metrics(),
        'class_wise_average': self.results_class_wise_average_metrics(),
    }
    return results
def _perform_replacements(self, chars):
    '''Performs simple key/value string replacements that require no logic.

    This is used to convert the fullwidth rōmaji, several ligatures, and
    the punctuation characters.
    '''
    for n in range(len(chars)):
        char = chars[n]
        if char in repl:
            chars[n] = repl[char]

    # Some replacements might result in multi-character strings being
    # inserted into the list. Ensure we still have a list of single
    # characters for iteration.
    return list(''.join(chars))
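A minimal sketch of the idea; the ``repl`` table below is a made-up stand-in for the module's real mapping, and since the method never uses ``self`` it can be exercised standalone by passing None in that slot.

# Hypothetical replacement table (the real one is much larger).
repl = {'Ａ': 'A', 'ｂ': 'b', '℡': 'TEL'}

chars = list('Ａｂ℡')
# Multi-character replacements ('TEL') are flattened back into single characters.
print(_perform_replacements(None, chars))    # -> ['A', 'b', 'T', 'E', 'L']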
def bool_str(string):
    """Returns a boolean from a string input of 'true' or 'false'."""
    if string not in BOOL_STRS:
        raise ValueError('Invalid boolean string: "{}"'.format(string))
    return True if string == 'true' else False
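Usage sketch; ``BOOL_STRS`` is assumed here to be the pair of accepted literals.

BOOL_STRS = ('true', 'false')

bool_str('true')     # -> True
bool_str('false')    # -> False
bool_str('yes')      # raises ValueError: Invalid boolean string: "yes"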
def translate_stage_name(stage):
    """Account for potential variability in stage/phase name definition.

    Since a pipeline author is free to name his/her processing phases/stages
    as desired, but these choices influence file names, enforce some
    standardization. Specifically, prohibit potentially problematic spaces.

    :param str | pypiper.Stage | function stage: Pipeline stage, its name,
        or a representative function.
    :return str: Standardized pipeline phase/stage name.
    """
    # First ensure that we have text.
    name = parse_stage_name(stage)
    # Cast to string to ensure that indexed stages (ints) are handled.
    return str(name).lower().replace(" ", STAGE_NAME_SPACE_REPLACEMENT)
def reset_to_coefficients(self):
    """Keeps only the coefficients.

    This can be used to recalculate the IO tables for a new final demand.

    Note:
        The system can not be reconstructed after this step because all
        absolute data is removed. Save the Y data in case a reconstruction
        might be necessary.
    """
    # Development note: the coefficient attributes are defined in
    # self.__coefficients__
    [setattr(self, key, None)
     for key in self.get_DataFrame(data=False,
                                   with_unit=False,
                                   with_population=False)
     if key not in self.__coefficients__]
    return self
def cache_key(self, repo: str, branch: str, task: Task, git_repo: Repo) -> str:
    """Returns the key used for storing results in cache."""
    return "{repo}_{branch}_{hash}_{task}".format(
        repo=self.repo_id(repo),
        branch=branch,
        hash=self.current_git_hash(repo, branch, git_repo),
        task=task.hash)
def _get_property(device_path: Union[Path, str], property_name: str) -> str:
    """Gets the given property for a device."""
    with open(str(Path(device_path, property_name))) as file:
        return file.readline().strip()
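For example, against a Linux sysfs device directory; the path below is illustrative, and any directory containing a small text file per attribute works the same way.

# Reads the first line of /sys/class/power_supply/BAT0/status, e.g. "Charging".
status = _get_property('/sys/class/power_supply/BAT0', 'status')
print(status)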
def get_device_name(file_name, sys_obj_id, delimiter=":"):
    """Get device name by its SNMP sysObjectID property from the file map.

    :param str file_name:
    :param str sys_obj_id:
    :param str delimiter:
    :rtype: str
    """
    try:
        with open(file_name, "rb") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=delimiter)
            for row in csv_reader:
                if len(row) >= 2 and row[0] == sys_obj_id:
                    return row[1]
    except IOError:
        pass  # file does not exist

    return sys_obj_id
def wait_socket(_socket, session, timeout=1):
    """Helper function for testing non-blocking mode.

    This function blocks the calling thread for <timeout> seconds -
    to be used only for testing purposes.

    Also available at `ssh2.utils.wait_socket`
    """
    directions = session.block_directions()
    if directions == 0:
        return 0
    readfds = [_socket] if (directions & LIBSSH2_SESSION_BLOCK_INBOUND) else ()
    writefds = [_socket] if (directions & LIBSSH2_SESSION_BLOCK_OUTBOUND) else ()
    return select(readfds, writefds, (), timeout)
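A typical non-blocking read loop built around this helper. The channel/session objects and the LIBSSH2_ERROR_EAGAIN sentinel come from the ssh2-python bindings; the call pattern here is a sketch rather than the library's canonical example.

from ssh2.error_codes import LIBSSH2_ERROR_EAGAIN

def read_channel_nonblocking(sock, session, channel):
    # Keep reading until the channel reports EOF, waiting on the socket
    # whenever the library says the call would block.
    output = b""
    while not channel.eof():
        size, data = channel.read()
        if size == LIBSSH2_ERROR_EAGAIN:
            wait_socket(sock, session)
            continue
        output += data
    return output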
def default_cell_formatter(table, column, row, value, **_):
    """:type column: tri.table.Column"""
    formatter = _cell_formatters.get(type(value))
    if formatter:
        value = formatter(table=table, column=column, row=row, value=value)

    if value is None:
        return ''

    return conditional_escape(value)
def create_record(self, rtype=None, name=None, content=None, **kwargs):
    """Create record. If a record already exists with the same content, do nothing."""
    if not rtype and kwargs.get('type'):
        warnings.warn('Parameter "type" is deprecated, use "rtype" instead.',
                      DeprecationWarning)
        rtype = kwargs.get('type')

    return self._create_record(rtype, name, content)
def load_private_key(key_file, key_password=None):
    """Load a private key from disk.

    :param key_file: File path to key file.
    :param key_password: Optional. If the key file is encrypted, provide the
        password to decrypt it. Defaults to None.
    :return: PrivateKey<string>
    """
    key_file = os.path.expanduser(key_file)
    key_file = os.path.abspath(key_file)

    if not key_password:
        with open(key_file, 'r') as key:
            return key.read()

    with open(key_file, 'rb') as key:
        key_bytes = key.read()

    return decrypt_key(key_bytes, key_password).decode(ENCODING)
def _call_widget_constructed(widget):
    """Static method, called when a widget is constructed."""
    if Widget._widget_construction_callback is not None and \
            callable(Widget._widget_construction_callback):
        Widget._widget_construction_callback(widget)
def get_key(cls, key):
    "Get the path to `key` in the config file."
    return cls.get().get(key, cls.DEFAULT_CONFIG.get(key, None))
def register(self, key_or_tag, obj):
    """Register a custom Transit tag and new parsing function with the
    decoder. Also, you can optionally set the 'default_decoder' with this
    function. Your new tag and parse/decode function will be added to the
    internal dictionary of decoders for this Decoder object.
    """
    if key_or_tag == "default_decoder":
        self.options["default_decoder"] = obj
    else:
        self.decoders[key_or_tag] = obj
def probe(self, key_id=None, ssh_user=None):
    """If no parameter is provided, mist.io will try to probe the machine
    with the default key.

    :param key_id: Optional. Give if you explicitly want to probe with this key_id
    :param ssh_user: Optional. Give if you explicitly want a specific user
    :returns: A list of data received by the probing (e.g. uptime etc.)
    """
    ips = [ip for ip in self.info['public_ips'] if ':' not in ip]
    if not ips:
        raise Exception("No public IPv4 address available to connect to")

    payload = {
        'host': ips[0],
        'key': key_id,
        'ssh_user': ssh_user,
    }

    data = json.dumps(payload)
    req = self.request(self.mist_client.uri + "/clouds/" + self.cloud.id +
                       "/machines/" + self.id + "/probe", data=data)
    probe_info = req.post().json()
    self.probed = True
    return probe_info
def from_dir(cwd):
    "Context manager to ensure in the cwd directory."
    import os
    curdir = os.getcwd()
    try:
        os.chdir(cwd)
        yield
    finally:
        os.chdir(curdir)
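Usage sketch; this assumes the generator is wrapped with ``contextlib.contextmanager``, which is not shown in the snippet above.

import os
from contextlib import contextmanager

from_dir = contextmanager(from_dir)   # assumed wrapping, if not already decorated

with from_dir('/tmp'):
    print(os.getcwd())    # -> /tmp
print(os.getcwd())        # back in the original directory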
def _get_audit_defaults ( option = None ) : '''Loads audit . csv defaults into a dict in _ _ context _ _ called ' lgpo . audit _ defaults ' . The dictionary includes fieldnames and all configurable policies as keys . The values are used to create / modify the ` ` audit . csv ` ` file . The first entry is ` fieldnames ` used to create the header for the csv file . The rest of the entries are the audit policy names . Sample data follows : ' fieldnames ' : [ ' Machine Name ' , ' Policy Target ' , ' Subcategory ' , ' Subcategory GUID ' , ' Inclusion Setting ' , ' Exclusion Setting ' , ' Setting Value ' ] , ' Audit Sensitive Privilege Use ' : { ' Auditpol Name ' : ' Sensitive Privilege Use ' , ' Exclusion Setting ' : ' ' , ' Inclusion Setting ' : ' No Auditing ' , ' Machine Name ' : ' WIN - 8FGT3E045SE ' , ' Policy Target ' : ' System ' , ' Setting Value ' : ' 0 ' , ' Subcategory ' : u ' Audit Sensitive Privilege Use ' , ' Subcategory GUID ' : ' { 0CCE9228-69AE - 11D9 - BED3-505054503030 } ' } , ' Audit Special Logon ' : { ' Auditpol Name ' : ' Special Logon ' , ' Exclusion Setting ' : ' ' , ' Inclusion Setting ' : ' No Auditing ' , ' Machine Name ' : ' WIN - 8FGT3E045SE ' , ' Policy Target ' : ' System ' , ' Setting Value ' : ' 0 ' , ' Subcategory ' : u ' Audit Special Logon ' , ' Subcategory GUID ' : ' { 0CCE921B - 69AE - 11D9 - BED3-505054503030 } ' } , ' Audit System Integrity ' : { ' Auditpol Name ' : ' System Integrity ' , ' Exclusion Setting ' : ' ' , ' Inclusion Setting ' : ' No Auditing ' , ' Machine Name ' : ' WIN - 8FGT3E045SE ' , ' Policy Target ' : ' System ' , ' Setting Value ' : ' 0 ' , ' Subcategory ' : u ' Audit System Integrity ' , ' Subcategory GUID ' : ' { 0CCE9212-69AE - 11D9 - BED3-505054503030 } ' } , . . note : : ` Auditpol Name ` designates the value to use when setting the value with the auditpol command Args : option ( str ) : The item from the dictionary to return . If ` ` None ` ` the entire dictionary is returned . Default is ` ` None ` ` Returns : dict : If ` ` None ` ` or one of the audit settings is passed list : If ` ` fieldnames ` ` is passed'''
if 'lgpo.audit_defaults' not in __context__ : # Get available setting names and GUIDs # This is used to get the fieldnames and GUIDs for individual policies log . debug ( 'Loading auditpol defaults into __context__' ) dump = __utils__ [ 'auditpol.get_auditpol_dump' ] ( ) reader = csv . DictReader ( dump ) audit_defaults = { 'fieldnames' : reader . fieldnames } for row in reader : row [ 'Machine Name' ] = '' row [ 'Auditpol Name' ] = row [ 'Subcategory' ] # Special handling for snowflake scenarios where the audit . csv names # don ' t match the auditpol names if row [ 'Subcategory' ] == 'Central Policy Staging' : row [ 'Subcategory' ] = 'Audit Central Access Policy Staging' elif row [ 'Subcategory' ] == 'Plug and Play Events' : row [ 'Subcategory' ] = 'Audit PNP Activity' elif row [ 'Subcategory' ] == 'Token Right Adjusted Events' : row [ 'Subcategory' ] = 'Audit Token Right Adjusted' else : row [ 'Subcategory' ] = 'Audit {0}' . format ( row [ 'Subcategory' ] ) audit_defaults [ row [ 'Subcategory' ] ] = row __context__ [ 'lgpo.audit_defaults' ] = audit_defaults if option : return __context__ [ 'lgpo.audit_defaults' ] [ option ] else : return __context__ [ 'lgpo.audit_defaults' ]
def update(self, skill=None, author=None):
    """Update all downloaded skills or one specified skill."""
    if skill is None:
        return self.update_all()
    else:
        if isinstance(skill, str):
            skill = self.find_skill(skill, author)
        entry = get_skill_entry(skill.name, self.skills_data)
        if entry:
            entry['beta'] = skill.is_beta
        if skill.update():
            # On successful update, refresh the 'updated' timestamp.
            if entry:
                entry['updated'] = time.time()
def provider(func=None, *, singleton=False, injector=None):
    """Decorator to mark a function as a provider.

    Args:
        singleton (bool): The returned value should be a singleton or shared
            instance. If False (the default) the provider function will be
            invoked again every time it's needed for injection.
        injector (Injector): If provided, the function is immediately
            registered as a provider with the injector instance.

    Example:
        @diay.provider(singleton=True)
        def myfunc() -> MyClass:
            return MyClass(args)
    """
    def decorator(func):
        wrapped = _wrap_provider_func(func, {'singleton': singleton})
        if injector:
            injector.register_provider(wrapped)
        return wrapped

    if func:
        return decorator(func)
    return decorator
def check_infile_and_wp ( curinf , curwp ) : """Check the existence of the given file and directory path . 1 . Raise Runtime exception of both not existed . 2 . If the ` ` curwp ` ` is None , the set the base folder of ` ` curinf ` ` to it ."""
if not os . path . exists ( curinf ) : if curwp is None : TauDEM . error ( 'You must specify one of the workspace and the ' 'full path of input file!' ) curinf = curwp + os . sep + curinf curinf = os . path . abspath ( curinf ) if not os . path . exists ( curinf ) : TauDEM . error ( 'Input files parameter %s is not existed!' % curinf ) else : curinf = os . path . abspath ( curinf ) if curwp is None : curwp = os . path . dirname ( curinf ) return curinf , curwp
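A minimal usage sketch; the file name and workspace path below are hypothetical, and the call only succeeds if the resolved file actually exists:

# resolve an input raster against a workspace directory (hypothetical paths)
infile, workdir = check_infile_and_wp('dem.tif', '/data/taudem_ws')
# infile  -> '/data/taudem_ws/dem.tif' (made absolute by the helper)
# workdir -> '/data/taudem_ws'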
def install ( path , name = None ) : """Compiles a Thrift file and installs it as a submodule of the caller . Given a tree organized like so : : foo / _ _ init _ _ . py bar . py my _ service . thrift You would do , . . code - block : : python my _ service = thriftrw . install ( ' my _ service . thrift ' ) To install ` ` my _ service ` ` as a submodule of the module from which you made the call . If the call was made in ` ` foo / bar . py ` ` , the compiled Thrift file will be installed as ` ` foo . bar . my _ service ` ` . If the call was made in ` ` foo / _ _ init _ _ . py ` ` , the compiled Thrift file will be installed as ` ` foo . my _ service ` ` . This allows other modules to import ` ` from ` ` the compiled module like so , . . code - block : : python from foo . my _ service import MyService . . versionadded : : 0.2 : param path : Path of the Thrift file . This may be an absolute path , or a path relative to the Python module making the call . : param str name : Name of the submodule . Defaults to the basename of the Thrift file . : returns : The compiled module"""
if name is None : name = os . path . splitext ( os . path . basename ( path ) ) [ 0 ] callermod = inspect . getmodule ( inspect . stack ( ) [ 1 ] [ 0 ] ) name = '%s.%s' % ( callermod . __name__ , name ) if name in sys . modules : return sys . modules [ name ] if not os . path . isabs ( path ) : callerfile = callermod . __file__ path = os . path . normpath ( os . path . join ( os . path . dirname ( callerfile ) , path ) ) sys . modules [ name ] = mod = load ( path , name = name ) return mod
def stripped_name ( self ) : """Remove extraneous information from C + + demangled function names ."""
name = self . name # Strip function parameters from name by recursively removing paired parenthesis while True : name , n = self . _parenthesis_re . subn ( '' , name ) if not n : break # Strip const qualifier name = self . _const_re . sub ( '' , name ) # Strip template parameters from name by recursively removing paired angles while True : name , n = self . _angles_re . subn ( '' , name ) if not n : break return name
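The three compiled regexes this method uses are class attributes that are not shown here; the following is only a plausible sketch of how they might be defined, chosen so that each subn pass removes one innermost paired group, which is why the method loops until no substitutions are reported:

import re

# assumed class attributes, not the original definitions
_parenthesis_re = re.compile(r'\([^()]*\)')  # innermost (...) parameter lists
_angles_re = re.compile(r'<[^<>]*>')         # innermost <...> template arguments
_const_re = re.compile(r'\s*\bconst\b')      # const qualifier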
def cancel_broadcast ( self , broadcast_guid ) : '''Cancel a broadcast specified by guid'''
subpath = 'broadcasts/%s/update' % broadcast_guid broadcast = { 'status' : 'CANCELED' } bcast_dict = self . _call ( subpath , method = 'POST' , data = broadcast , content_type = 'application/json' ) return bcast_dict
def _track_tasks ( task_ids , cluster ) : """Poll task status until STOPPED"""
while True : statuses = _get_task_statuses ( task_ids , cluster ) if all ( [ status == 'STOPPED' for status in statuses ] ) : logger . info ( 'ECS tasks {0} STOPPED' . format ( ',' . join ( task_ids ) ) ) break time . sleep ( POLL_TIME ) logger . debug ( 'ECS task status for tasks {0}: {1}' . format ( task_ids , statuses ) )
def delete_mappings_in_network ( network_id , network_2_id = None , ** kwargs ) : """Delete all the resource attribute mappings in a network . If another network is specified , only delete the mappings between the two networks ."""
qry = db . DBSession . query ( ResourceAttrMap ) . filter ( or_ ( ResourceAttrMap . network_a_id == network_id , ResourceAttrMap . network_b_id == network_id ) ) if network_2_id is not None : qry = qry . filter ( or_ ( ResourceAttrMap . network_a_id == network_2_id , ResourceAttrMap . network_b_id == network_2_id ) ) mappings = qry . all ( ) for m in mappings : db . DBSession . delete ( m ) db . DBSession . flush ( ) return 'OK'
def max_subsequence_sum ( arr : list , length : int , i_index : int , k_index : int ) -> int : """Calculate the maximum sum of an increasing subsequence from the start of the list up to the index ' i _ index ' . Subsequence must include the element at position ' k _ index ' ( where k _ index > i _ index ) . Args : arr ( list ) : the data list length ( int ) : length of the list i _ index ( int ) : Position ' i ' in the list k _ index ( int ) : Position ' k ' in the list ( where k > i ) Returns : int : Maximum sum of an increasing subsequence . Examples : > > > max _ subsequence _ sum ( [ 1 , 101 , 2 , 3 , 100 , 4 , 5 ] , 7 , 4 , 6) 11 > > > max _ subsequence _ sum ( [ 1 , 101 , 2 , 3 , 100 , 4 , 5 ] , 7 , 2 , 5) > > > max _ subsequence _ sum ( [ 11 , 15 , 19 , 21 , 26 , 28 , 31 ] , 7 , 2 , 4) 71"""
dp = [ [ 0 for _ in range ( length ) ] for _ in range ( length ) ] for i in range ( length ) : dp [ 0 ] [ i ] = arr [ i ] + arr [ 0 ] if arr [ i ] > arr [ 0 ] else arr [ i ] for i in range ( 1 , length ) : for j in range ( length ) : if arr [ j ] > arr [ i ] and j > i : dp [ i ] [ j ] = max ( dp [ i - 1 ] [ i ] + arr [ j ] , dp [ i - 1 ] [ j ] ) else : dp [ i ] [ j ] = dp [ i - 1 ] [ j ] return dp [ i_index ] [ k_index ]
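A quick hand check of the first doctest, reading dp[i][j] as the best sum of an increasing subsequence drawn from arr[0..i] with arr[j] appended at the end (j > i):

arr = [1, 101, 2, 3, 100, 4, 5]
# the best increasing run inside arr[0..4] whose last element is below arr[6] = 5 is 1, 2, 3,
# so the reported maximum for i_index=4, k_index=6 is 1 + 2 + 3 + 5 = 11
assert max_subsequence_sum(arr, 7, 4, 6) == 11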
async def delete ( self , * args , ** kwargs ) : '''Corresponds to DELETE request with a resource identifier , deleting a single document from the database'''
pk = self . pk_type ( kwargs [ 'pk' ] ) result = await self . _meta . object_class . delete_entries ( db = self . db , query = { self . pk : pk } ) if result . acknowledged : if result . deleted_count == 0 : raise NotFound ( ) else : raise BadRequest ( 'Failed to delete object' )
def histogram ( transform , dimensions , nbins ) : '''Computes the N - dimensional histogram of the transformed data . Parameters transform : pyemma . coordinates . transform . Transformer object transform that provides the input data dimensions : tuple of indices indices of the dimensions you want to examine nbins : tuple of ints number of bins along each dimension Returns counts : ( bins [ 0 ] , bins [ 1 ] , . . . ) ndarray of ints counts compatible with pyplot . pcolormesh and pyplot . bar edges : list of ( bins [ i ] ) ndarrays bin edges compatible with pyplot . pcolormesh and pyplot . bar , see below . Examples > > > import matplotlib . pyplot as plt # doctest : + SKIP Only for ipython notebook > > > % matplotlib inline # doctest : + SKIP > > > counts , edges = histogram ( transform , dimensions = ( 0,1 ) , nbins = ( 20 , 30 ) ) # doctest : + SKIP > > > plt . pcolormesh ( edges [ 0 ] , edges [ 1 ] , counts . T ) # doctest : + SKIP > > > counts , edges = histogram ( transform , dimensions = ( 1 , ) , nbins = ( 50 , ) ) # doctest : + SKIP > > > plt . bar ( edges [ 0 ] [ : - 1 ] , counts , width = edges [ 0 ] [ 1 : ] - edges [ 0 ] [ : - 1 ] ) # doctest : + SKIP'''
maximum = np . ones ( len ( dimensions ) ) * ( - np . inf ) minimum = np . ones ( len ( dimensions ) ) * np . inf # compute min and max for _ , chunk in transform : maximum = np . max ( np . vstack ( ( maximum , np . max ( chunk [ : , dimensions ] , axis = 0 ) ) ) , axis = 0 ) minimum = np . min ( np . vstack ( ( minimum , np . min ( chunk [ : , dimensions ] , axis = 0 ) ) ) , axis = 0 ) # define bins bins = [ np . linspace ( m , M , num = n ) for m , M , n in zip ( minimum , maximum , nbins ) ] res = np . zeros ( np . array ( nbins ) - 1 ) # compute actual histogram for _ , chunk in transform : part , _ = np . histogramdd ( chunk [ : , dimensions ] , bins = bins ) res += part return res , bins
def noise_plot ( signal , noise , normalise = False , ** kwargs ) : """Plot signal and noise fourier transforms and the difference . : type signal : ` obspy . core . stream . Stream ` : param signal : Stream of " signal " window : type noise : ` obspy . core . stream . Stream ` : param noise : Stream of the " noise " window . : type normalise : bool : param normalise : Whether to normalise the data before plotting or not . : return : ` matplotlib . pyplot . Figure `"""
import matplotlib . pyplot as plt # Work out how many traces we can plot n_traces = 0 for tr in signal : try : noise . select ( id = tr . id ) [ 0 ] except IndexError : # pragma : no cover continue n_traces += 1 fig , axes = plt . subplots ( n_traces , 2 , sharex = True ) if len ( signal ) > 1 : axes = axes . ravel ( ) i = 0 lines = [ ] labels = [ ] for tr in signal : try : noise_tr = noise . select ( id = tr . id ) [ 0 ] except IndexError : # pragma : no cover continue ax1 = axes [ i ] ax2 = axes [ i + 1 ] fft_len = fftpack . next_fast_len ( max ( noise_tr . stats . npts , tr . stats . npts ) ) if not normalise : signal_fft = fftpack . rfft ( tr . data , fft_len ) noise_fft = fftpack . rfft ( noise_tr . data , fft_len ) else : signal_fft = fftpack . rfft ( tr . data / max ( tr . data ) , fft_len ) noise_fft = fftpack . rfft ( noise_tr . data / max ( noise_tr . data ) , fft_len ) frequencies = np . linspace ( 0 , 1 / ( 2 * tr . stats . delta ) , fft_len // 2 ) noise_line , = ax1 . semilogy ( frequencies , 2.0 / fft_len * np . abs ( noise_fft [ 0 : fft_len // 2 ] ) , 'k' , label = "noise" ) signal_line , = ax1 . semilogy ( frequencies , 2.0 / fft_len * np . abs ( signal_fft [ 0 : fft_len // 2 ] ) , 'r' , label = "signal" ) if "signal" not in labels : labels . append ( "signal" ) lines . append ( signal_line ) if "noise" not in labels : labels . append ( "noise" ) lines . append ( noise_line ) ax1 . set_ylabel ( tr . id , rotation = 0 , horizontalalignment = 'right' ) ax2 . plot ( frequencies , ( 2.0 / fft_len * np . abs ( signal_fft [ 0 : fft_len // 2 ] ) ) - ( 2.0 / fft_len * np . abs ( noise_fft [ 0 : fft_len // 2 ] ) ) , 'k' ) ax2 . yaxis . tick_right ( ) ax2 . set_ylim ( bottom = 0 ) i += 2 axes [ - 1 ] . set_xlabel ( "Frequency (Hz)" ) axes [ - 2 ] . set_xlabel ( "Frequency (Hz)" ) axes [ 0 ] . set_title ( "Spectra" ) axes [ 1 ] . set_title ( "Signal - noise" ) plt . figlegend ( lines , labels , 'upper left' ) plt . tight_layout ( ) plt . subplots_adjust ( hspace = 0 ) fig = _finalise_figure ( fig = fig , ** kwargs ) # pragma : no cover return fig
def op ( self , i , o ) : """Tries to update the register values with the given instruction ."""
for ii in range ( len ( o ) ) : if is_register ( o [ ii ] ) : o [ ii ] = o [ ii ] . lower ( ) if i == 'ld' : self . set ( o [ 0 ] , o [ 1 ] ) return if i == 'push' : if valnum ( self . regs [ 'sp' ] ) : self . set ( 'sp' , ( self . getv ( self . regs [ 'sp' ] ) - 2 ) % 0xFFFF ) else : self . set ( 'sp' , None ) self . stack . append ( self . regs [ o [ 0 ] ] ) return if i == 'pop' : self . set ( o [ 0 ] , self . stack and self . stack . pop ( ) or None ) if valnum ( self . regs [ 'sp' ] ) : self . set ( 'sp' , ( self . getv ( self . regs [ 'sp' ] ) + 2 ) % 0xFFFF ) else : self . set ( 'sp' , None ) return if i == 'inc' : self . inc ( o [ 0 ] ) return if i == 'dec' : self . dec ( o [ 0 ] ) return if i == 'rra' : self . rr ( 'a' ) return if i == 'rla' : self . rl ( 'a' ) return if i == 'rlca' : self . rlc ( 'a' ) return if i == 'rrca' : self . rrc ( 'a' ) return if i == 'rr' : self . rr ( o [ 0 ] ) return if i == 'rl' : self . rl ( o [ 0 ] ) return if i == 'exx' : tmp = self . regs [ 'bc' ] self . set ( 'bc' , "bc'" ) self . set ( "bc'" , tmp ) tmp = self . regs [ 'de' ] self . set ( 'de' , "de'" ) self . set ( "de'" , tmp ) tmp = self . regs [ 'hl' ] self . set ( 'hl' , "hl'" ) self . set ( "hl'" , tmp ) return if i == 'ex' : tmp = self . get ( o [ 1 ] ) self . set ( o [ 1 ] , o [ 0 ] ) self . set ( o [ 0 ] , tmp ) return if i == 'xor' : self . C = 0 if o [ 0 ] == 'a' : self . set ( 'a' , 0 ) self . Z = 1 return if self . getv ( 'a' ) is None or self . getv ( o [ 0 ] ) is None : self . Z = None self . set ( 'a' , None ) return self . set ( 'a' , self . getv ( 'a' ) ^ self . getv ( o [ 0 ] ) ) self . Z = int ( self . get ( 'a' ) == 0 ) return if i in ( 'or' , 'and' ) : self . C = 0 if self . getv ( 'a' ) is None or self . getv ( o [ 0 ] ) is None : self . Z = None self . set ( 'a' , None ) return if i == 'or' : self . set ( 'a' , self . getv ( 'a' ) | self . getv ( o [ 0 ] ) ) else : self . set ( 'a' , self . getv ( 'a' ) & self . getv ( o [ 0 ] ) ) self . Z = int ( self . get ( 'a' ) == 0 ) return if i in ( 'adc' , 'sbc' ) : if len ( o ) == 1 : o = [ 'a' , o [ 0 ] ] if self . C is None : self . set ( o [ 0 ] , 'None' ) self . Z = None self . set ( o [ 0 ] , None ) return if i == 'sbc' and o [ 0 ] == o [ 1 ] : self . Z = int ( not self . C ) self . set ( o [ 0 ] , - self . C ) return if self . getv ( o [ 0 ] ) is None or self . getv ( o [ 1 ] ) is None : self . set_flag ( None ) self . set ( o [ 0 ] , None ) return if i == 'adc' : val = self . getv ( o [ 0 ] ) + self . getv ( o [ 1 ] ) + self . C if is_8bit_register ( o [ 0 ] ) : self . C = int ( val > 0xFF ) else : self . C = int ( val > 0xFFFF ) self . set ( o [ 0 ] , val ) return val = self . getv ( o [ 0 ] ) - self . getv ( o [ 1 ] ) - self . C self . C = int ( val < 0 ) self . Z = int ( val == 0 ) self . set ( o [ 0 ] , val ) return if i in ( 'add' , 'sub' ) : if len ( o ) == 1 : o = [ 'a' , o [ 0 ] ] if i == 'sub' and o [ 0 ] == o [ 1 ] : self . Z = 1 self . C = 0 self . set ( o [ 0 ] , 0 ) return if not is_number ( self . get ( o [ 0 ] ) ) or not is_number ( self . get ( o [ 1 ] ) ) is None : self . set_flag ( None ) self . set ( o [ 0 ] , None ) return if i == 'add' : val = self . getv ( o [ 0 ] ) + self . getv ( o [ 1 ] ) if is_8bit_register ( o [ 0 ] ) : self . C = int ( val > 0xFF ) val &= 0xFF self . Z = int ( val == 0 ) self . S = val >> 7 else : self . C = int ( val > 0xFFFF ) val &= 0xFFFF self . set ( o [ 0 ] , val ) return val = self . getv ( o [ 0 ] ) - self . getv ( o [ 1 ] ) if is_8bit_register ( o [ 0 ] ) : self . 
C = int ( val < 0 ) val &= 0xFF self . Z = int ( val == 0 ) self . S = val >> 7 else : self . C = int ( val < 0 ) val &= 0xFFFF self . set ( o [ 0 ] , val ) return if i == 'neg' : if self . getv ( 'a' ) is None : self . set_flag ( None ) return val = - self . getv ( 'a' ) self . set ( 'a' , val ) self . Z = int ( not val ) val &= 0xFF self . S = val >> 7 return if i == 'scf' : self . C = 1 return if i == 'ccf' : if self . C is not None : self . C = int ( not self . C ) return if i == 'cpl' : if self . getv ( 'a' ) is None : return self . set ( 'a' , 0xFF ^ self . getv ( 'a' ) ) return # Unknown . Resets ALL self . reset ( )
def get_swagger_operation ( self , context = default_context ) : """get the swagger _ schema operation representation ."""
consumes = produces = context . contenttype_serializers . keys ( ) parameters = get_swagger_parameters ( self . parameters , context ) responses = { "400" : Response ( { "description" : "invalid input received" , "schema" : Schema ( { "title" : "FailureObject" , "type" : "object" , "properties" : { "success" : { "type" : "boolean" } , "result" : { "type" : "string" } , } , "required" : [ "success" , "result" ] , } ) , } ) } for code , details in self . response_types . items ( ) : responses [ str ( code ) ] = details . swagger_definition ( context ) return Operation ( { "summary" : self . summary , "description" : self . description , "consumes" : consumes , "produces" : produces , "parameters" : parameters , "responses" : responses , "operationId" : self . raw_func . __name__ , "tags" : self . tags , } )
def query ( self , zipcode = None , prefix = None , pattern = None , city = None , state = None , lat = None , lng = None , radius = None , population_lower = None , population_upper = None , population_density_lower = None , population_density_upper = None , land_area_in_sqmi_lower = None , land_area_in_sqmi_upper = None , water_area_in_sqmi_lower = None , water_area_in_sqmi_upper = None , housing_units_lower = None , housing_units_upper = None , occupied_housing_units_lower = None , occupied_housing_units_upper = None , median_home_value_lower = None , median_home_value_upper = None , median_household_income_lower = None , median_household_income_upper = None , zipcode_type = ZipcodeType . Standard , sort_by = SimpleZipcode . zipcode . name , ascending = True , returns = DEFAULT_LIMIT ) : """Query zipcode the simple way . : param zipcode : int or str , find the exactly matched zipcode . Will be automatically zero padding to 5 digits : param prefix : str , zipcode prefix . : param pattern : str , zipcode wildcard . : param city : str , city name . : param state : str , state name , two letter abbr or state full name . : param lat : latitude . : param lng : longitude . : param radius : number , only returns zipcodes within a specific circle . : param population _ lower : : param population _ upper : : param population _ density _ lower : : param population _ density _ upper : : param land _ area _ in _ sqmi _ lower : : param land _ area _ in _ sqmi _ upper : : param water _ area _ in _ sqmi _ lower : : param water _ area _ in _ sqmi _ upper : : param housing _ units _ lower : : param housing _ units _ upper : : param occupied _ housing _ units _ lower : : param occupied _ housing _ units _ upper : : param median _ home _ value _ lower : : param median _ home _ value _ upper : : param median _ household _ income _ lower : : param median _ household _ income _ upper : : param zipcode _ type : str or : class ` ~ uszipcode . model . ZipcodeType ` attribute . if None , allows to return any type of zipcode . if specified , only return specified zipcode type . : param sort _ by : str or : class : ` ~ uszipcode . model . Zipcode ` attribute , specified which field is used for sorting . : param ascending : bool , True means ascending , False means descending . : param returns : int or None , limit the number of result to returns . : return : list of : class : ` ~ uszipcode . model . SimpleZipcode ` or : class : ` ~ uszipcode . model . Zipcode ` ."""
filters = list ( ) # by coordinates _n_radius_param_not_null = sum ( [ isinstance ( lat , ( integer_types , float ) ) , isinstance ( lng , ( integer_types , float ) ) , isinstance ( radius , ( integer_types , float ) ) , ] ) if _n_radius_param_not_null == 3 : flag_radius_query = True if radius <= 0 : # pragma : no cover raise ValueError ( "`radius` parameters can't less than 0!" ) elif radius <= 50 : # pragma : no cover radius_coef = 1.05 elif radius <= 100 : # pragma : no cover radius_coef = 1.10 elif radius <= 250 : # pragma : no cover radius_coef = 1.25 elif radius <= 500 : # pragma : no cover radius_coef = 1.5 else : # pragma : no cover radius_coef = 2.0 if radius >= 250 : # pragma : no cover msg = ( "\nwarning! search within radius >= 250 miles " "may greatly slow down the query!" ) sys . stdout . write ( msg ) # define lat lng boundary dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = math . cos ( lat ) * 69.172 lat_degr_rad = abs ( radius * radius_coef / dist_btwn_lat_deg ) lon_degr_rad = abs ( radius * radius_coef / dist_btwn_lon_deg ) lat_lower = lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng + lon_degr_rad filters . append ( self . zip_klass . lat >= lat_lower ) filters . append ( self . zip_klass . lat <= lat_upper ) filters . append ( self . zip_klass . lng >= lng_lower ) filters . append ( self . zip_klass . lng <= lng_upper ) elif _n_radius_param_not_null == 0 : flag_radius_query = False else : msg = "You can either specify all of `lat`, `lng`, `radius` or none of them" raise ValueError ( msg ) # by city or state if ( state is not None ) and ( city is not None ) : try : state = self . find_state ( state , best_match = True ) [ 0 ] city = self . find_city ( city , state , best_match = True ) [ 0 ] filters . append ( self . zip_klass . state == state ) filters . append ( self . zip_klass . major_city == city ) except ValueError : # pragma : no cover return [ ] elif ( state is not None ) : try : state = self . find_state ( state , best_match = True ) [ 0 ] filters . append ( self . zip_klass . state == state ) except ValueError : # pragma : no cover return [ ] elif ( city is not None ) : try : city = self . find_city ( city , None , best_match = True ) [ 0 ] filters . append ( self . zip_klass . major_city == city ) except ValueError : # pragma : no cover return [ ] else : pass # by common filter if sum ( [ zipcode is None , prefix is None , pattern is None ] ) <= 1 : msg = "You can only specify one of the `zipcode`, `prefix` and `pattern`!" raise ValueError ( msg ) if zipcode_type is not None : filters . append ( self . zip_klass . zipcode_type == zipcode_type ) if zipcode is not None : filters . append ( self . zip_klass . zipcode == str ( zipcode ) ) if prefix is not None : filters . append ( self . zip_klass . zipcode . startswith ( str ( prefix ) ) ) if pattern is not None : filters . append ( self . zip_klass . zipcode . like ( "%%%s%%" % str ( pattern ) ) ) if population_lower is not None : filters . append ( self . zip_klass . population >= population_lower ) if population_upper is not None : filters . append ( self . zip_klass . population <= population_upper ) if population_density_lower is not None : filters . append ( self . zip_klass . population_density >= population_density_lower ) if population_density_upper is not None : filters . append ( self . zip_klass . population_density <= population_density_upper ) if land_area_in_sqmi_lower is not None : filters . append ( self . zip_klass . 
land_area_in_sqmi >= land_area_in_sqmi_lower ) if land_area_in_sqmi_upper is not None : filters . append ( self . zip_klass . land_area_in_sqmi <= land_area_in_sqmi_upper ) if water_area_in_sqmi_lower is not None : filters . append ( self . zip_klass . water_area_in_sqmi >= water_area_in_sqmi_lower ) if water_area_in_sqmi_upper is not None : filters . append ( self . zip_klass . water_area_in_sqmi <= water_area_in_sqmi_upper ) if housing_units_lower is not None : filters . append ( self . zip_klass . housing_units >= housing_units_lower ) if housing_units_upper is not None : filters . append ( self . zip_klass . housing_units <= housing_units_upper ) if occupied_housing_units_lower is not None : filters . append ( self . zip_klass . occupied_housing_units >= occupied_housing_units_lower ) if occupied_housing_units_upper is not None : filters . append ( self . zip_klass . occupied_housing_units <= occupied_housing_units_upper ) if median_home_value_lower is not None : filters . append ( self . zip_klass . median_home_value >= median_home_value_lower ) if median_home_value_upper is not None : filters . append ( self . zip_klass . median_home_value <= median_home_value_upper ) if median_household_income_lower is not None : filters . append ( self . zip_klass . median_household_income >= median_household_income_lower ) if median_household_income_upper is not None : filters . append ( self . zip_klass . median_household_income <= median_household_income_upper ) # - - - solve coordinates and other search sort _ by conflict - - - sort_by = self . _resolve_sort_by ( sort_by , flag_radius_query ) q = self . ses . query ( self . zip_klass ) . filter ( * filters ) if sort_by is None : pass elif sort_by == SORT_BY_DIST : pass else : field = getattr ( self . zip_klass , sort_by ) if ascending : by = field . asc ( ) else : by = field . desc ( ) q = q . order_by ( by ) if flag_radius_query : # if we query by radius , then ignore returns limit before the # distance calculation , and then manually limit the returns pairs = list ( ) for z in q : dist = z . dist_from ( lat , lng ) if dist <= radius : pairs . append ( ( dist , z ) ) if sort_by == SORT_BY_DIST : if ascending : if returns : pairs_new = heapq . nsmallest ( returns , pairs , key = lambda x : x [ 0 ] ) else : pairs_new = list ( sorted ( pairs , key = lambda x : x [ 0 ] ) ) else : if returns : pairs_new = heapq . nlargest ( returns , pairs , key = lambda x : x [ 0 ] ) else : pairs_new = list ( sorted ( pairs , key = lambda x : x [ 0 ] , reverse = True ) ) return [ z for _ , z in pairs_new ] else : return [ z for _ , z in pairs [ : returns ] ] else : if returns : return q . limit ( returns ) . all ( ) else : return q . all ( )
def strfdelta ( tdelta , fmt ) : """Used to format ` datetime . timedelta ` objects . Works just like ` strftime ` > > > strfdelta ( duration , ' % H : % M : % S ' ) param tdelta : Time duration which is an instance of datetime . timedelta param fmt : The pattern to format the timedelta with rtype : str"""
substitutes = dict ( ) hours , rem = divmod ( tdelta . total_seconds ( ) , 3600 ) minutes , seconds = divmod ( rem , 60 ) substitutes [ "H" ] = '{:02d}' . format ( int ( hours ) ) substitutes [ "M" ] = '{:02d}' . format ( int ( minutes ) ) substitutes [ "S" ] = '{:02d}' . format ( int ( seconds ) ) return DeltaTemplate ( fmt ) . substitute ( ** substitutes )
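DeltaTemplate is not defined in this snippet; it is presumably a string.Template subclass whose delimiter is '%', which is what makes %H, %M and %S act as placeholders. A self-contained sketch under that assumption:

from datetime import timedelta
from string import Template

class DeltaTemplate(Template):
    # assumed helper so that %H / %M / %S are substituted
    delimiter = '%'

print(strfdelta(timedelta(hours=3, minutes=7, seconds=9), '%H:%M:%S'))  # -> 03:07:09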
def patch_installed ( name , advisory_ids = None , downloadonly = None , ** kwargs ) : '''. . versionadded : : 2017.7.0 Ensure that packages related to certain advisory ids are installed . Currently supported for the following pkg providers : : mod : ` yumpkg < salt . modules . yumpkg > ` and : mod : ` zypper < salt . modules . zypper > ` CLI Example : . . code - block : : yaml issue - foo - fixed : pkg . patch _ installed : - advisory _ ids : - SUSE - SLE - SERVER - 12 - SP2-2017-185 - SUSE - SLE - SERVER - 12 - SP2-2017-150 - SUSE - SLE - SERVER - 12 - SP2-2017-120'''
ret = { 'name' : name , 'changes' : { } , 'result' : None , 'comment' : '' } if 'pkg.list_patches' not in __salt__ : ret [ 'result' ] = False ret [ 'comment' ] = 'The pkg.patch_installed state is not available on ' 'this platform' return ret if not advisory_ids and isinstance ( advisory_ids , list ) : ret [ 'result' ] = True ret [ 'comment' ] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets ( name , advisory_ids , ** kwargs ) if isinstance ( targets , dict ) and 'result' in targets : return targets elif not isinstance ( targets , list ) : ret [ 'result' ] = False ret [ 'comment' ] = 'An error was encountered while checking targets: ' '{0}' . format ( targets ) return ret if __opts__ [ 'test' ] : summary = ', ' . join ( targets ) ret [ 'comment' ] = 'The following advisory patches would be ' 'downloaded: {0}' . format ( summary ) return ret try : pkg_ret = __salt__ [ 'pkg.install' ] ( name = name , advisory_ids = advisory_ids , downloadonly = downloadonly , ** kwargs ) ret [ 'result' ] = True ret [ 'changes' ] . update ( pkg_ret ) except CommandExecutionError as exc : ret = { 'name' : name , 'result' : False } if exc . info : # Get information for state return from the exception . ret [ 'changes' ] = exc . info . get ( 'changes' , { } ) ret [ 'comment' ] = exc . strerror_without_changes else : ret [ 'changes' ] = { } ret [ 'comment' ] = ( 'An error was encountered while downloading ' 'package(s): {0}' . format ( exc ) ) return ret if not ret [ 'changes' ] and not ret [ 'comment' ] : status = 'downloaded' if downloadonly else 'installed' ret [ 'result' ] = True ret [ 'comment' ] = ( 'Advisory patch is not needed or related packages ' 'are already {0}' . format ( status ) ) return ret
def vote_count ( self ) : """Returns the total number of votes cast for this poll option ."""
return Vote . objects . filter ( content_type = ContentType . objects . get_for_model ( self ) , object_id = self . id ) . aggregate ( Sum ( 'vote' ) ) [ 'vote__sum' ] or 0
def supervise ( project , workspace , namespace , workflow , sample_sets , recovery_file ) : """Supervise submission of jobs from a Firehose - style workflow of workflows"""
# Get arguments logging . info ( "Initializing FireCloud Supervisor..." ) logging . info ( "Saving recovery checkpoints to " + recovery_file ) # Parse workflow description # these three objects must be saved in order to recover the supervisor args = { 'project' : project , 'workspace' : workspace , 'namespace' : namespace , 'workflow' : workflow , 'sample_sets' : sample_sets } monitor_data , dependencies = init_supervisor_data ( workflow , sample_sets ) recovery_data = { 'args' : args , 'monitor_data' : monitor_data , 'dependencies' : dependencies } # Monitor loop . Keep going until all nodes have been evaluated supervise_until_complete ( monitor_data , dependencies , args , recovery_file )
def get_goids_sections ( sections ) : """Return all the GO IDs in a 2 - D sections list ."""
goids_all = set ( ) for _ , goids_sec in sections : goids_all |= set ( goids_sec ) return goids_all
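A small illustration of the expected 2-D sections shape, a list of (section name, GO IDs) pairs; the IDs here are arbitrary examples:

sections = [
    ('immune response', {'GO:0006955', 'GO:0002376'}),
    ('cell cycle', {'GO:0007049', 'GO:0002376'}),
]
assert get_goids_sections(sections) == {'GO:0002376', 'GO:0006955', 'GO:0007049'}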
def count ( cls , name ) : """Return the count of ` ` name ` `"""
counter = cls . collection . find_one ( { 'name' : name } ) or { } return counter . get ( 'seq' , 0 )
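Assuming cls.collection is a pymongo collection holding documents shaped like {'name': ..., 'seq': ...}; the class name Counter below is hypothetical:

# with a stored document {'name': 'downloads', 'seq': 42}
Counter.count('downloads')  # -> 42
Counter.count('missing')    # -> 0, since find_one returns None and the empty-dict fallback is used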
def extract_version_from_filename ( filename ) : """Extract version number from sdist filename ."""
filename = os . path . splitext ( os . path . basename ( filename ) ) [ 0 ] if filename . endswith ( '.tar' ) : filename = os . path . splitext ( filename ) [ 0 ] return filename . partition ( '-' ) [ 2 ]
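The extra splitext pass handles compressed tarballs whose names carry two extensions; note that the version is taken as everything after the first hyphen, so hyphenated project names would mis-split. The file names below are invented:

extract_version_from_filename('mypkg-1.2.3.tar.gz')  # -> '1.2.3'
extract_version_from_filename('mypkg-0.9.zip')       # -> '0.9'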
def get ( self , key , default = '' , stringify = True ) : """Returns dictionary values or default . Args : key : string . Dictionary key to look up . default : string . Return this value if key not found . stringify : bool . Force all return values to string for compatibility reasons . Returns : python - wrapped CF object or default if not found ."""
obj = self . __getitem__ ( key ) if obj is None : obj = default elif stringify : obj = str ( obj ) return obj
def _get_resampled ( self , rule , how = { 'ohlc' : 'last' , 'volume' : 'sum' } , df = None , ** kwargs ) : """Returns a resampled DataFrame Parameters rule : str the offset string or object representing target conversion for all aliases available see http : / / pandas . pydata . org / pandas - docs / stable / timeseries . html # offset - aliases how : str or dict states the form in which the resampling will be done . Examples : how = { ' volume ' : ' sum ' } how = ' count ' df : DataFrame If omitted then the QuantFigure . DataFrame is resampled . kwargs For more information see http : / / pandas . pydata . org / pandas - docs / stable / generated / pandas . DataFrame . resample . html"""
df = self . df . copy ( ) if df is None else df if rule is None : return df else : if isinstance ( how , dict ) : how = how . copy ( ) # avoid mutating the shared default ` ` how ` ` dict if 'ohlc' in how : v = how . pop ( 'ohlc' ) for _ in [ 'open' , 'high' , 'low' , 'close' ] : how [ _ ] = v _how = how . copy ( ) for _ in _how : if _ not in self . _d : del how [ _ ] return df . resample ( rule = rule , ** kwargs ) . apply ( how )
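For example, passing how={'ohlc': 'last', 'volume': 'sum'} is expanded so that open/high/low/close each map to 'last' (and any key not present in the figure's column mapping is dropped) before the frame is resampled; the qf instance below is hypothetical:

weekly = qf._get_resampled('1W', how={'ohlc': 'last', 'volume': 'sum'})
# behaves like resampling with
# {'open': 'last', 'high': 'last', 'low': 'last', 'close': 'last', 'volume': 'sum'}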
def get_languages ( self ) : """Get the list of languages we need to start servers and create clients for ."""
languages = [ 'python' ] all_options = CONF . options ( self . CONF_SECTION ) for option in all_options : if option in [ l . lower ( ) for l in LSP_LANGUAGES ] : languages . append ( option ) return languages
def tab ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : """Tap the ` ` tab ` ` key ` ` n ` ` times , with ` ` interval ` ` seconds between taps ."""
self . delay ( pre_dl ) self . k . tap_key ( self . k . tab_key , n , interval ) self . delay ( post_dl )
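Hedged usage, assuming kb is an instance of the surrounding keyboard wrapper class:

kb.tab(n=3, interval=0.5, pre_dl=1.0)  # wait 1s, then press Tab three times, 0.5s apart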
def iflatten ( L ) : """Lazily flatten nested iterables , yielding items one at a time ."""
for sublist in L : if hasattr ( sublist , '__iter__' ) : for item in iflatten ( sublist ) : yield item else : yield sublist
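Usage, with one caveat: strings also define __iter__, so a string element would be split into characters and a single character would recurse without end; the helper is assumed to receive lists or tuples of non-string leaves:

list(iflatten([1, [2, [3, 4]], (5, 6)]))  # -> [1, 2, 3, 4, 5, 6]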
def generate_tokens ( self ) : """Tokenize the file , run physical line checks and yield tokens ."""
if self . _io_error : self . report_error ( 1 , 0 , 'E902 %s' % self . _io_error , readlines ) tokengen = tokenize . generate_tokens ( self . readline ) try : for token in tokengen : if token [ 2 ] [ 0 ] > self . total_lines : return self . noqa = token [ 4 ] and noqa ( token [ 4 ] ) self . maybe_check_physical ( token ) yield token except ( SyntaxError , tokenize . TokenError ) : self . report_invalid_syntax ( )
def _bigger_than_zero ( value : str ) -> int : """Type evaluator for argparse ."""
ivalue = int ( value ) if ivalue < 0 : raise ArgumentTypeError ( '{} should be bigger than 0' . format ( ivalue ) ) return ivalue
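Typical argparse wiring; note that as written the validator rejects negative values but still accepts 0, despite its name:

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument('--retries', type=_bigger_than_zero, default=1)
parser.parse_args(['--retries', '3'])   # -> Namespace(retries=3)
# parser.parse_args(['--retries', '-1']) would abort with "should be bigger than 0"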
def validate ( self ) : '''If a schema exists , run fiona ' s shapefile validation by trying to write the results to an in - memory file .'''
if self . _schema is not None : with MemoryFile ( ) as memfile : with memfile . open ( driver = "ESRI Shapefile" , schema = self . schema ) as target : for _item in self . _results : # getting rid of the assets that don ' t behave well because of in - memory rasters item = GeoFeature ( _item . geometry , _item . properties ) target . write ( item . to_record ( item . crs ) )