idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
245,200
def tobinary(self, path, prefix='series', overwrite=False, credentials=None):
    """Write this series out as binary files under ``path``.

    Delegates entirely to :func:`thunder.series.writers.tobinary`;
    ``prefix`` names the output files and ``overwrite`` controls
    clobbering of an existing destination.
    """
    from thunder.series.writers import tobinary as _write_binary
    _write_binary(self, path, prefix=prefix, overwrite=overwrite,
                  credentials=credentials)
Write data to binary files .
245,201
def addextension ( path , ext = None ) : if ext : if '*' in path : return path elif os . path . splitext ( path ) [ 1 ] : return path else : if not ext . startswith ( '.' ) : ext = '.' + ext if not path . endswith ( ext ) : if not path . endswith ( os . path . sep ) : path += os . path . sep return path + '*' + ext els...
Helper function for handling of paths given separately passed file extensions .
245,202
def select(files, start, stop):
    """Return the slice of ``files`` between ``start`` and ``stop``.

    Missing bounds default to the whole range; when both bounds are
    falsy the list is returned untouched.
    """
    if not (start or stop):
        return files
    lo = 0 if start is None else start
    hi = len(files) if stop is None else stop
    return files[lo:hi]
Helper function for handling start and stop indices
245,203
def listrecursive ( path , ext = None ) : filenames = set ( ) for root , dirs , files in os . walk ( path ) : if ext : if ext == 'tif' or ext == 'tiff' : tmp = fnmatch . filter ( files , '*.' + 'tiff' ) files = tmp + fnmatch . filter ( files , '*.' + 'tif' ) else : files = fnmatch . filter ( files , '*.' + ext ) for fi...
List files recursively
245,204
def listflat ( path , ext = None ) : if os . path . isdir ( path ) : if ext : if ext == 'tif' or ext == 'tiff' : files = glob . glob ( os . path . join ( path , '*.tif' ) ) files = files + glob . glob ( os . path . join ( path , '*.tiff' ) ) else : files = glob . glob ( os . path . join ( path , '*.' + ext ) ) else : f...
List files without recursion
245,205
def normalize_scheme ( path , ext ) : path = addextension ( path , ext ) parsed = urlparse ( path ) if parsed . scheme : return path else : import os dirname , filename = os . path . split ( path ) if not os . path . isabs ( dirname ) : dirname = os . path . abspath ( dirname ) path = os . path . join ( dirname , filen...
Normalize scheme for paths related to hdfs
245,206
def list ( path , ext = None , start = None , stop = None , recursive = False ) : files = listflat ( path , ext ) if not recursive else listrecursive ( path , ext ) if len ( files ) < 1 : raise FileNotFoundError ( 'Cannot find files of type "%s" in %s' % ( ext if ext else '*' , path ) ) files = select ( files , start ,...
Get sorted list of file paths matching path and extension
245,207
def read ( self , path , ext = None , start = None , stop = None , recursive = False , npartitions = None ) : path = uri_to_path ( path ) files = self . list ( path , ext = ext , start = start , stop = stop , recursive = recursive ) nfiles = len ( files ) self . nfiles = nfiles if spark and isinstance ( self . engine ,...
Sets up Spark RDD across files specified by dataPath on local filesystem .
245,208
def list ( path , filename = None , start = None , stop = None , recursive = False , directories = False ) : path = uri_to_path ( path ) if not filename and recursive : return listrecursive ( path ) if filename : if os . path . isdir ( path ) : path = os . path . join ( path , filename ) else : path = os . path . join ...
List files specified by dataPath .
245,209
def parse_query ( query , delim = '/' ) : key = '' prefix = '' postfix = '' parsed = urlparse ( query ) query = parsed . path . lstrip ( delim ) bucket = parsed . netloc if not parsed . scheme . lower ( ) in ( '' , "gs" , "s3" , "s3n" ) : raise ValueError ( "Query scheme must be one of '', 'gs', 's3', or 's3n'; " "got:...
Parse a boto query
245,210
def retrieve_keys ( bucket , key , prefix = '' , postfix = '' , delim = '/' , directories = False , recursive = False ) : if key and prefix : assert key . endswith ( delim ) key += prefix if not key . endswith ( delim ) and key : if BotoClient . check_prefix ( bucket , key + delim , delim = delim ) : key += delim listd...
Retrieve keys from a bucket
245,211
def getfiles ( self , path , ext = None , start = None , stop = None , recursive = False ) : from . utils import connection_with_anon , connection_with_gs parse = BotoClient . parse_query ( path ) scheme = parse [ 0 ] bucket_name = parse [ 1 ] if scheme == 's3' or scheme == 's3n' : conn = connection_with_anon ( self . ...
Get scheme bucket and keys for a set of files
245,212
def list(self, dataPath, ext=None, start=None, stop=None, recursive=False):
    """List remote objects as ``scheme:///bucket/key`` URI strings."""
    scheme, bucket_name, keylist = self.getfiles(
        dataPath, ext=ext, start=start, stop=stop, recursive=recursive)
    template = "%s:///%s/%s"
    return [template % (scheme, bucket_name, key) for key in keylist]
List files from remote storage
245,213
def read ( self , path , ext = None , start = None , stop = None , recursive = False , npartitions = None ) : from . utils import connection_with_anon , connection_with_gs path = addextension ( path , ext ) scheme , bucket_name , keylist = self . getfiles ( path , start = start , stop = stop , recursive = recursive ) i...
Sets up Spark RDD across S3 or GS objects specified by dataPath .
245,214
def getkeys ( self , path , filename = None , directories = False , recursive = False ) : from . utils import connection_with_anon , connection_with_gs parse = BotoClient . parse_query ( path ) scheme = parse [ 0 ] bucket_name = parse [ 1 ] key = parse [ 2 ] if scheme == 's3' or scheme == 's3n' : conn = connection_with...
Get matching keys for a path
245,215
def getkey ( self , path , filename = None ) : scheme , keys = self . getkeys ( path , filename = filename ) try : key = next ( keys ) except StopIteration : raise FileNotFoundError ( "Could not find object for: '%s'" % path ) nextKey = None try : nextKey = next ( keys ) except StopIteration : pass if nextKey : raise V...
Get single matching key for a path
245,216
def list ( self , path , filename = None , start = None , stop = None , recursive = False , directories = False ) : storageScheme , keys = self . getkeys ( path , filename = filename , directories = directories , recursive = recursive ) keys = [ storageScheme + ":///" + key . bucket . name + "/" + key . name for key in...
List objects specified by path .
245,217
def read ( self , path , filename = None , offset = None , size = - 1 ) : storageScheme , key = self . getkey ( path , filename = filename ) if offset or ( size > - 1 ) : if not offset : offset = 0 if size > - 1 : sizeStr = offset + size - 1 else : sizeStr = "" headers = { "Range" : "bytes=%d-%s" % ( offset , sizeStr )...
Read a file specified by path .
245,218
def open(self, path, filename=None):
    """Open the single object matching ``path`` and return a read handle."""
    scheme, key = self.getkey(path, filename=filename)
    handle = BotoReadFileHandle(scheme, key)
    return handle
Open a file specified by path .
245,219
def check_path ( path , credentials = None ) : from thunder . readers import get_file_reader reader = get_file_reader ( path ) ( credentials = credentials ) existing = reader . list ( path , directories = True ) if existing : raise ValueError ( 'Path %s appears to already exist. Specify a new directory, ' 'or call with...
Check that specified output path does not already exist
245,220
def connection_with_anon ( credentials , anon = True ) : from boto . s3 . connection import S3Connection from boto . exception import NoAuthHandlerFound try : conn = S3Connection ( aws_access_key_id = credentials [ 'access' ] , aws_secret_access_key = credentials [ 'secret' ] ) return conn except NoAuthHandlerFound : i...
Connect to S3 with automatic handling for anonymous access .
245,221
def activate ( self , path , isdirectory ) : from . utils import connection_with_anon , connection_with_gs parsed = BotoClient . parse_query ( path ) scheme = parsed [ 0 ] bucket_name = parsed [ 1 ] key = parsed [ 2 ] if scheme == 's3' or scheme == 's3n' : conn = connection_with_anon ( self . credentials ) bucket = con...
Set up a boto connection .
245,222
def topng ( images , path , prefix = "image" , overwrite = False , credentials = None ) : value_shape = images . value_shape if not len ( value_shape ) in [ 2 , 3 ] : raise ValueError ( "Only 2D or 3D images can be exported to png, " "images are %d-dimensional." % len ( value_shape ) ) from scipy . misc import imsave f...
Write out PNG files for 2d image data .
245,223
def tobinary ( images , path , prefix = "image" , overwrite = False , credentials = None ) : from thunder . writers import get_parallel_writer def tobuffer ( kv ) : key , img = kv fname = prefix + "-" + "%05d.bin" % int ( key ) return fname , img . copy ( ) writer = get_parallel_writer ( path ) ( path , overwrite = ove...
Write out images as binary files .
245,224
def yearInfo2yearDay(yearInfo):
    """Compute the number of days in a lunar year from its packed info.

    The low 4 bits encode the leap month (non-zero means a leap year,
    adding one 29-day month); the remaining bits are per-month flags,
    each set bit adding one day to a 29-day month.
    """
    info = int(yearInfo)
    has_leap = (info % 16) != 0
    info //= 16
    days = 29 * 12 + (29 if has_leap else 0)
    month_count = 12 + (1 if has_leap else 0)
    for _ in range(month_count):
        days += info & 1
        info >>= 1
    return days
Calculate the days in a lunar year from the lunar year's info
245,225
def cleanupFilename ( self , name ) : context = self . context id = '' name = name . replace ( '\\' , '/' ) name = name . split ( '/' ) [ - 1 ] for c in name : if c . isalnum ( ) or c in '._' : id += c if context . check_id ( id ) is None and getattr ( context , id , None ) is None : return id count = 1 while 1 : if co...
Generate a unique id which doesn't match the system-generated ids
245,226
def parse_data_slots ( value ) : value = unquote ( value ) if '>' in value : wrappers , children = value . split ( '>' , 1 ) else : wrappers = value children = '' if '*' in children : prepends , appends = children . split ( '*' , 1 ) else : prepends = children appends = '' wrappers = list ( filter ( bool , list ( map (...
Parse data - slots value into slots used to wrap node prepend to node or append to node .
245,227
def cook_layout ( layout , ajax ) : layout = re . sub ( '\r' , '\n' , re . sub ( '\r\n' , '\n' , layout ) ) if isinstance ( layout , six . text_type ) : result = getHTMLSerializer ( [ layout . encode ( 'utf-8' ) ] , encoding = 'utf-8' ) else : result = getHTMLSerializer ( [ layout ] , encoding = 'utf-8' ) if '<![CDATA[...
Return main_template compatible layout
245,228
def existing ( self ) : catalog = api . portal . get_tool ( 'portal_catalog' ) results = [ ] layout_path = self . _get_layout_path ( self . request . form . get ( 'layout' , '' ) ) for brain in catalog ( layout = layout_path ) : results . append ( { 'title' : brain . Title , 'url' : brain . getURL ( ) } ) return json ....
find existing content assigned to this layout
245,229
def load_reader_options():
    """Read Pandoc reader options from the PANDOC_READER_OPTIONS env var.

    The variable must hold a JSON object; key order is preserved via
    OrderedDict. Raises KeyError when the variable is unset.
    """
    raw = os.environ['PANDOC_READER_OPTIONS']
    return json.loads(raw, object_pairs_hook=OrderedDict)
Retrieve Pandoc Reader options from the environment
245,230
def yaml_filter ( element , doc , tag = None , function = None , tags = None , strict_yaml = False ) : assert ( tag is None ) + ( tags is None ) == 1 if tags is None : tags = { tag : function } if type ( element ) == CodeBlock : for tag in tags : if tag in element . classes : function = tags [ tag ] if not strict_yaml ...
Convenience function for parsing code blocks with YAML options
245,231
def _set_content(self, value, oktypes):
    """Install ``value`` as this element's content list.

    ``None`` is treated as an empty sequence; items are wrapped in a
    ListContainer constrained to ``oktypes`` and parented to ``self``.
    """
    items = value if value is not None else []
    self._content = ListContainer(*items, oktypes=oktypes, parent=self)
Similar to content . setter but when there are no existing oktypes
245,232
def offset(self, n):
    """Return the sibling element ``n`` positions away, or None.

    None is also returned when the element is detached (no index) or
    the requested position falls outside the container.
    """
    pos = self.index
    if pos is None:
        return None
    target = pos + n
    siblings = self.container
    if 0 <= target < len(siblings):
        return siblings[target]
    return None
Return a sibling element offset by n
245,233
def search ( self , term : str , case_sensitive : bool = False ) -> 'PrettyDir' : if case_sensitive : return PrettyDir ( self . obj , [ pattr for pattr in self . pattrs if term in pattr . name ] ) else : term = term . lower ( ) return PrettyDir ( self . obj , [ pattr for pattr in self . pattrs if term in pattr . name ....
Searches for names that match some pattern .
245,234
def properties(self) -> 'PrettyDir':
    """Return a PrettyDir holding only property attributes."""
    selected = [
        pattr for pattr in self.pattrs
        if category_match(pattr.category, AttrCategory.PROPERTY)
    ]
    return PrettyDir(self.obj, selected)
Returns all properties of the inspected object .
245,235
def methods(self) -> 'PrettyDir':
    """Return a PrettyDir holding only function (method) attributes."""
    selected = [
        pattr for pattr in self.pattrs
        if category_match(pattr.category, AttrCategory.FUNCTION)
    ]
    return PrettyDir(self.obj, selected)
Returns all methods of the inspected object .
245,236
def public(self) -> 'PrettyDir':
    """Return a PrettyDir of attributes whose names lack a leading '_'."""
    visible = [p for p in self.pattrs if not p.name.startswith('_')]
    return PrettyDir(self.obj, visible)
Returns public attributes of the inspected object .
245,237
def own(self) -> 'PrettyDir':
    """Return attributes defined on the object itself, not inherited.

    An attribute counts as "own" when its name appears either in the
    instance __dict__ or in the __dict__ of the object's own class.
    """
    owned = [
        p for p in self.pattrs
        if p.name in type(self.obj).__dict__ or p.name in self.obj.__dict__
    ]
    return PrettyDir(self.obj, owned)
Returns attributes that are not inherited from parent classes .
245,238
def get_oneline_doc ( self ) -> str : attr = self . attr_obj if self . display_group == AttrCategory . DESCRIPTOR : if isinstance ( attr , property ) : doc_list = [ '@property with getter' ] if attr . fset : doc_list . append ( SETTER ) if attr . fdel : doc_list . append ( DELETER ) else : doc_list = [ 'class %s' % att...
Doc doesn't necessarily mean docstring. It could be anything that should be put after the attr's name as an explanation.
245,239
def format_pattrs ( pattrs : List [ 'api.PrettyAttribute' ] ) -> str : output = [ ] pattrs . sort ( key = lambda x : ( _FORMATTER [ x . display_group ] . display_index , x . display_group , x . name , ) ) for display_group , grouped_pattrs in groupby ( pattrs , lambda x : x . display_group ) : output . append ( _FORMAT...
Generates repr string given a list of pattrs .
245,240
def get_attr_from_dict ( inspected_obj : Any , attr_name : str ) -> Any : if inspect . isclass ( inspected_obj ) : obj_list = [ inspected_obj ] + list ( inspected_obj . __mro__ ) else : obj_list = [ inspected_obj ] + list ( inspected_obj . __class__ . __mro__ ) for obj in obj_list : if hasattr ( obj , '__dict__' ) and ...
Ensures we get descriptor object instead of its return value .
245,241
def attr_category_postprocess ( get_attr_category_func ) : @ functools . wraps ( get_attr_category_func ) def wrapped ( name : str , attr : Any , obj : Any ) -> Tuple [ AttrCategory , ... ] : category = get_attr_category_func ( name , attr , obj ) category = list ( category ) if isinstance ( category , tuple ) else [ c...
Unifies attr_category to a tuple add AttrCategory . SLOT if needed .
245,242
def get_peak_mem():
    """Return this process's peak resident memory since it started, in MB.

    ``ru_maxrss`` is reported in kilobytes on Linux but in bytes on
    macOS, hence the platform-dependent divisor.
    """
    import resource
    divisor = 1024.0
    if sys.platform == 'darwin':
        divisor *= divisor
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return usage.ru_maxrss / divisor
this returns peak memory use since process starts till the moment its called
245,243
def dfs_do_func_on_graph(node, func, *args, **kwargs):
    """Apply ``func(n, *args, **kwargs)`` to every node in the dr graph.

    Traversal order is whatever ``node.tree_iterator()`` yields.
    """
    for current in node.tree_iterator():
        func(current, *args, **kwargs)
invoke func on each node of the dr graph
245,244
def sparse_is_desireable ( lhs , rhs ) : return False if len ( lhs . shape ) == 1 : return False else : lhs_rows , lhs_cols = lhs . shape if len ( rhs . shape ) == 1 : rhs_rows = 1 rhs_cols = rhs . size else : rhs_rows , rhs_cols = rhs . shape result_size = lhs_rows * rhs_cols if sp . issparse ( lhs ) and sp . issparse...
Examines a pair of matrices and determines if the result of their multiplication should be sparse or not .
245,245
def convert_inputs_to_sparse_if_necessary(lhs, rhs):
    """Cast both operands to CSC sparse when a sparse product pays off.

    Pairs that are already both sparse are returned untouched;
    otherwise ``sparse_is_desireable`` decides whether to convert.
    """
    both_sparse = sp.issparse(lhs) and sp.issparse(rhs)
    if not both_sparse and sparse_is_desireable(lhs, rhs):
        if not sp.issparse(lhs):
            lhs = sp.csc_matrix(lhs)
        if not sp.issparse(rhs):
            rhs = sp.csc_matrix(rhs)
    return lhs, rhs
This function checks to see if a sparse output is desireable given the inputs and if so casts the inputs to sparse in order to make it so .
245,246
def dr_wrt ( self , wrt , profiler = None ) : if wrt is self . x : jacs = [ ] for fvi , freevar in enumerate ( self . free_variables ) : tm = timer ( ) if isinstance ( freevar , ch . Select ) : new_jac = self . obj . dr_wrt ( freevar . a , profiler = profiler ) try : new_jac = new_jac [ : , freevar . idxs ] except : ne...
Loop over free variables and delete cache for the whole tree after finished each one
245,247
def J(self):
    """Compute the Jacobian with respect to the free variables.

    The dr-graph result is copied, profiler data is harvested when a
    profiler is attached, and dense results are promoted to 2-D.
    """
    jac = self.dr_wrt(self.x, profiler=self.profiler).copy()
    if self.profiler:
        self.profiler.harvest()
    if sp.issparse(jac):
        return jac
    return np.atleast_2d(jac)
Compute Jacobian . Analyze dr graph first to disable unnecessary caching
245,248
def sid(self):
    """Semantic id.

    Identifies this node by its class together with the identities of
    every term/dterm value currently bound in the instance dict.
    """
    names = sorted(list(self.terms) + list(self.dterms))
    present = [(name, id(self.__dict__[name]))
               for name in names if name in self.__dict__]
    return (self.__class__, tuple(present))
Semantic id .
245,249
def compute_dr_wrt(self, wrt):
    """Default derivative for plain number/ndarray containers.

    Returns a sparse identity of size ``self.x.size`` when
    differentiating with respect to self, otherwise None.
    """
    if wrt is not self:
        return None
    size = self.x.size
    return sp.eye(size, size)
Default method for objects that just contain a number or ndarray
245,250
def get_ubuntu_release_from_sentry ( self , sentry_unit ) : msg = None cmd = 'lsb_release -cs' release , code = sentry_unit . run ( cmd ) if code == 0 : self . log . debug ( '{} lsb_release: {}' . format ( sentry_unit . info [ 'unit_name' ] , release ) ) else : msg = ( '{} `{}` returned {} ' '{}' . format ( sentry_unit...
Get Ubuntu release codename from sentry unit .
245,251
def validate_services ( self , commands ) : self . log . debug ( 'Checking status of system services...' ) self . log . warn ( 'DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.' ) for k , v in six . iteritems ( commands ) : for cmd in v : output , co...
Validate that lists of commands succeed on service units . Can be used to verify system services are running on the corresponding service units .
245,252
def validate_services_by_name ( self , sentry_services ) : self . log . debug ( 'Checking status of system services...' ) systemd_switch = self . ubuntu_releases . index ( 'vivid' ) for sentry_unit , services_list in six . iteritems ( sentry_services ) : release , ret = self . get_ubuntu_release_from_sentry ( sentry_un...
Validate system service status by service name automatically detecting init system based on Ubuntu release codename .
245,253
def _get_config ( self , unit , filename ) : file_contents = unit . file_contents ( filename ) config = configparser . ConfigParser ( allow_no_value = True ) config . readfp ( io . StringIO ( file_contents ) ) return config
Get a ConfigParser object for parsing a unit s config file .
245,254
def validate_config_data ( self , sentry_unit , config_file , section , expected ) : self . log . debug ( 'Validating config file data ({} in {} on {})' '...' . format ( section , config_file , sentry_unit . info [ 'unit_name' ] ) ) config = self . _get_config ( sentry_unit , config_file ) if section != 'DEFAULT' and n...
Validate config file data .
245,255
def _validate_dict_data ( self , expected , actual ) : self . log . debug ( 'actual: {}' . format ( repr ( actual ) ) ) self . log . debug ( 'expected: {}' . format ( repr ( expected ) ) ) for k , v in six . iteritems ( expected ) : if k in actual : if ( isinstance ( v , six . string_types ) or isinstance ( v , bool ) ...
Validate dictionary data .
245,256
def validate_relation_data(self, sentry_unit, relation, expected):
    """Validate actual relation data against expected relation data.

    ``relation`` is a two-element pair forwarded to the sentry's
    relation query; dict comparison is delegated to
    ``_validate_dict_data``.
    """
    rel_name, rel_unit = relation[0], relation[1]
    actual = sentry_unit.relation(rel_name, rel_unit)
    return self._validate_dict_data(expected, actual)
Validate actual relation data based on expected relation data .
245,257
def _validate_list_data ( self , expected , actual ) : for e in expected : if e not in actual : return "expected item {} not found in actual list" . format ( e ) return None
Compare expected list vs actual list data .
245,258
def service_restarted ( self , sentry_unit , service , filename , pgrep_full = None , sleep_time = 20 ) : self . log . warn ( 'DEPRECATION WARNING: use ' 'validate_service_config_changed instead of ' 'service_restarted due to known races.' ) time . sleep ( sleep_time ) if ( self . _get_proc_start_time ( sentry_unit , ...
Check if service was restarted .
245,259
def service_restarted_since ( self , sentry_unit , mtime , service , pgrep_full = None , sleep_time = 20 , retry_count = 30 , retry_sleep_time = 10 ) : unit_name = sentry_unit . info [ 'unit_name' ] self . log . debug ( 'Checking that %s service restarted since %s on ' '%s' % ( service , mtime , unit_name ) ) time . sl...
Check if service has been started after a given time .
245,260
def config_updated_since ( self , sentry_unit , filename , mtime , sleep_time = 20 , retry_count = 30 , retry_sleep_time = 10 ) : unit_name = sentry_unit . info [ 'unit_name' ] self . log . debug ( 'Checking that %s updated since %s on ' '%s' % ( filename , mtime , unit_name ) ) time . sleep ( sleep_time ) file_mtime =...
Check if file was modified after a given time .
245,261
def validate_service_config_changed ( self , sentry_unit , mtime , service , filename , pgrep_full = None , sleep_time = 20 , retry_count = 30 , retry_sleep_time = 10 ) : service_restart = self . service_restarted_since ( sentry_unit , mtime , service , pgrep_full = pgrep_full , sleep_time = sleep_time , retry_count = ...
Check service and file were updated after mtime
245,262
def file_to_url(self, file_rel_path):
    """Convert a (possibly relative) file path to an absolute file URL."""
    absolute = os.path.abspath(file_rel_path)
    parsed = urlparse.urlparse(absolute, scheme='file')
    return parsed.geturl()
Convert a relative file path to a file URL .
245,263
def check_commands_on_units ( self , commands , sentry_units ) : self . log . debug ( 'Checking exit codes for {} commands on {} ' 'sentry units...' . format ( len ( commands ) , len ( sentry_units ) ) ) for sentry_unit in sentry_units : for cmd in commands : output , code = sentry_unit . run ( cmd ) if code == 0 : sel...
Check that all commands in a list exit zero on all sentry units in a list .
245,264
def get_unit_process_ids ( self , unit_processes , expect_success = True , pgrep_full = False ) : pid_dict = { } for sentry_unit , process_list in six . iteritems ( unit_processes ) : pid_dict [ sentry_unit ] = { } for process in process_list : pids = self . get_process_id_list ( sentry_unit , process , expect_success ...
Construct a dict containing unit sentries process names and process IDs .
245,265
def validate_unit_process_ids ( self , expected , actual ) : self . log . debug ( 'Checking units for running processes...' ) self . log . debug ( 'Expected PIDs: {}' . format ( expected ) ) self . log . debug ( 'Actual PIDs: {}' . format ( actual ) ) if len ( actual ) != len ( expected ) : return ( 'Unit count mismatc...
Validate process id quantities for services on units .
245,266
def validate_list_of_identical_dicts ( self , list_of_dicts ) : hashes = [ ] for _dict in list_of_dicts : hashes . append ( hash ( frozenset ( _dict . items ( ) ) ) ) self . log . debug ( 'Hashes: {}' . format ( hashes ) ) if len ( set ( hashes ) ) == 1 : self . log . debug ( 'Dicts within list are identical' ) else : ...
Check that all dicts within a list are identical .
245,267
def get_unit_hostnames(self, units):
    """Return a dict mapping juju unit names to their hostnames.

    Hostnames are read from /etc/hostname on each unit.
    """
    host_names = {
        unit.info['unit_name']: str(unit.file_contents('/etc/hostname').strip())
        for unit in units
    }
    self.log.debug('Unit host names: {}'.format(host_names))
    return host_names
Return a dict of juju unit names to hostnames .
245,268
def run_cmd_unit ( self , sentry_unit , cmd ) : output , code = sentry_unit . run ( cmd ) if code == 0 : self . log . debug ( '{} `{}` command returned {} ' '(OK)' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code ) ) else : msg = ( '{} `{}` command returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name...
Run a command on a unit return the output and exit code .
245,269
def file_exists_on_unit(self, sentry_unit, file_name):
    """Return True if ``file_name`` exists on the unit, False if absent.

    Any error other than IOError aborts the run via amulet.FAIL.
    """
    try:
        sentry_unit.file_stat(file_name)
    except IOError:
        return False
    except Exception as e:
        msg = 'Error checking file {}: {}'.format(file_name, e)
        amulet.raise_status(amulet.FAIL, msg=msg)
    else:
        return True
Check if a file exists on a unit .
245,270
def file_contents_safe ( self , sentry_unit , file_name , max_wait = 60 , fatal = False ) : unit_name = sentry_unit . info [ 'unit_name' ] file_contents = False tries = 0 while not file_contents and tries < ( max_wait / 4 ) : try : file_contents = sentry_unit . file_contents ( file_name ) except IOError : self . log . ...
Get file contents from a sentry unit . Wrap amulet file_contents with retry logic to address races where a file checks as existing but no longer exists by the time file_contents is called . Return None if file not found . Optionally raise if fatal is True .
245,271
def port_knock_tcp ( self , host = "localhost" , port = 22 , timeout = 15 ) : try : connect_host = socket . gethostbyname ( host ) host_human = "{} ({})" . format ( connect_host , host ) except socket . error as e : self . log . warn ( 'Unable to resolve address: ' '{} ({}) Trying anyway!' . format ( host , e ) ) conne...
Open a TCP socket to check for a listening service on a host .
245,272
def port_knock_units ( self , sentry_units , port = 22 , timeout = 15 , expect_success = True ) : for unit in sentry_units : host = unit . info [ 'public-address' ] connected = self . port_knock_tcp ( host , port , timeout ) if not connected and expect_success : return 'Socket connect failed.' elif connected and not ex...
Open a TCP socket to check for a listening service on each listed juju unit .
245,273
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
    """Return True when the action identified by ``action_id`` completed."""
    data = amulet.actions.get_action_output(action_id, full_output=True)
    status = data.get(u"status")
    return status == "completed"
Wait for a given action returning if it completed or not .
245,274
def status_get(self, unit):
    """Return (status, message) for a unit via the status-get hook tool.

    Falls back to ("unknown", "") when the command exits non-zero.
    """
    raw_status, return_code = unit.run(
        "status-get --format=json --include-data")
    if return_code != 0:
        return ("unknown", "")
    payload = json.loads(raw_status)
    return (payload["status"], payload["message"])
Return the current service status of this unit .
245,275
def execute(self, sql):
    """Execute arbitrary SQL against the database, discarding results.

    The cursor is always closed, even when execution raises.
    """
    cur = self.connection.cursor()
    try:
        cur.execute(sql)
    finally:
        cur.close()
Execute arbitrary SQL against the database .
245,276
def select(self, sql):
    """Run an arbitrary SQL select and return the rows as lists.

    The cursor is closed whether or not the query succeeds.
    """
    cur = self.connection.cursor()
    try:
        cur.execute(sql)
        rows = [list(row) for row in cur.fetchall()]
    finally:
        cur.close()
    return rows
Execute arbitrary SQL select query against the database and return the results .
245,277
def migrate_passwords_to_leader_storage ( self , excludes = None ) : if not is_leader ( ) : log ( "Skipping password migration as not the lead unit" , level = DEBUG ) return dirname = os . path . dirname ( self . root_passwd_file_template ) path = os . path . join ( dirname , '*.passwd' ) for f in glob . glob ( path ) ...
Migrate any passwords storage on disk to leader storage .
245,278
def get_mysql_password_on_disk ( self , username = None , password = None ) : if username : template = self . user_passwd_file_template passwd_file = template . format ( username ) else : passwd_file = self . root_passwd_file_template _password = None if os . path . exists ( passwd_file ) : log ( "Using existing passwo...
Retrieve generate or store a mysql password for the provided username on disk .
245,279
def passwd_keys(self, username):
    """Yield peer-store keys under which a user's password may live.

    Named users get two legacy key formats; the root password uses the
    bare 'mysql.passwd' key. The literal username 'mysql' is ambiguous
    and only triggers a warning before the usual keys are yielded.
    """
    if username == 'mysql':
        log("Bad username '%s'" % (username), level=WARNING)
    if username:
        candidates = ['mysql-%s.passwd' % (username),
                      '%s.passwd' % (username)]
    else:
        candidates = ['mysql.passwd']
    for candidate in candidates:
        yield candidate
Generator to return keys used to store passwords in peer store .
245,280
def get_mysql_password ( self , username = None , password = None ) : excludes = [ ] try : for key in self . passwd_keys ( username ) : _password = leader_get ( key ) if _password : break if _password and not username : excludes . append ( self . root_passwd_file_template ) except ValueError : _password = None if not _...
Retrieve generate or store a mysql password for the provided username using peer relation cluster .
245,281
def set_mysql_password ( self , username , password ) : if username is None : username = 'root' rel_username = None if username == 'root' else username cur_passwd = self . get_mysql_password ( rel_username ) new_passwd = password try : self . connect ( user = username , password = cur_passwd ) cursor = self . connectio...
Update a mysql password for the provided username changing the leader settings
245,282
def get_allowed_units ( self , database , username , relation_id = None ) : self . connect ( password = self . get_mysql_root_password ( ) ) allowed_units = set ( ) for unit in related_units ( relation_id ) : settings = relation_get ( rid = relation_id , unit = unit ) for attr in [ "%s_hostname" % ( database ) , 'hostn...
Get list of units with access grants for database with username .
245,283
def configure_db ( self , hostname , database , username , admin = False ) : self . connect ( password = self . get_mysql_root_password ( ) ) if not self . database_exists ( database ) : self . create_database ( database ) remote_ip = self . normalize_address ( hostname ) password = self . get_mysql_password ( username...
Configure access to database for username from hostname .
245,284
def human_to_bytes ( self , human ) : num_re = re . compile ( '^[0-9]+$' ) if num_re . match ( human ) : return human factors = { 'K' : 1024 , 'M' : 1048576 , 'G' : 1073741824 , 'T' : 1099511627776 } modifier = human [ - 1 ] if modifier in factors : return int ( human [ : - 1 ] ) * factors [ modifier ] if modifier == '...
Convert human readable configuration options to bytes .
245,285
def sys_mem_limit(self):
    """Default memory limit for the current unit, lower on armv7 boxes."""
    spec = '2700M' if platform.machine() in ['armv7l'] else '4G'
    return self.human_to_bytes(spec)
Determine the default memory limit for the current service unit .
245,286
def get_mem_total(self):
    """Return total system memory, e.g. '16384256K', from /proc/meminfo.

    Implicitly returns None when no MemTotal line is present.
    """
    with open('/proc/meminfo') as meminfo_file:
        for line in meminfo_file:
            key, mem = line.split(':', 2)
            if key != 'MemTotal':
                continue
            mtot, modifier = mem.strip().split(' ')
            return '%s%s' % (mtot, modifier[0].upper())
Calculate the total memory in the current service unit .
245,287
def parse_config ( self ) : config = config_get ( ) mysql_config = { } if 'max-connections' in config : mysql_config [ 'max_connections' ] = config [ 'max-connections' ] if 'wait-timeout' in config : mysql_config [ 'wait_timeout' ] = config [ 'wait-timeout' ] if 'innodb-flush-log-at-trx-commit' in config : mysql_config...
Parse charm configuration and calculate values for config files .
245,288
def create_loopback(file_path):
    """Create a loopback device backed by ``file_path``.

    Returns the device path (e.g. /dev/loop0) that losetup mapped to
    the backing file.
    """
    backing = os.path.abspath(file_path)
    check_call(['losetup', '--find', backing])
    for device, mapped_file in six.iteritems(loopback_devices()):
        if mapped_file == backing:
            return device
Create a loopback device for a given backing file .
245,289
def ensure_loopback_device(path, size):
    """Ensure a loopback device exists for backing file ``path``.

    An existing mapping is returned directly; otherwise the backing
    file is created at ``size`` (truncate syntax, e.g. '5G') when
    missing and a new loopback device is set up.
    """
    for device, mapped_file in six.iteritems(loopback_devices()):
        if mapped_file == path:
            return device
    if not os.path.exists(path):
        check_call(['truncate', '--size', size, path])
    return create_loopback(path)
Ensure a loopback device exists for a given backing file path and size . If no loopback device is currently mapped to the file , a new one will be created .
245,290
def leader_get ( attribute = None , rid = None ) : migration_key = '__leader_get_migrated_settings__' if not is_leader ( ) : return _leader_get ( attribute = attribute ) settings_migrated = False leader_settings = _leader_get ( attribute = attribute ) previously_migrated = _leader_get ( attribute = migration_key ) if p...
Wrapper to ensure that settings are migrated from the peer relation .
245,291
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Use leader-set for cluster relations when Juju supports it.

    Falls back on the plain relation-set implementation for
    non-cluster relations or older Juju versions.
    """
    try:
        if relation_id not in relation_ids('cluster'):
            raise NotImplementedError
        return leader_set(settings=relation_settings, **kwargs)
    except NotImplementedError:
        return _relation_set(relation_id=relation_id,
                             relation_settings=relation_settings, **kwargs)
Attempt to use leader - set if supported in the current version of Juju otherwise falls back on relation - set .
245,292
def relation_get(attribute=None, unit=None, rid=None):
    """Use leader-get for cluster relations when Juju supports it.

    Falls back on the plain relation-get implementation otherwise.
    """
    try:
        if rid not in relation_ids('cluster'):
            raise NotImplementedError
        return leader_get(attribute, rid)
    except NotImplementedError:
        return _relation_get(attribute=attribute, rid=rid, unit=unit)
Attempt to use leader - get if supported in the current version of Juju otherwise falls back on relation - get .
245,293
def peer_retrieve ( key , relation_name = 'cluster' ) : cluster_rels = relation_ids ( relation_name ) if len ( cluster_rels ) > 0 : cluster_rid = cluster_rels [ 0 ] return relation_get ( attribute = key , rid = cluster_rid , unit = local_unit ( ) ) else : raise ValueError ( 'Unable to detect' 'peer relation {}' . forma...
Retrieve a named key from peer relation relation_name .
245,294
def peer_echo ( includes = None , force = False ) : try : is_leader ( ) except NotImplementedError : pass else : if not force : return relation_get = _relation_get relation_set = _relation_set rdata = relation_get ( ) echo_data = { } if includes is None : echo_data = rdata . copy ( ) for ex in [ 'private-address' , 'pu...
Echo filtered attributes back onto the same relation for storage .
245,295
def peer_store_and_set ( relation_id = None , peer_relation_name = 'cluster' , peer_store_fatal = False , relation_settings = None , delimiter = '_' , ** kwargs ) : relation_settings = relation_settings if relation_settings else { } relation_set ( relation_id = relation_id , relation_settings = relation_settings , ** k...
Store passed - in arguments both in argument relation and in peer storage .
245,296
def sed(filename, before, after, flags='g'):
    """Search and replace ``before`` with ``after`` in-place via GNU sed.

    :param before: extended-regex pattern to match
    :param after: replacement text
    :param flags: sed substitution flags, default 'g' (replace all)
    """
    expression = r's/{0}/{1}/{2}'.format(before, after, flags)
    target = os.path.expanduser(filename)
    return subprocess.check_call(
        ["sed", "-i", "-r", "-e", expression, target])
Search and replaces the given pattern on filename .
245,297
def get_listening ( self , listen = [ '0.0.0.0' ] ) : if listen == [ '0.0.0.0' ] : return listen value = [ ] for network in listen : try : ip = get_address_in_network ( network = network , fatal = True ) except ValueError : if is_ip ( network ) : ip = network else : try : ip = get_iface_addr ( iface = network , fatal =...
Returns a list of addresses SSH can list on
245,298
def get_loader ( templates_dir , os_release ) : tmpl_dirs = [ ( rel , os . path . join ( templates_dir , rel ) ) for rel in six . itervalues ( OPENSTACK_CODENAMES ) ] if not os . path . isdir ( templates_dir ) : log ( 'Templates directory not found @ %s.' % templates_dir , level = ERROR ) raise OSConfigException loader...
Create a jinja2 . ChoiceLoader containing template dirs up to and including os_release . If directory template directory is missing at templates_dir it will be omitted from the loader . templates_dir is added to the bottom of the search list as a base loading dir .
245,299
def complete_contexts(self):
    """Return interfaces with satisfied contexts, computing them once.

    The cached list is returned when already populated; otherwise
    ``context()`` is invoked to fill it first.
    """
    if not self._complete_contexts:
        self.context()
    return self._complete_contexts
Return a list of interfaces that have satisfied contexts .