idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
245,400
def create_cirros_image(self, glance, image_name, hypervisor_type=None):
    """Download the latest cirros image and upload it to glance,
    validate and return a resource pointer.

    :param glance: pointer to authenticated glance connection
    :param image_name: display name for new image
    :param hypervisor_type: glance image hypervisor property
    :returns: glance image pointer (result of glance_create_image)
    """
    # Deprecated entry point retained for backward compatibility;
    # delegates to glance_create_image.
    self.log.warn('/!\\ DEPRECATION WARNING: use '
                  'glance_create_image instead of '
                  'create_cirros_image.')

    self.log.debug('Creating glance cirros image '
                   '({})...'.format(image_name))

    # Honour an amulet-provided HTTP proxy when fetching from
    # download.cirros-cloud.net.
    http_proxy = os.getenv('AMULET_HTTP_PROXY')
    self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
    if http_proxy:
        proxies = {'http': http_proxy}
        opener = urllib.FancyURLopener(proxies)
    else:
        opener = urllib.FancyURLopener()

    # Resolve the latest released cirros version, then build the image URL.
    f = opener.open('http://download.cirros-cloud.net/version/released')
    version = f.read().strip()
    cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
    cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                          version, cirros_img)
    f.close()

    return self.glance_create_image(glance, image_name, cirros_url,
                                    hypervisor_type=hypervisor_type)
Download the latest cirros image, upload it to glance, then validate and return a resource pointer.
245,401
def delete_image(self, glance, image):
    """Delete the specified glance image via the generic resource
    deletion helper (deprecated wrapper)."""
    deprecation_text = ('/!\\ DEPRECATION WARNING: use '
                        'delete_resource instead of delete_image.')
    self.log.warn(deprecation_text)
    self.log.debug('Deleting glance image ({})...'.format(image))
    image_collection = glance.images
    return self.delete_resource(image_collection, image, msg='glance image')
Delete the specified image .
245,402
def create_instance(self, nova, image_name, instance_name, flavor):
    """Create the specified instance.

    :param nova: pointer to authenticated nova connection
    :param image_name: name of the glance image to boot from
    :param instance_name: display name for the new instance
    :param flavor: name of the nova flavor to boot with
    :returns: nova instance pointer, or None if it never went ACTIVE
    """
    self.log.debug('Creating instance '
                   '({}|{}|{})'.format(instance_name, image_name, flavor))
    image = nova.glance.find_image(image_name)
    flavor = nova.flavors.find(name=flavor)
    instance = nova.servers.create(name=instance_name, image=image,
                                   flavor=flavor)

    # Poll every 3s (up to 60 polls, ~3 minutes) until the instance
    # reports ACTIVE status.
    count = 1
    status = instance.status
    while status != 'ACTIVE' and count < 60:
        time.sleep(3)
        instance = nova.servers.get(instance.id)
        status = instance.status
        self.log.debug('instance status: {}'.format(status))
        count += 1

    if status != 'ACTIVE':
        self.log.error('instance creation timed out')
        return None

    return instance
Create the specified instance .
245,403
def delete_instance(self, nova, instance):
    """Delete the specified nova instance via the generic resource
    deletion helper (deprecated wrapper)."""
    warn_text = ('/!\\ DEPRECATION WARNING: use '
                 'delete_resource instead of delete_instance.')
    self.log.warn(warn_text)
    self.log.debug('Deleting instance ({})...'.format(instance))
    return self.delete_resource(nova.servers, instance,
                                msg='nova instance')
Delete the specified instance .
245,404
def create_or_get_keypair(self, nova, keypair_name="testkey"):
    """Create a new keypair, or return pointer if it already exists.

    :param nova: pointer to authenticated nova connection
    :param keypair_name: name of the keypair to look up or create
    :returns: nova keypair pointer
    """
    try:
        existing = nova.keypairs.get(keypair_name)
    except Exception:
        # Lookup failed: treat as missing and create a fresh keypair.
        self.log.debug('Keypair ({}) does not exist, '
                       'creating it.'.format(keypair_name))
        return nova.keypairs.create(name=keypair_name)

    self.log.debug('Keypair ({}) already exists, '
                   'using it.'.format(keypair_name))
    return existing
Create a new keypair or return pointer if it already exists .
245,405
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
                         img_id=None, src_vol_id=None, snap_id=None):
    """Create cinder volume, optionally from a glance image, OR
    optionally as a clone of an existing volume, OR optionally
    from a snapshot.  Wait for the new volume status to reach the
    expected status, validate and return a resource pointer.

    :param cinder: pointer to authenticated cinder connection
    :param vol_name: name for the new volume
    :param vol_size: size in GB (overridden by snapshot size when
        creating from a snapshot)
    :param img_id: optional glance image id to create volume from
    :param src_vol_id: optional source volume id to clone
    :param snap_id: optional snapshot id to create volume from
    :returns: cinder volume pointer (raises amulet FAIL on error)
    """
    # Handle parameter input: exactly one (or none) of img_id,
    # src_vol_id, snap_id may be supplied.
    if img_id and not src_vol_id and not snap_id:
        self.log.debug('Creating cinder volume from glance image...')
        bootable = 'true'
    elif src_vol_id and not img_id and not snap_id:
        self.log.debug('Cloning cinder volume...')
        bootable = cinder.volumes.get(src_vol_id).bootable
    elif snap_id and not src_vol_id and not img_id:
        self.log.debug('Creating cinder volume from snapshot...')
        snap = cinder.volume_snapshots.find(id=snap_id)
        vol_size = snap.size
        snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
        bootable = cinder.volumes.get(snap_vol_id).bootable
    elif not img_id and not src_vol_id and not snap_id:
        self.log.debug('Creating cinder volume...')
        bootable = 'false'
    else:
        msg = ('Invalid method use - name:{} size:{} img_id:{} '
               'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
                                                 img_id, src_vol_id,
                                                 snap_id))
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Create the volume: first try the legacy 'display_name' kwarg;
    # a TypeError indicates a client that expects 'name' instead.
    try:
        vol_new = cinder.volumes.create(display_name=vol_name,
                                        imageRef=img_id,
                                        size=vol_size,
                                        source_volid=src_vol_id,
                                        snapshot_id=snap_id)
        vol_id = vol_new.id
    except TypeError:
        vol_new = cinder.volumes.create(name=vol_name,
                                        imageRef=img_id,
                                        size=vol_size,
                                        source_volid=src_vol_id,
                                        snapshot_id=snap_id)
        vol_id = vol_new.id
    except Exception as e:
        msg = 'Failed to create volume: {}'.format(e)
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Wait for the new volume to reach 'available' status.
    ret = self.resource_reaches_status(cinder.volumes, vol_id,
                                       expected_stat="available",
                                       msg="Volume status wait")
    if not ret:
        msg = 'Cinder volume failed to reach expected state.'
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Re-read and validate the new volume's attributes.
    self.log.debug('Validating volume attributes...')
    val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
    val_vol_boot = cinder.volumes.get(vol_id).bootable
    val_vol_stat = cinder.volumes.get(vol_id).status
    val_vol_size = cinder.volumes.get(vol_id).size
    msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
                '{} size:{}'.format(val_vol_name, vol_id, val_vol_stat,
                                    val_vol_boot, val_vol_size))

    if val_vol_boot == bootable and val_vol_stat == 'available' \
            and val_vol_name == vol_name and val_vol_size == vol_size:
        self.log.debug(msg_attr)
    else:
        msg = ('Volume validation failed, {}'.format(msg_attr))
        amulet.raise_status(amulet.FAIL, msg=msg)

    return vol_new
Create cinder volume optionally from a glance image OR optionally as a clone of an existing volume OR optionally from a snapshot . Wait for the new volume status to reach the expected status validate and return a resource pointer .
245,406
def delete_resource(self, resource, resource_id,
                    msg="resource", max_wait=120):
    """Delete one openstack resource, such as one instance, keypair,
    image, volume, stack, etc., and confirm deletion within max wait time.

    :param resource: pointer to os resource type, ex: glance_client.images
    :param resource_id: unique name or id for the openstack resource
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if deletion was confirmed, otherwise False
    """
    self.log.debug('Deleting OpenStack resource '
                   '{} ({})'.format(resource_id, msg))
    num_before = len(list(resource.list()))
    resource.delete(resource_id)

    # Poll at 4s intervals until the listed resource count drops by
    # exactly one, or until max_wait is exhausted.
    tries = 0
    num_after = len(list(resource.list()))
    while num_after != (num_before - 1) and tries < (max_wait / 4):
        self.log.debug('{} delete check: '
                       '{} [{}:{}] {}'.format(msg, tries,
                                              num_before,
                                              num_after,
                                              resource_id))
        time.sleep(4)
        num_after = len(list(resource.list()))
        tries += 1

    self.log.debug('{}: expected, actual count = {}, '
                   '{}'.format(msg, num_before - 1, num_after))

    if num_after == (num_before - 1):
        return True
    else:
        self.log.error('{} delete timed out'.format(msg))
        return False
Delete one openstack resource such as one instance keypair image volume stack etc . and confirm deletion within max wait time .
245,407
def resource_reaches_status(self, resource, resource_id,
                            expected_stat='available',
                            msg='resource', max_wait=120):
    """Wait for an openstack resource's status to reach an expected
    status within a specified time.  Useful to confirm that nova
    instances, cinder vols, snapshots, glance images, heat stacks
    and other resources eventually reach the expected status.

    :param resource: pointer to os resource type, ex: heat_client.stacks
    :param resource_id: unique id for the openstack resource
    :param expected_stat: status to expect the resource to reach
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if the expected status was reached, otherwise False
    """
    # Poll at 4s intervals until the status matches or max_wait is
    # exhausted.
    tries = 0
    resource_stat = resource.get(resource_id).status
    while resource_stat != expected_stat and tries < (max_wait / 4):
        self.log.debug('{} status check: '
                       '{} [{}:{}] {}'.format(msg, tries,
                                              resource_stat,
                                              expected_stat,
                                              resource_id))
        time.sleep(4)
        resource_stat = resource.get(resource_id).status
        tries += 1

    # Fix: the original logged the actual status in the 'expected' slot
    # and vice versa; report expected first, then actual.
    self.log.debug('{}: expected, actual status = {}, '
                   '{}'.format(msg, expected_stat, resource_stat))

    if resource_stat == expected_stat:
        return True
    else:
        self.log.debug('{} never reached expected status: '
                       '{}'.format(resource_id, expected_stat))
        return False
Wait for an openstack resource's status to reach an expected status within a specified time. Useful to confirm that nova instances, cinder vols, snapshots, glance images, heat stacks and other resources eventually reach the expected status.
245,408
def get_ceph_pools(self, sentry_unit):
    """Return a dict of ceph pools from a single ceph unit, with
    pool name as keys, pool id as vals.

    :param sentry_unit: amulet sentry unit (juju unit) to query
    :returns: dict mapping pool name -> int pool id
    """
    cmd = 'sudo ceph osd lspools'
    output, code = sentry_unit.run(cmd)
    if code != 0:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'],
                           cmd, code, output))
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
    # Normalise any newline separators to commas, then parse
    # "<id> <name>" records.
    normalised = output.replace("\n", ",")
    pools = {}
    for record in str(normalised).split(','):
        fields = record.split(' ')
        if len(fields) == 2:
            pools[fields[1]] = int(fields[0])

    self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
                                            pools))
    return pools
Return a dict of ceph pools from a single ceph unit with pool name as keys pool id as vals .
245,409
def get_ceph_df(self, sentry_unit):
    """Return dict of ceph df json output, including ceph pool state.

    :param sentry_unit: amulet sentry unit (juju unit) to query
    :returns: dict parsed from `ceph df --format=json`
    """
    cmd = 'sudo ceph df --format=json'
    output, code = sentry_unit.run(cmd)
    if code != 0:
        unit_name = sentry_unit.info['unit_name']
        msg = ('{} `{}` returned {} '
               '{}'.format(unit_name, cmd, code, output))
        amulet.raise_status(amulet.FAIL, msg=msg)
    return json.loads(output)
Return dict of ceph df json output including ceph pool state .
245,410
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
    """Take a sample of attributes of a ceph pool, returning ceph
    pool name, object count and disk space used for the specified
    pool ID number.

    :param sentry_unit: amulet sentry unit (juju unit) to query
    :param pool_id: numeric ceph pool id to sample
    :returns: (pool_name, obj_count, kb_used) tuple, or None if the
        pool id is not found
    """
    df = self.get_ceph_df(sentry_unit)
    for pool in df['pools']:
        if pool['id'] != pool_id:
            continue
        stats = pool['stats']
        name = pool['name']
        obj_count = stats['objects']
        kb_used = stats['kb_used']
        self.log.debug('Ceph {} pool (ID {}): {} objects, '
                       '{} kb used'.format(name, pool_id,
                                           obj_count, kb_used))
        return name, obj_count, kb_used
Take a sample of attributes of a ceph pool returning ceph pool name object count and disk space used for the specified pool ID number .
245,411
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
    """Validate ceph pool samples taken over time, such as pool
    object counts or pool kb used, before adding, after adding, and
    after deleting items which affect those pool attributes.

    :param samples: 3-element sequence: [original, created, deleted]
    :param sample_type: string for logging and usage context
    :returns: None if successful, error message otherwise
    """
    # The sample taken after creation must be larger than the
    # original, and the sample taken after deletion must be smaller
    # than the created sample.
    grew = samples[1] > samples[0]
    shrank = samples[2] < samples[1]
    if grew and shrank:
        self.log.debug('Ceph {} samples (OK): '
                       '{}'.format(sample_type, samples))
        return None
    return ('Ceph {} samples ({}) '
            'unexpected.'.format(sample_type, samples))
Validate ceph pool samples taken over time such as pool object counts or pool kb used before adding after adding and after deleting items which affect those pool attributes . The 2nd element is expected to be greater than the 1st ; 3rd is expected to be less than the 2nd .
245,412
def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
    """Wait for rmq units extended status to show cluster readiness,
    after an optional initial sleep period.  Initial sleep is likely
    necessary to be effective following a config change, as the status
    message may not instantly update to non-ready.

    :param deployment: amulet deployment object pointer
    :param init_sleep: seconds to sleep before polling (0 to skip)
    :param timeout: maximum seconds to wait for ready status
    """
    if init_sleep:
        time.sleep(init_sleep)

    ready_pattern = re.compile('^Unit is ready and clustered$')
    deployment._auto_wait_for_status(message=ready_pattern,
                                     timeout=timeout,
                                     include_only=['rabbitmq-server'])
Wait for rmq units extended status to show cluster readiness after an optional initial sleep period . Initial sleep is likely necessary to be effective following a config change as status message may not instantly update to non - ready .
245,413
def get_rmq_cluster_status(self, sentry_unit):
    """Execute rabbitmq cluster status command on a unit and return
    the full output.

    :param sentry_unit: sentry unit pointer
    :returns: cluster status output string
    """
    cmd = 'rabbitmqctl cluster_status'
    output, _ = self.run_cmd_unit(sentry_unit, cmd)
    unit_name = sentry_unit.info['unit_name']
    self.log.debug('{} cluster_status:\n{}'.format(unit_name, output))
    return str(output)
Execute rabbitmq cluster status command on a unit and return the full output .
245,414
def get_rmq_cluster_running_nodes(self, sentry_unit):
    """Parse rabbitmqctl cluster_status output string, return list of
    running rabbitmq cluster nodes.

    :param sentry_unit: sentry unit pointer
    :returns: list of node name strings, empty if none found
    """
    str_stat = self.get_rmq_cluster_status(sentry_unit)
    if 'running_nodes' not in str_stat:
        return []

    # Slice the erlang-style {running_nodes,[...]} term out of the raw
    # status and coerce its single-quoted atoms into a JSON list.
    start = str_stat.find("{running_nodes,") + 15
    end = str_stat.find("]},", start) + 1
    node_list_json = str_stat[start:end].replace("'", '"')
    return json.loads(node_list_json)
Parse rabbitmqctl cluster_status output string return list of running rabbitmq cluster nodes .
245,415
def validate_rmq_cluster_running_nodes(self, sentry_units):
    """Check that all rmq unit hostnames are represented in the
    cluster_status output of all units.

    :param sentry_units: list of sentry unit pointers
    :returns: None if successful, error string otherwise
    """
    host_names = self.get_unit_hostnames(sentry_units)
    errors = []

    # Query every unit and confirm every unit's node name appears in
    # its view of the cluster.
    for query_unit in sentry_units:
        query_name = query_unit.info['unit_name']
        running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
        expected_nodes = (
            'rabbit@{}'.format(host_names[u.info['unit_name']])
            for u in sentry_units)
        for node_name in expected_nodes:
            if node_name not in running_nodes:
                errors.append('Cluster member check failed on {}: {} not '
                              'in {}\n'.format(query_name, node_name,
                                               running_nodes))
    if errors:
        return ''.join(errors)
Check that all rmq unit hostnames are represented in the cluster_status output of all units .
245,416
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
    """Check a single juju rmq unit for ssl and port in the config file.

    :param sentry_unit: amulet sentry unit to inspect
    :param port: optional ssl port to check for in the config file
    :returns: True if ssl (and, when given, the port) appears in the
        config, False otherwise; raises amulet FAIL on an unexpected
        combination of findings
    """
    host = sentry_unit.info['public-address']
    unit_name = sentry_unit.info['unit_name']

    conf_file = '/etc/rabbitmq/rabbitmq.config'
    conf_contents = str(self.file_contents_safe(sentry_unit,
                                                conf_file, max_wait=16))
    # NOTE(review): this is a substring scan of the raw config text,
    # not a parse of the erlang config file.
    conf_ssl = 'ssl' in conf_contents
    conf_port = str(port) in conf_contents

    # Branch on the four combinations of (port given, port found,
    # ssl found); the final else guards against an impossible state.
    if port and conf_port and conf_ssl:
        self.log.debug('SSL is enabled @{}:{} '
                       '({})'.format(host, port, unit_name))
        return True
    elif port and not conf_port and conf_ssl:
        self.log.debug('SSL is enabled @{} but not on port {} '
                       '({})'.format(host, port, unit_name))
        return False
    elif not port and conf_ssl:
        self.log.debug('SSL is enabled @{}:{} '
                       '({})'.format(host, port, unit_name))
        return True
    elif not conf_ssl:
        self.log.debug('SSL not enabled @{}:{} '
                       '({})'.format(host, port, unit_name))
        return False
    else:
        msg = ('Unknown condition when checking SSL status @{}:{} '
               '({})'.format(host, port, unit_name))
        amulet.raise_status(amulet.FAIL, msg)
Check a single juju rmq unit for ssl and port in the config file .
245,417
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
    """Check that ssl is enabled on rmq juju sentry units.

    :param sentry_units: list of all rmq sentry units to check
    :param port: optional ssl port override to check
    :returns: None if successful, otherwise error message
    """
    for unit in sentry_units:
        if self.rmq_ssl_is_enabled_on_unit(unit, port=port):
            continue
        return ('Unexpected condition: ssl is disabled on unit '
                '({})'.format(unit.info['unit_name']))
    return None
Check that ssl is enabled on rmq juju sentry units .
245,418
def validate_rmq_ssl_disabled_units(self, sentry_units):
    """Check that ssl is disabled on the listed rmq juju sentry units.

    :param sentry_units: list of all rmq sentry units to check
    :returns: None if successful, otherwise error message
    """
    for unit in sentry_units:
        if not self.rmq_ssl_is_enabled_on_unit(unit):
            continue
        return ('Unexpected condition: ssl is enabled on unit '
                '({})'.format(unit.info['unit_name']))
    return None
Check that ssl is disabled on the listed rmq juju sentry units.
245,419
def configure_rmq_ssl_on(self, sentry_units, deployment,
                         port=None, max_wait=60):
    """Turn ssl charm config option on, with optional non-default
    ssl port specification.  Confirm that it is enabled on every unit.

    :param sentry_units: list of sentry units
    :param deployment: amulet deployment object pointer
    :param port: amqp port, charm default used if None
    :param max_wait: maximum time in seconds to wait for confirmation
    :returns: None if successful; raises amulet FAIL otherwise
    """
    self.log.debug('Setting ssl charm config option: on')

    # Enable RMQ SSL via charm config.
    config = {'ssl': 'on'}
    if port:
        config['ssl_port'] = port

    deployment.d.configure('rabbitmq-server', config)

    # Wait for the cluster to report ready before validating.
    self.rmq_wait_for_cluster(deployment)

    # Confirm ssl is enabled on every unit, retrying at 4s intervals
    # until max_wait is exhausted.
    tries = 0
    ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
    while ret and tries < (max_wait / 4):
        time.sleep(4)
        self.log.debug('Attempt {}: {}'.format(tries, ret))
        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
        tries += 1

    if ret:
        amulet.raise_status(amulet.FAIL, ret)
Turn ssl charm config option on with optional non - default ssl port specification . Confirm that it is enabled on every unit .
245,420
def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
    """Turn ssl charm config option off, confirm that it is disabled
    on every unit.

    :param sentry_units: list of sentry units
    :param deployment: amulet deployment object pointer
    :param max_wait: maximum time in seconds to wait for confirmation
    :returns: None if successful; raises amulet FAIL otherwise
    """
    self.log.debug('Setting ssl charm config option: off')

    # Disable RMQ SSL via charm config.
    config = {'ssl': 'off'}
    deployment.d.configure('rabbitmq-server', config)

    # Wait for the cluster to report ready before validating.
    self.rmq_wait_for_cluster(deployment)

    # Confirm ssl is disabled on every unit, retrying at 4s intervals
    # until max_wait is exhausted.
    tries = 0
    ret = self.validate_rmq_ssl_disabled_units(sentry_units)
    while ret and tries < (max_wait / 4):
        time.sleep(4)
        self.log.debug('Attempt {}: {}'.format(tries, ret))
        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
        tries += 1

    if ret:
        amulet.raise_status(amulet.FAIL, ret)
Turn ssl charm config option off confirm that it is disabled on every unit .
245,421
def connect_amqp_by_unit(self, sentry_unit, ssl=False,
                         port=None, fatal=True,
                         username="testuser1", password="changeme"):
    """Establish and return a pika amqp connection to the rabbitmq
    service running on a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param ssl: boolean, default to False
    :param port: amqp port; defaults to 5671 (ssl) or 5672 (plain)
    :param fatal: boolean; if True, raise amulet FAIL on connect error
    :param username: amqp user name
    :param password: amqp user password
    :returns: pika amqp connection pointer, or None on failure when
        fatal is False
    """
    host = sentry_unit.info['public-address']
    unit_name = sentry_unit.info['unit_name']

    # Default ports when none is specified.
    if ssl and not port:
        port = 5671
    elif not ssl and not port:
        port = 5672

    self.log.debug('Connecting to amqp on {}:{} ({}) as '
                   '{}...'.format(host, port, unit_name, username))

    try:
        credentials = pika.PlainCredentials(username, password)
        parameters = pika.ConnectionParameters(host=host, port=port,
                                               credentials=credentials,
                                               ssl=ssl,
                                               connection_attempts=3,
                                               retry_delay=5,
                                               socket_timeout=1)
        connection = pika.BlockingConnection(parameters)
        # Sanity-check the connection state before handing it back.
        assert connection.is_open is True
        assert connection.is_closing is False
        self.log.debug('Connect OK')
        return connection
    except Exception as e:
        msg = ('amqp connection failed to {}:{} as '
               '{} ({})'.format(host, port, username, str(e)))
        if fatal:
            amulet.raise_status(amulet.FAIL, msg)
        else:
            self.log.warn(msg)
            return None
Establish and return a pika amqp connection to the rabbitmq service running on a rmq juju unit .
245,422
def publish_amqp_message_by_unit(self, sentry_unit, message,
                                 queue="test", ssl=False,
                                 username="testuser1",
                                 password="changeme",
                                 port=None):
    """Publish an amqp message to a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param message: amqp message string to publish
    :param queue: message queue name, default to test
    :param ssl: boolean, default to False
    :param username: amqp user name
    :param password: amqp user password
    :param port: amqp port, connection defaults used if None
    :returns: None; propagates any exception from connect/publish
    """
    self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
                                                                message))
    connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                           port=port,
                                           username=username,
                                           password=password)

    self.log.debug('Defining channel...')
    channel = connection.channel()
    self.log.debug('Declaring queue...')
    # Durable, non-auto-delete queue so the message survives until read.
    channel.queue_declare(queue=queue, auto_delete=False, durable=True)
    self.log.debug('Publishing message...')
    channel.basic_publish(exchange='', routing_key=queue, body=message)
    self.log.debug('Closing channel...')
    channel.close()
    self.log.debug('Closing connection...')
    connection.close()
Publish an amqp message to a rmq juju unit .
245,423
def get_amqp_message_by_unit(self, sentry_unit, queue="test",
                             username="testuser1",
                             password="changeme",
                             ssl=False, port=None):
    """Get an amqp message from a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param queue: message queue name, default to test
    :param username: amqp user name
    :param password: amqp user password
    :param ssl: boolean, default to False
    :param port: amqp port, connection defaults used if None
    :returns: amqp message body; raises amulet FAIL if no message
    """
    connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                           port=port,
                                           username=username,
                                           password=password)
    channel = connection.channel()
    method_frame, _, body = channel.basic_get(queue)

    if method_frame:
        # NOTE(review): 'Retreived' typo preserved — it is a runtime
        # log string.
        self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
                                                                     body))
        # Ack so the broker removes the message from the queue.
        channel.basic_ack(method_frame.delivery_tag)
        channel.close()
        connection.close()
        return body
    else:
        msg = 'No message retrieved.'
        amulet.raise_status(amulet.FAIL, msg)
Get an amqp message from a rmq juju unit .
245,424
def validate_memcache(self, sentry_unit, conf, os_release,
                      earliest_release=5, section='keystone_authtoken',
                      check_kvs=None):
    """Check Memcache is running and is configured to be used.

    :param sentry_unit: sentry unit to check
    :param conf: OpenStack config file to check memcache settings in
    :param os_release: current OpenStack release int code
    :param earliest_release: earliest release int code the check
        applies to (5 corresponds to mitaka, per the skip message below)
    :param section: section of conf to check
    :param check_kvs: dict of expected key/values in section; defaults
        to the IPv6 localhost memcached_servers entry
    """
    if os_release < earliest_release:
        self.log.debug('Skipping memcache checks for deployment. {} <'
                       'mitaka'.format(os_release))
        return

    _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}

    self.log.debug('Checking memcached is running')
    ret = self.validate_services_by_name({sentry_unit: ['memcached']})
    if ret:
        # NOTE(review): adjacent string literals concatenate with no
        # space between 'check' and 'failed'; preserved as-is.
        amulet.raise_status(amulet.FAIL, msg='Memcache running check'
                            'failed {}'.format(ret))
    else:
        self.log.debug('OK')

    self.log.debug('Checking memcache url is configured in {}'.format(
        conf))
    if self.validate_config_data(sentry_unit, conf, section, _kvs):
        message = "Memcache config error in: {}".format(conf)
        amulet.raise_status(amulet.FAIL, msg=message)
    else:
        self.log.debug('OK')

    self.log.debug('Checking memcache configuration in '
                   '/etc/memcached.conf')
    contents = self.file_contents_safe(sentry_unit,
                                       '/etc/memcached.conf',
                                       fatal=True)
    ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
    # Trusty and earlier use the ip6-localhost alias; later releases
    # the literal IPv6 loopback address.
    if CompareHostReleases(ubuntu_release) <= 'trusty':
        memcache_listen_addr = 'ip6-localhost'
    else:
        memcache_listen_addr = '::1'
    expected = {
        '-p': '11211',
        '-l': memcache_listen_addr}
    found = []
    for key, value in expected.items():
        for line in contents.split('\n'):
            if line.startswith(key):
                self.log.debug('Checking {} is set to {}'.format(
                    key, value))
                assert value == line.split()[-1]
                self.log.debug(line.split()[-1])
                found.append(key)
    if sorted(found) == sorted(expected.keys()):
        self.log.debug('OK')
    else:
        message = "Memcache config error in: /etc/memcached.conf"
        amulet.raise_status(amulet.FAIL, msg=message)
Check Memcache is running and is configured to be used
245,425
def acquire(self, lock):
    """Acquire the named lock, non-blocking.

    :param lock: name of the lock to acquire
    :returns: True if the lock is held after this call; False if the
        request is outstanding but not yet granted.
    """
    unit = hookenv.local_unit()
    ts = self.requests[unit].get(lock)
    if not ts:
        # No outstanding request for this lock; record one with the
        # current timestamp.
        self.requests.setdefault(lock, {})
        self.requests[unit][lock] = _timestamp()
        self.msg('Requested {}'.format(lock))

    # Already granted by the leader?
    if self.granted(lock):
        self.msg('Acquired {}'.format(lock))
        return True

    # If this unit is the leader, attempt the grant immediately.
    if hookenv.is_leader():
        return self.grant(lock, unit)

    return False
Acquire the named lock non - blocking .
245,426
def granted(self, lock):
    """Return True if a previously requested lock has been granted.

    :param lock: name of the lock to check
    """
    unit = hookenv.local_unit()
    requested_ts = self.requests[unit].get(lock)
    granted_ts = self.grants.get(unit, {}).get(lock)
    # Granted only when our outstanding request's timestamp matches
    # the grant recorded by the leader.
    return bool(requested_ts) and granted_ts == requested_ts
Return True if a previously requested lock has been granted
245,427
def request_timestamp(self, lock):
    """Return the timestamp of our outstanding request for lock, or
    None if there is no outstanding request.

    :param lock: name of the lock to look up
    """
    raw_ts = self.requests[hookenv.local_unit()].get(lock, None)
    if raw_ts is None:
        return None
    return datetime.strptime(raw_ts, _timestamp_format)
Return the timestamp of our outstanding request for lock or None .
245,428
def grant(self, lock, unit):
    """Maybe grant the lock to a unit.  Leader-only.

    A per-lock policy hook named 'grant_<lock>', when defined on the
    instance, decides whether to grant; otherwise self.default_grant
    is used.

    :param lock: name of the lock
    :param unit: unit requesting the lock
    :returns: True if the unit now holds the lock, False otherwise.
    """
    if not hookenv.is_leader():
        # Only the leader is allowed to grant locks.
        return False

    # Set of units already holding this lock.
    granted = set()
    for u in self.grants:
        if lock in self.grants[u]:
            granted.add(u)
    if unit in granted:
        return True

    # Build the queue of waiting units, ordered by request timestamp.
    reqs = set()
    for u in self.requests:
        if u in granted:
            continue
        for _lock, ts in self.requests[u].items():
            if _lock == lock:
                reqs.add((ts, u))
    queue = [t[1] for t in sorted(reqs)]
    if unit not in queue:
        return False

    # Locate the custom per-lock policy, or fall back to the default.
    grant_func = getattr(self, 'grant_{}'.format(lock),
                         self.default_grant)

    if grant_func(lock, unit, granted, queue):
        # Record the grant keyed by the original request timestamp.
        self.msg('Leader grants {} to {}'.format(lock, unit))
        self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
        return True

    return False
Maybe grant the lock to a unit .
245,429
def released(self, unit, lock, timestamp):
    """Called on the leader when it has released a lock.

    :param unit: unit the lock was released from
    :param lock: name of the released lock
    :param timestamp: datetime the lock was originally requested
    """
    held_for = _utcnow() - timestamp
    report = 'Leader released {} from {}, held {}'.format(lock, unit,
                                                          held_for)
    self.msg(report)
Called on the leader when it has released a lock .
245,430
def require(self, lock, guard_func, *guard_args, **guard_kw):
    """Decorate a function to be run only when a lock is acquired.

    The decorated function runs when the lock is already granted, or
    when guard_func(*guard_args, **guard_kw) is truthy and the lock
    can be acquired; otherwise it returns None without running.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kw):
            if self.granted(lock):
                self.msg('Granted {}'.format(lock))
                return func(*args, **kw)
            guard_ok = guard_func(*guard_args, **guard_kw)
            if guard_ok and self.acquire(lock):
                return func(*args, **kw)
            return None
        return wrapper
    return decorator
Decorate a function to be run only when a lock is acquired .
245,431
def msg(self, msg):
    """Emit a message.  Override to customize log spam."""
    formatted = 'coordinator.{} {}'.format(self._name(), msg)
    hookenv.log(formatted, level=hookenv.INFO)
Emit a message . Override to customize log spam .
245,432
def deprecate(warning, date=None, log=None):
    """Add a deprecation warning the first time the function is used.

    The date, which is a string in semi-ISO8660 format, indicates the
    year-month that the function is officially going to be removed.

    :param warning: string to append to the warning (e.g. what to use
        instead)
    :param date: optional removal year-month string
    :param log: optional logging function; falls back to print
    :returns: decorator that wraps the target function
    """
    def wrap(f):

        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            try:
                # Build a key from module, file and source line info so
                # the warning is emitted only once per distinct function.
                module = inspect.getmodule(f)
                file = inspect.getsourcefile(f)
                lines = inspect.getsourcelines(f)
                f_name = "{}-{}-{}..{}-{}".format(
                    module.__name__, file, lines[0], lines[-1], f.__name__)
            except (IOError, TypeError):
                # Source is unavailable (e.g. built-in); assume the bare
                # function name is unique enough.
                f_name = f.__name__
            if f_name not in __deprecated_functions:
                __deprecated_functions[f_name] = True
                s = "DEPRECATION WARNING: Function {} is being removed".format(
                    f.__name__)
                if date:
                    s = "{} on/around {}".format(s, date)
                if warning:
                    s = "{} : {}".format(s, warning)
                if log:
                    log(s)
                else:
                    print(s)
            return f(*args, **kwargs)
        return wrapped_f
    return wrap
Add a deprecation warning the first time the function is used . The date which is a string in semi - ISO8660 format indicate the year - month that the function is officially going to be removed .
245,433
def download(self, source, dest):
    """Download an archive file.

    :param source: url of the source archive
    :param dest: local path to write the downloaded file to
    :raises: propagates URLError, OSError, etc. from the fetch; a
        partially written dest file is removed before re-raising
    """
    proto, netloc, path, params, query, fragment = urlparse(source)
    if proto in ('http', 'https'):
        # Extract embedded user:password credentials from the URL, if
        # present, and install a basic-auth opener for them.
        auth, barehost = splituser(netloc)
        if auth is not None:
            source = urlunparse((proto, barehost, path, params, query,
                                 fragment))
            username, password = splitpasswd(auth)
            passman = HTTPPasswordMgrWithDefaultRealm()
            # Realm None forces the credentials to be used for any realm.
            passman.add_password(None, source, username, password)
            authhandler = HTTPBasicAuthHandler(passman)
            opener = build_opener(authhandler)
            install_opener(opener)
    response = urlopen(source)
    try:
        with open(dest, 'wb') as dest_file:
            dest_file.write(response.read())
    except Exception as e:
        # Clean up a partially-written destination before re-raising.
        if os.path.isfile(dest):
            os.unlink(dest)
        raise e
Download an archive file .
245,434
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
    """Download and install an archive file, with optional checksum
    validation.

    Hash values may also be given on the source URL fragment as
    query-style options (e.g. '#sha1=deadbeef...'); any fragment key
    matching a hashlib algorithm name is verified against the
    downloaded file.

    :param source: url of the archive to fetch
    :param dest: optional destination to extract to
    :param checksum: optional explicit checksum to validate against
    :param hash_type: algorithm for the explicit checksum
    :returns: result of extract() on the downloaded file
    :raises UnhandledSource: on download failure
    :raises TypeError: when a fragment option carries != 1 hash value
    """
    url_parts = self.parse_url(source)
    dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
    if not os.path.exists(dest_dir):
        mkdir(dest_dir, perms=0o755)
    dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
    try:
        self.download(source, dld_file)
    except URLError as e:
        raise UnhandledSource(e.reason)
    except OSError as e:
        raise UnhandledSource(e.strerror)
    options = parse_qs(url_parts.fragment)
    for key, value in options.items():
        # py2 exposes hashlib.algorithms; py3 algorithms_available.
        if not six.PY3:
            algorithms = hashlib.algorithms
        else:
            algorithms = hashlib.algorithms_available
        if key in algorithms:
            if len(value) != 1:
                raise TypeError(
                    "Expected 1 hash value, not %d" % len(value))
            expected = value[0]
            check_hash(dld_file, expected, key)
    if checksum:
        check_hash(dld_file, checksum, hash_type)
    return extract(dld_file, dest)
Download and install an archive file with optional checksum validation .
245,435
def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
    """Set a trace point using the remote debugger.

    :param addr: address to listen on for the debugger session
    :param port: port to listen on for the debugger session
    """
    # Make sure the debug port is closed again when the process exits.
    atexit.register(close_port, port)
    try:
        log("Starting a remote python debugger session on %s:%s"
            % (addr, port))
        open_port(port)
        debugger = Rpdb(addr=addr, port=port)
        # Attach to the caller's frame, not this helper's.
        debugger.set_trace(sys._getframe().f_back)
    except Exception:
        _error("Cannot start a remote debug session on %s:%s"
               % (addr, port))
Set a trace point using the remote debugger
245,436
def device_info(device):
    """Returns a DeviceInfo object with the current device settings.

    :param device: infiniband device name to query via ibstat
    """
    status = subprocess.check_output(['ibstat', device, '-s']).splitlines()

    # Map each ibstat output pattern to the DeviceInfo attribute it fills.
    regexes = {
        "CA type: (.*)": "device_type",
        "Number of ports: (.*)": "num_ports",
        "Firmware version: (.*)": "fw_ver",
        "Hardware version: (.*)": "hw_ver",
        "Node GUID: (.*)": "node_guid",
        "System image GUID: (.*)": "sys_guid",
    }

    info = DeviceInfo()
    for line in status:
        for pattern, attr in regexes.items():
            found = re.search(pattern, line)
            if found:
                setattr(info, attr, found.group(1))
    return info
Returns a DeviceInfo object with the current device settings
245,437
def ipoib_interfaces():
    """Return a list of IPOIB capable ethernet interfaces."""
    capable = []
    for iface in network_interfaces():
        try:
            ethtool_out = subprocess.check_output(
                ['ethtool', '-i', iface])
            driver = re.search('^driver: (.+)$', ethtool_out,
                               re.M).group(1)
            if driver in IPOIB_DRIVERS:
                capable.append(iface)
        except Exception:
            # Any failure (ethtool error, no driver line) skips the
            # interface rather than aborting the scan.
            log("Skipping interface %s" % iface, level=INFO)
            continue
    return capable
Return a list of IPOIB capable ethernet interfaces
245,438
def get_audits():
    """Get OS hardening login.defs audits."""
    login_defs_audit = TemplatedFile('/etc/login.defs', LoginContext(),
                                     template_dir=TEMPLATES_DIR,
                                     user='root', group='root',
                                     mode=0o0444)
    return [login_defs_audit]
Get OS hardening login . defs audits .
245,439
def _get_defaults(modules):
    """Load the default config for the provided modules.

    :param modules: hardening modules name used to locate
        defaults/<modules>.yaml next to this file
    :returns: parsed YAML defaults
    """
    default = os.path.join(os.path.dirname(__file__),
                           'defaults/%s.yaml' % (modules))
    # Fix: use a context manager so the file handle is closed promptly
    # instead of leaking until garbage collection.
    with open(default) as fd:
        return yaml.safe_load(fd)
Load the default config for the provided modules .
245,440
def _get_schema(modules):
    """Load the config schema for the provided modules.

    NOTE: this schema must have 1-1 relationship with the defaults
    file for the same modules (it is looked up alongside it).

    :param modules: hardening modules name used to locate
        defaults/<modules>.yaml.schema next to this file
    :returns: parsed YAML schema
    """
    schema = os.path.join(os.path.dirname(__file__),
                          'defaults/%s.yaml.schema' % (modules))
    # Fix: use a context manager so the file handle is closed promptly
    # instead of leaking until garbage collection.
    with open(schema) as fd:
        return yaml.safe_load(fd)
Load the config schema for the provided modules .
245,441
def _get_user_provided_overrides(modules):
    """Load user-provided config overrides.

    :param modules: stack modules to lookup in the user overrides
        hardening.yaml file
    :returns: overrides dict for the given modules, or {} when no
        overrides file or no matching section exists
    """
    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
                             'hardening.yaml')
    if os.path.exists(overrides):
        log("Found user-provided config overrides file '%s'" %
            (overrides), level=DEBUG)
        # Fix: close the file promptly instead of leaking the handle.
        with open(overrides) as fd:
            settings = yaml.safe_load(fd)
        if settings and settings.get(modules):
            log("Applying '%s' overrides" % (modules), level=DEBUG)
            return settings.get(modules)

        log("No overrides found for '%s'" % (modules), level=DEBUG)
    else:
        log("No hardening config overrides file '%s' found in charm "
            "root dir" % (overrides), level=DEBUG)

    return {}
Load user - provided config overrides .
245,442
def _apply_overrides ( settings , overrides , schema ) : if overrides : for k , v in six . iteritems ( overrides ) : if k in schema : if schema [ k ] is None : settings [ k ] = v elif type ( schema [ k ] ) is dict : settings [ k ] = _apply_overrides ( settings [ k ] , overrides [ k ] , schema [ k ] ) else : raise Exception ( "Unexpected type found in schema '%s'" % type ( schema [ k ] ) , level = ERROR ) else : log ( "Unknown override key '%s' - ignoring" % ( k ) , level = INFO ) return settings
Get overrides config overlayed onto modules defaults .
245,443
def ensure_permissions(path, user, group, permissions, maxdepth=-1):
    """Ensure permissions for path.

    If path is a file, apply to file and return.  If path is a
    directory, recurse into its contents subject to maxdepth.

    :param path: file or directory path
    :param user: owner user name
    :param group: owner group name
    :param permissions: mode to apply (os.chmod value)
    :param maxdepth: maximum recursion depth; negative means no limit,
        0 means apply to path only without recursing
    :returns: None
    """
    if not os.path.exists(path):
        log("File '%s' does not exist - cannot set permissions" % (path),
            level=WARNING)
        return

    _user = pwd.getpwnam(user)
    os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
    os.chmod(path, permissions)

    if maxdepth == 0:
        log("Max recursion depth reached - skipping further recursion",
            level=DEBUG)
        return
    elif maxdepth > 0:
        maxdepth -= 1

    if os.path.isdir(path):
        contents = glob.glob("%s/*" % (path))
        for c in contents:
            ensure_permissions(c, user=user, group=group,
                               permissions=permissions,
                               maxdepth=maxdepth)
Ensure permissions for path .
245,444
def create(sysctl_dict, sysctl_file, ignore=False):
    """Creates a sysctl.conf file from a YAML associative array.

    :param sysctl_dict: dict of sysctl options, or a YAML string
        parsing to one
    :param sysctl_file: path of the sysctl file to write
    :param ignore: if True, pass -e to sysctl so errors applying
        values are ignored
    :returns: None
    """
    if type(sysctl_dict) is not dict:
        # Accept a YAML string as well as a dict; bail out on bad YAML.
        try:
            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
        except yaml.YAMLError:
            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
                level=ERROR)
            return
    else:
        sysctl_dict_parsed = sysctl_dict

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: {} values: {}".format(sysctl_file,
                                                     sysctl_dict_parsed),
        level=DEBUG)

    # Apply the new settings immediately.
    call = ["sysctl", "-p", sysctl_file]
    if ignore:
        call.append("-e")

    check_call(call)
Creates a sysctl . conf file from a YAML associative array
245,445
def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of
    HTTPS configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object
    :param endpoint_type: one of the ADDRESS_MAP endpoint types
    :returns: '<scheme>://<address>' base URL string
    """
    scheme = _get_scheme(configs)
    host = resolve_address(endpoint_type)
    # IPv6 literals must be bracketed inside a URL.
    if is_ipv6(host):
        host = "[{}]".format(host)
    return '%s://%s' % (scheme, host)
Returns the correct HTTP URL to this host given the state of HTTPS configuration hacluster and charm configuration .
245,446
def _get_address_override(endpoint_type=PUBLIC):
    """Returns any address overrides that the user has defined based
    on the endpoint type.

    The override value may contain a '{service_name}' placeholder,
    which is filled in from the unit's service name.

    :param endpoint_type: endpoint type to look up the override for
    :returns: the override address string, or None if not configured
    """
    override_key = ADDRESS_MAP[endpoint_type]['override']
    addr_override = config(override_key)
    if addr_override:
        return addr_override.format(service_name=service_name())
    return None
Returns any address overrides that the user has defined based on the endpoint type .
245,447
def resolve_address(endpoint_type=PUBLIC, override=True):
    """Return the unit address appropriate for the given endpoint type.

    Precedence: user override (when ``override``), matching VIP when
    clustered, explicit network config, Juju network binding, then the
    unit's fallback address.

    :raises ValueError: if no suitable address can be determined.
    """
    resolved_address = None
    if override:
        resolved_address = _get_address_override(endpoint_type)
        if resolved_address:
            return resolved_address

    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    binding = ADDRESS_MAP[endpoint_type]['binding']
    clustered = is_clustered()

    if clustered and vips:
        if net_addr:
            # Pick the VIP that lives on the configured network.
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
        else:
            # No CIDR configured: try the network bound by Juju.
            try:
                bound_cidr = resolve_network_cidr(
                    network_get_primary_address(binding))
                for vip in vips:
                    if is_address_in_network(bound_cidr, vip):
                        resolved_address = vip
                        break
            except (NotImplementedError, NoNetworkBinding):
                # Older Juju / no binding: fall back to the first VIP.
                resolved_address = vips[0]
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)

        if net_addr:
            resolved_address = get_address_in_network(net_addr, fallback_addr)
        else:
            try:
                resolved_address = network_get_primary_address(binding)
            except (NotImplementedError, NoNetworkBinding):
                resolved_address = fallback_addr

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address
Return unit address depending on net config .
245,448
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on the system for the given user/group.

    Configures sysctl settings, creates the hugetlbfs mount point and
    fstab entry, and optionally mounts it and raises kernel.shmmax.
    """
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)

    # Keep max_map_count at least twice the hugepage count.
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages

    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize

    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)

    # Replace any stale fstab entry for the mount point.
    lfstab = fstab.Fstab()
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize),
                         0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
Enable hugepages on system .
245,449
def ensure_compliance(self):
    """Disable any audited Apache modules that are currently loaded."""
    if not self.modules:
        return

    try:
        loaded_modules = self._get_loaded_modules()
        non_compliant = []
        for module in self.modules:
            if module in loaded_modules:
                log("Module '%s' is enabled but should not be." % (module),
                    level=INFO)
                non_compliant.append(module)

        if not non_compliant:
            return

        for module in non_compliant:
            self._disable_module(module)
        self._restart_apache()
    except subprocess.CalledProcessError as e:
        # The audit may already have reported this failure.
        log('Error occurred auditing apache module compliance. '
            'This may have been already reported. '
            'Output is: %s' % e.output, level=ERROR)
Ensures that the modules are not loaded .
245,450
def _get_loaded_modules():
    """Return the names of modules currently enabled in Apache."""
    output = subprocess.check_output(['apache2ctl', '-M'])
    if six.PY3:
        output = output.decode('utf-8')
    # Lines look like " <name>_module (shared)"; keep the bare name.
    matches = (re.search(r'^ (\S*)_module (\S*)', line)
               for line in output.splitlines())
    return [m.group(1) for m in matches if m]
Returns the modules which are enabled in Apache .
245,451
def _disable_module(module):
    """Disable the given Apache module via a2dismod, logging any failure."""
    try:
        subprocess.check_call(['a2dismod', module])
    except subprocess.CalledProcessError as e:
        log('Error occurred disabling module %s. '
            'Output is: %s' % (module, e.output), level=ERROR)
Disables the specified module in Apache .
245,452
def get_template_path(template_dir, path):
    """Return the template file that would be used to render *path*."""
    template_name = os.path.basename(path)
    return os.path.join(template_dir, template_name)
Returns the template file which would be used to render the path .
245,453
def render_and_write(template_dir, path, context):
    """Render the template matching *path* with *context* and write it out.

    An empty render is treated as a no-op and logged as a warning.
    """
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(os.path.basename(path))
    log('Rendering from template: %s' % template.name, level=DEBUG)
    rendered_content = template.render(context)
    if not rendered_content:
        log("Render returned None - skipping '%s'" % path,
            level=WARNING)
        return
    write(path, rendered_content.encode('utf-8').strip())
    log('Wrote template %s' % path, level=DEBUG)
Renders the specified template into the file .
245,454
def get_audits():
    """Get OS hardening apt audits."""
    audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
                          'expected': 'false'}])]
    settings = get_settings('os')
    if settings['security']['packages_clean']:
        security_packages = settings['security']['packages_list']
        if security_packages:
            audits.append(RestrictedPackages(security_packages))
    return audits
Get OS hardening apt audits .
245,455
def get_audits():
    """Get OS hardening PAM authentication audits."""
    settings = utils.get_settings('os')
    audits = []
    if settings['auth']['pam_passwdqc_enable']:
        audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
    if settings['auth']['retries']:
        audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
    else:
        # No retry limit configured; make sure the tally2 config is gone.
        audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
    return audits
Get OS hardening PAM authentication audits .
245,456
def install_ansible_support(from_ppa=True,
                            ppa_location='ppa:rquillo/ansible'):
    """Install ansible (optionally from a PPA) and seed the hosts file."""
    if from_ppa:
        charmhelpers.fetch.add_source(ppa_location)
        charmhelpers.fetch.apt_update(fatal=True)
    charmhelpers.fetch.apt_install('ansible')
    with open(ansible_hosts_path, 'w+') as hosts_file:
        hosts_file.write('localhost ansible_connection=local '
                         'ansible_remote_tmp=/root/.ansible/tmp')
Installs the ansible package .
245,457
def execute(self, args):
    """Execute the hook, then run the playbook tagged with the hook name.

    If the hook has a registered action, it is used to compute extra
    vars for the playbook instead of the default hook execution.
    """
    hook_name = os.path.basename(args[0])
    extra_vars = None
    if hook_name in self._actions:
        extra_vars = self._actions[hook_name](args[1:])
    else:
        super(AnsibleHooks, self).execute(args)

    charmhelpers.contrib.ansible.apply_playbook(
        self.playbook_path, tags=[hook_name], extra_vars=extra_vars)
Execute the hook followed by the playbook using the hook as tag .
245,458
def action(self, *action_names):
    """Decorator registering the wrapped function as an action.

    CLI arguments of the form ``key=value`` are mapped onto keyword
    arguments of the decorated function. Also registers a dashed alias
    for underscored names.
    """
    def action_wrapper(decorated):
        @functools.wraps(decorated)
        def wrapper(argv):
            # Split on the first '=' only so values may themselves
            # contain '=' (e.g. key=a=b).
            kwargs = dict(arg.split('=', 1) for arg in argv)
            try:
                return decorated(**kwargs)
            except TypeError as e:
                # Surface the action's docstring to help diagnose
                # bad/missing arguments.
                if decorated.__doc__:
                    e.args += (decorated.__doc__,)
                raise
        self.register_action(decorated.__name__, wrapper)
        if '_' in decorated.__name__:
            self.register_action(
                decorated.__name__.replace('_', '-'), wrapper)
        return wrapper
    return action_wrapper
Decorator registering the wrapped functions as actions.
245,459
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
    """Return a logger that emits to stdout at the given level."""
    logger = logging.getLogger(name)
    formatter = logging.Formatter("%(asctime)s %(funcName)s "
                                  "%(levelname)s: %(message)s")
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)
    return logger
Get a logger object that will log to stdout .
245,460
def _determine_branch_locations ( self , other_services ) : self . log . info ( 'OpenStackAmuletDeployment: determine branch locations' ) base_charms = { 'mysql' : [ 'trusty' ] , 'mongodb' : [ 'trusty' ] , 'nrpe' : [ 'trusty' , 'xenial' ] , } for svc in other_services : if svc . get ( 'location' ) : continue if svc [ 'name' ] in base_charms : target_series = self . series if self . series not in base_charms [ svc [ 'name' ] ] : target_series = base_charms [ svc [ 'name' ] ] [ - 1 ] svc [ 'location' ] = 'cs:{}/{}' . format ( target_series , svc [ 'name' ] ) elif self . stable : svc [ 'location' ] = 'cs:{}/{}' . format ( self . series , svc [ 'name' ] ) else : svc [ 'location' ] = 'cs:~openstack-charmers-next/{}/{}' . format ( self . series , svc [ 'name' ] ) return other_services
Determine the branch locations for the other services .
245,461
def _auto_wait_for_status(self, message=None, exclude_services=None,
                          include_only=None, timeout=None):
    """Wait for extended status on units, with exclusions or inclusions.

    :param message: expected status match (string or compiled regex);
        defaults to a case-insensitive match on 'ready'.
    :param exclude_services: services to skip (mutually exclusive with
        include_only).
    :param include_only: wait only on these services.
    :param timeout: seconds; defaults to $AMULET_SETUP_TIMEOUT or 1800.
    :raises ValueError: if both exclude_services and include_only given.
    """
    if not timeout:
        timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
    self.log.info('Waiting for extended status on units for {}s...'
                  ''.format(timeout))

    all_services = self.d.services.keys()

    if exclude_services and include_only:
        raise ValueError('exclude_services can not be used '
                         'with include_only')

    if message:
        # re._pattern_type was removed in python 3.7; type(re.compile(''))
        # works on both python 2 and 3.
        if isinstance(message, type(re.compile(''))):
            match = message.pattern
        else:
            match = message
        self.log.debug('Custom extended status wait match: '
                       '{}'.format(match))
    else:
        self.log.debug('Default extended status wait match: contains '
                       'READY (case-insensitive)')
        message = re.compile('.*ready.*', re.IGNORECASE)

    if exclude_services:
        self.log.debug('Excluding services from extended status match: '
                       '{}'.format(exclude_services))
    else:
        exclude_services = []

    if include_only:
        services = include_only
    else:
        services = list(set(all_services) - set(exclude_services))

    self.log.debug('Waiting up to {}s for extended status on services: '
                   '{}'.format(timeout, services))
    service_messages = {service: message for service in services}

    # Allow hooks to settle, then wait on status and messages.
    self.d.sentry.wait(timeout=timeout)
    self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
    self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
    self.log.info('OK')
Wait for all units to have a specific extended status except for any defined as excluded . Unless specified via message any status containing any case of ready will be considered a match .
245,462
def _get_openstack_release(self):
    """Return the ordinal of this deployment's (series, origin) release."""
    # Expose each known release pair as an attribute holding its ordinal
    # so callers can compare releases numerically.
    for index, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
        setattr(self, os_pair, index)

    releases = {
        ('trusty', None): self.trusty_icehouse,
        ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
        ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
        ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
        ('xenial', None): self.xenial_mitaka,
        ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
        ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
        ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
        ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
        ('yakkety', None): self.yakkety_newton,
        ('zesty', None): self.zesty_ocata,
        ('artful', None): self.artful_pike,
        ('bionic', None): self.bionic_queens,
        ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
        ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
        ('cosmic', None): self.cosmic_rocky,
        ('disco', None): self.disco_stein,
    }
    return releases[(self.series, self.openstack)]
Get openstack release .
245,463
def _get_openstack_release_string ( self ) : releases = OrderedDict ( [ ( 'trusty' , 'icehouse' ) , ( 'xenial' , 'mitaka' ) , ( 'yakkety' , 'newton' ) , ( 'zesty' , 'ocata' ) , ( 'artful' , 'pike' ) , ( 'bionic' , 'queens' ) , ( 'cosmic' , 'rocky' ) , ( 'disco' , 'stein' ) , ] ) if self . openstack : os_origin = self . openstack . split ( ':' ) [ 1 ] return os_origin . split ( '%s-' % self . series ) [ 1 ] . split ( '/' ) [ 0 ] else : return releases [ self . series ]
Get openstack release string .
245,464
def get_ceph_expected_pools(self, radosgw=False):
    """Return expected ceph pools for a ceph+cinder+glance scenario.

    The pool set depends on the OpenStack release; radosgw adds its
    own pools on top.
    """
    release = self._get_openstack_release()
    if release == self.trusty_icehouse:
        pools = ['data', 'metadata', 'rbd', 'cinder-ceph', 'glance']
    elif self.trusty_kilo <= release <= self.zesty_ocata:
        pools = ['rbd', 'cinder-ceph', 'glance']
    else:
        pools = ['cinder-ceph', 'glance']

    if radosgw:
        pools.extend(['.rgw.root', '.rgw.control', '.rgw', '.rgw.gc',
                      '.users.uid'])

    return pools
Return a list of expected ceph pools in a ceph + cinder + glance test scenario based on OpenStack release and whether ceph radosgw is flagged as present or not .
245,465
def get_platform():
    """Return the current OS platform ('ubuntu' or 'centos').

    NOTE(review): platform.linux_distribution() was removed in python
    3.8; on newer interpreters this needs the 'distro' package or
    /etc/os-release parsing — confirm target python versions.
    """
    current_platform = platform.linux_distribution()[0]
    if "Ubuntu" in current_platform:
        return "ubuntu"
    elif "CentOS" in current_platform:
        return "centos"
    elif "debian" in current_platform:
        # Stock debian is treated the same as ubuntu.
        return "ubuntu"
    raise RuntimeError(
        "This module is not supported on {}.".format(current_platform))
Return the current OS platform .
245,466
def current_version_string():
    """Return the running python version as 'major.minor.micro'."""
    info = sys.version_info
    return "{0}.{1}.{2}".format(info.major, info.minor, info.micro)
Current system python version as a string in major.minor.micro format.
245,467
def get_audits():
    """Get MySQL hardening config audits.

    Returns an empty list when the mysql client is not installed.
    """
    if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
        log("MySQL does not appear to be installed on this node - "
            "skipping mysql hardening", level=WARNING)
        return []

    settings = utils.get_settings('mysql')
    hardening = settings['hardening']
    return [
        FilePermissionAudit(paths=[hardening['mysql-conf']],
                            user='root', group='root', mode=0o0600),
        TemplatedFile(hardening['hardening-conf'], MySQLConfContext(),
                      TEMPLATES_DIR, mode=0o0750, user='mysql',
                      group='root',
                      service_actions=[{'service': 'mysql',
                                        'actions': ['restart']}]),
        # mysql may choke on config files it does not own.
        DirectoryPermissionAudit('/var/lib/mysql', user='mysql',
                                 group='mysql', recursive=False,
                                 mode=0o755),
        DirectoryPermissionAudit('/etc/mysql', user='root', group='root',
                                 recursive=False, mode=0o700),
    ]
Get MySQL hardening config audits .
245,468
def service_reload(service_name, restart_on_failure=False, **kwargs):
    """Reload a system service, optionally restarting if reload fails."""
    result = service('reload', service_name, **kwargs)
    if not result and restart_on_failure:
        result = service('restart', service_name, **kwargs)
    return result
Reload a system service optionally falling back to restart if reload fails .
245,469
def service_pause(service_name, init_dir="/etc/init",
                  initd_dir="/etc/init.d", **kwargs):
    """Pause a service: stop it and prevent it starting on boot.

    Handles systemd, Upstart and SysV init systems.
    :returns: True if the service was stopped (or already stopped).
    :raises ValueError: if the init system cannot be determined.
    """
    stopped = True
    if service_running(service_name, **kwargs):
        stopped = service_stop(service_name, **kwargs)
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if init_is_systemd():
        service('disable', service_name)
        service('mask', service_name)
    elif os.path.exists(upstart_file):
        # Upstart: a manual override file blocks automatic start.
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))
        with open(override_path, 'w') as fh:
            fh.write("manual\n")
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "disable"])
    else:
        raise ValueError(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(service_name, upstart_file, sysv_file))
    return stopped
Pause a system service .
245,470
def service_resume(service_name, init_dir="/etc/init",
                   initd_dir="/etc/init.d", **kwargs):
    """Resume a previously paused service and start it if stopped.

    Handles systemd, Upstart and SysV init systems.
    :returns: True if the service is running afterwards.
    :raises ValueError: if the init system cannot be determined.
    """
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if init_is_systemd():
        service('unmask', service_name)
        service('enable', service_name)
    elif os.path.exists(upstart_file):
        # Upstart: remove the manual override if present.
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))
        if os.path.exists(override_path):
            os.unlink(override_path)
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "enable"])
    else:
        raise ValueError(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(service_name, upstart_file, sysv_file))

    started = service_running(service_name, **kwargs)
    if not started:
        started = service_start(service_name, **kwargs)
    return started
Resume a system service .
245,471
def service(action, service_name, **kwargs):
    """Control a system service; returns True on success.

    Extra kwargs are appended as key=value arguments (SysV/Upstart only
    make use of these).
    """
    if init_is_systemd():
        cmd = ['systemctl', action, service_name]
    else:
        cmd = ['service', service_name, action]
    cmd.extend('%s=%s' % (key, value)
               for key, value in six.iteritems(kwargs))
    return subprocess.call(cmd) == 0
Control a system service .
245,472
def service_running(service_name, **kwargs):
    """Determine whether a system service is running.

    Checks systemd first, then Upstart, then SysV.
    """
    if init_is_systemd():
        return service('is-active', service_name)

    if os.path.exists(_UPSTART_CONF.format(service_name)):
        try:
            cmd = ['status', service_name]
            for key, value in six.iteritems(kwargs):
                cmd.append('%s=%s' % (key, value))
            output = subprocess.check_output(
                cmd, stderr=subprocess.STDOUT).decode('UTF-8')
        except subprocess.CalledProcessError:
            return False
        else:
            # Upstart wording varies between releases.
            return ("start/running" in output or
                    "is running" in output or
                    "up and running" in output)
    elif os.path.exists(_INIT_D_CONF.format(service_name)):
        return service('status', service_name)
    return False
Determine whether a system service is running .
245,473
def adduser(username, password=None, shell='/bin/bash',
            system_user=False, primary_group=None,
            secondary_groups=None, uid=None, home_dir=None):
    """Add a user to the system, returning its passwd entry.

    If the user (or the requested uid) already exists, the existing
    entry is returned instead of creating a new account.
    """
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
        if uid:
            user_info = pwd.getpwuid(int(uid))
            log('user with uid {0} already exists!'.format(uid))
    except KeyError:
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if uid:
            cmd.extend(['--uid', str(uid)])
        if home_dir:
            cmd.extend(['--home', str(home_dir)])
        if system_user or password is None:
            cmd.append('--system')
        else:
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        if not primary_group:
            try:
                # Prefer an existing group matching the username.
                grp.getgrnam(username)
                primary_group = username
            except KeyError:
                pass
        if primary_group:
            cmd.extend(['-g', primary_group])
        if secondary_groups:
            cmd.extend(['-G', ','.join(secondary_groups)])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
    return user_info
Add a user to the system .
245,474
def user_exists(username):
    """Check if a user exists."""
    try:
        pwd.getpwnam(username)
    except KeyError:
        return False
    return True
Check if a user exists
245,475
def uid_exists(uid):
    """Check if a uid exists."""
    try:
        pwd.getpwuid(uid)
    except KeyError:
        return False
    return True
Check if a uid exists
245,476
def group_exists(groupname):
    """Check if a group exists."""
    try:
        grp.getgrnam(groupname)
    except KeyError:
        return False
    return True
Check if a group exists
245,477
def gid_exists(gid):
    """Check if a gid exists."""
    try:
        grp.getgrgid(gid)
    except KeyError:
        return False
    return True
Check if a gid exists
245,478
def add_group(group_name, system_group=False, gid=None):
    """Add a group to the system, returning its group entry.

    If the group (or the requested gid) already exists, the existing
    entry is returned.
    """
    try:
        group_info = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
        if gid:
            group_info = grp.getgrgid(gid)
            log('group with gid {0} already exists!'.format(gid))
    except KeyError:
        log('creating group {0}'.format(group_name))
        add_new_group(group_name, system_group, gid)
        group_info = grp.getgrnam(group_name)
    return group_info
Add a group to the system
245,479
def chage(username, lastday=None, expiredate=None, inactive=None,
          mindays=None, maxdays=None, root=None, warndays=None):
    """Change user password expiry information via chage(1)."""
    cmd = ['chage']
    # (flag, value) pairs in the same order chage(1) documents them.
    options = (
        ('--root', root),
        ('--lastday', lastday),
        ('--expiredate', expiredate),
        ('--inactive', inactive),
        ('--mindays', mindays),
        ('--maxdays', maxdays),
        ('--warndays', warndays),
    )
    for flag, value in options:
        if value:
            cmd.extend([flag, value])
    cmd.append(username)
    subprocess.check_call(cmd)
Change user password expiry information
245,480
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
    """Replicate the contents of a path using rsync.

    :param timeout: seconds; when set, the transfer is run under
        timeout(1).
    :returns: rsync's stdout, stripped.
    """
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags]
    if timeout:
        cmd = ['timeout', str(timeout)] + cmd
    cmd.extend(options)
    cmd.append(from_path)
    cmd.append(to_path)
    log(" ".join(cmd))
    return subprocess.check_output(
        cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
Replicate the contents of a path
245,481
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the given content and metadata.

    The file is rewritten only when its content differs; otherwise just
    the uid/gid/mode are corrected where they deviate.
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid

    # Capture current content/metadata, tolerating a missing file.
    existing_content = None
    existing_uid, existing_gid, existing_perms = None, None, None
    try:
        with open(path, 'rb') as target:
            existing_content = target.read()
        stat = os.stat(path)
        existing_uid, existing_gid, existing_perms = (
            stat.st_uid, stat.st_gid, stat.st_mode)
    except Exception:
        pass

    if content != existing_content:
        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
            level=DEBUG)
        with open(path, 'wb') as target:
            os.fchown(target.fileno(), uid, gid)
            os.fchmod(target.fileno(), perms)
            if six.PY3 and isinstance(content, six.string_types):
                content = content.encode('UTF-8')
            target.write(content)
        return

    # Content unchanged: fix up ownership/permissions only as needed.
    if existing_uid != uid:
        log("Changing uid on already existing content: {} -> {}"
            .format(existing_uid, uid), level=DEBUG)
        os.chown(path, uid, -1)
    if existing_gid != gid:
        log("Changing gid on already existing content: {} -> {}"
            .format(existing_gid, gid), level=DEBUG)
        os.chown(path, -1, gid)
    if existing_perms != perms:
        log("Changing permissions on existing content: {} -> {}"
            .format(existing_perms, perms), level=DEBUG)
        os.chmod(path, perms)
Create or overwrite a file with the contents of a byte string .
245,482
def mount(device, mountpoint, options=None, persist=False,
          filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint.

    :param persist: when True, also add an fstab entry.
    :returns: True on success, False on mount failure.
    """
    cmd_args = ['mount']
    if options is not None:
        cmd_args.extend(['-o', options])
    cmd_args.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint,
                                                 e.output))
        return False

    if persist:
        return fstab_add(device, mountpoint, filesystem, options=options)
    return True
Mount a filesystem at a particular mountpoint
245,483
def umount(mountpoint, persist=False):
    """Unmount a filesystem.

    :param persist: when True, also remove the fstab entry.
    :returns: True on success, False on umount failure.
    """
    try:
        subprocess.check_output(['umount', mountpoint])
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False

    if persist:
        return fstab_remove(mountpoint)
    return True
Unmount a filesystem
245,484
def fstab_mount(mountpoint):
    """Mount a filesystem configured in fstab.

    :returns: True on success, False on failure.
    """
    try:
        subprocess.check_output(['mount', mountpoint])
    except subprocess.CalledProcessError as e:
        # Fixed message: this path mounts, it does not unmount.
        log('Error mounting {}\n{}'.format(mountpoint, e.output))
        return False
    return True
Mount filesystem using fstab
245,485
def file_hash(path, hash_type='md5'):
    """Generate a hash checksum of the contents of path.

    :param hash_type: any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha512, sha256, sha1.
    :returns: hex digest string, or None if the path does not exist.
    """
    if not os.path.exists(path):
        return None
    h = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        # Read in chunks so large files are not slurped into memory.
        for chunk in iter(lambda: source.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()
Generate a hash checksum of the contents of path or None if not found .
245,486
def check_hash(path, checksum, hash_type='md5'):
    """Validate a file using a cryptographic checksum.

    :raises ChecksumError: if the file's hash does not match checksum.
    """
    actual = file_hash(path, hash_type)
    if checksum != actual:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual))
Validate a file using a cryptographic checksum .
245,487
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
    """Decorator: restart services whose watched config files change
    during the wrapped call."""
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)),
                restart_map, stopstart, restart_functions)
        return wrapped_f
    return wrap
Restart services based on configuration files changing
245,488
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
                             restart_functions=None):
    """Run lambda_f, then restart services whose watched files changed.

    :param restart_map: {path: [services]} mapping of files to watch.
    :param stopstart: stop+start instead of restart.
    :param restart_functions: per-service override callables.
    :returns: the return value of lambda_f.
    """
    if restart_functions is None:
        restart_functions = {}
    # Hash the watched files before and after running the payload.
    checksums = {path: path_hash(path) for path in restart_map}
    r = lambda_f()
    restarts = [restart_map[path] for path in restart_map
                if path_hash(path) != checksums[path]]
    # Flatten and de-duplicate while preserving order.
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    actions = ('stop', 'start') if stopstart else ('restart',)
    for service_name in services_list:
        if service_name in restart_functions:
            restart_functions[service_name](service_name)
        else:
            for action in actions:
                service(action, service_name)
    return r
Helper function to perform the restart_on_change function .
245,489
def pwgen(length=None):
    """Generate a random password.

    Ambiguous characters and vowels (to avoid accidental words) are
    excluded. A random default length adds a little extra entropy.
    """
    if length is None:
        length = random.choice(range(35, 45))
    allowed = [ch for ch in (string.ascii_letters + string.digits)
               if ch not in 'l0QD1vAEIOUaeiou']
    rng = random.SystemRandom()
    return ''.join(rng.choice(allowed) for _ in range(length))
Generate a random password.
245,490
def is_phy_iface(interface):
    """Return True if interface is physical (not virtual), else False."""
    if not interface:
        return False
    sys_net = '/sys/class/net'
    if os.path.isdir(sys_net):
        for iface in glob.glob(os.path.join(sys_net, '*')):
            # Virtual devices resolve under .../devices/virtual/.
            if '/virtual/' in os.path.realpath(iface):
                continue
            if interface == os.path.basename(iface):
                return True
    return False
Returns True if interface is not virtual otherwise False .
245,491
def get_bond_master(interface):
    """Return the bond master if interface is a bond slave, else None."""
    if not interface:
        return None
    iface_path = '/sys/class/net/%s' % (interface)
    if not os.path.exists(iface_path):
        return None
    if '/virtual/' in os.path.realpath(iface_path):
        # Virtual interfaces cannot be physical bond slaves.
        return None
    master = os.path.join(iface_path, 'master')
    if os.path.exists(master):
        master = os.path.realpath(master)
        # Only report it if the master really is a bond device.
        if os.path.exists(os.path.join(master, 'bonding')):
            return os.path.basename(master)
    return None
Returns bond master if interface is bond slave otherwise None .
245,492
def chdir(directory):
    """Context-manager body: run a block inside *directory*.

    The previous working directory is restored when the block exits,
    even on error. Useful for running commands from a specific path.
    """
    previous = os.getcwd()
    try:
        yield os.chdir(directory)
    finally:
        os.chdir(previous)
Change the current working directory to a different directory for a code block and return the previous directory after the block exits . Useful to run commands from a specificed directory .
245,493
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
    """Recursively change ownership of files and dirs under path.

    By default path itself is not chowned, only its children; pass
    chowntopdir=True to include it.
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    chown = os.chown if follow_links else os.lchown

    def _safe_chown(target):
        # Skip broken symlinks (lexists but not exists).
        if not (os.path.lexists(target) and not os.path.exists(target)):
            chown(target, uid, gid)

    if chowntopdir:
        _safe_chown(path)
    for root, dirs, files in os.walk(path, followlinks=follow_links):
        for name in dirs + files:
            _safe_chown(os.path.join(root, name))
Recursively change user and group ownership of files and directories in given path . Doesn t chown path itself by default only its children .
245,494
def lchownr(path, owner, group):
    """Recursively chown path's children without following symlinks.

    See :func:`os.lchown` for details on symlink handling.
    """
    chownr(path, owner, group, follow_links=False)
Recursively change user and group ownership of files and directories in a given path not following symbolic links . See the documentation for os . lchown for more information .
245,495
def owner(path):
    """Return the (username, groupname) tuple owning path."""
    st = os.stat(path)
    username = pwd.getpwuid(st.st_uid)[0]
    groupname = grp.getgrgid(st.st_gid)[0]
    return username, groupname
Returns a tuple containing the username & groupname owning the path .
245,496
def get_total_ram():
    """Return the total amount of system RAM in bytes.

    Parses /proc/meminfo (Linux only). MemTotal is reported in kB.
    """
    with open('/proc/meminfo', 'r') as f:
        for line in f:
            if line:
                key, value, unit = line.split()
                if key == 'MemTotal:':
                    assert unit == 'kB', 'Unknown unit'
                    return int(value) * 1024  # kB -> bytes
    raise NotImplementedError()
The total amount of system RAM in bytes .
245,497
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
    """Add path to mlocate's updatedb.conf PRUNEPATHS list."""
    # Nothing to do if the config is absent or not a regular file.
    if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
        return

    with open(updatedb_path, 'r+') as f_id:
        updatedb_text = f_id.read()
        output = updatedb(updatedb_text, path)
        f_id.seek(0)
        f_id.write(output)
        f_id.truncate()
Adds the specified path to the mlocate s udpatedb . conf PRUNEPATH list .
245,498
def install_ca_cert(ca_cert, name=None):
    """Install the given cert as a trusted CA.

    The cert is named juju-<charm-name> unless a name is given. No-op
    when the cert is empty or already installed with identical content.
    """
    if not ca_cert:
        return
    if not isinstance(ca_cert, bytes):
        ca_cert = ca_cert.encode('utf8')
    if not name:
        name = 'juju-{}'.format(charm_name())
    cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
    # Skip the (slow) update-ca-certificates run if nothing changed.
    new_hash = hashlib.md5(ca_cert).hexdigest()
    if file_hash(cert_file) == new_hash:
        return
    log("Installing new CA cert at: {}".format(cert_file), level=INFO)
    write_file(cert_file, ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])
Install the given cert as a trusted CA .
245,499
def get_audits():
    """Get OS hardening Secure TTY audits."""
    return [
        TemplatedFile('/etc/securetty', SecureTTYContext(),
                      template_dir=TEMPLATES_DIR, mode=0o0400,
                      user='root', group='root'),
    ]
Get OS hardening Secure TTY audits .