idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
56,500 | def get_pgs ( self , pool_size , percent_data = DEFAULT_POOL_WEIGHT , device_class = None ) : validator ( value = pool_size , valid_type = int ) if percent_data is None : percent_data = DEFAULT_POOL_WEIGHT osd_list = get_osds ( self . service , device_class ) expected = config ( 'expected-osd-count' ) or 0 if osd_list ... | Return the number of placement groups to use when creating the pool . |
56,501 | def add_op_create_replicated_pool ( self , name , replica_count = 3 , pg_num = None , weight = None , group = None , namespace = None , app_name = None , max_bytes = None , max_objects = None ) : if pg_num and weight : raise ValueError ( 'pg_num and weight are mutually exclusive' ) self . ops . append ( { 'op' : 'creat... | Adds an operation to create a replicated pool . |
56,502 | def add_op_create_erasure_pool ( self , name , erasure_profile = None , weight = None , group = None , app_name = None , max_bytes = None , max_objects = None ) : self . ops . append ( { 'op' : 'create-pool' , 'name' : name , 'pool-type' : 'erasure' , 'erasure-profile' : erasure_profile , 'weight' : weight , 'group' : ... | Adds an operation to create a erasure coded pool . |
56,503 | def get_nagios_unit_name ( relation_name = 'nrpe-external-master' ) : host_context = get_nagios_hostcontext ( relation_name ) if host_context : unit = "%s:%s" % ( host_context , local_unit ( ) ) else : unit = local_unit ( ) return unit | Return the nagios unit name prepended with host_context if needed |
56,504 | def copy_nrpe_checks ( nrpe_files_dir = None ) : NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' if nrpe_files_dir is None : for segment in [ '.' , 'hooks' ] : nrpe_files_dir = os . path . abspath ( os . path . join ( os . getenv ( 'CHARM_DIR' ) , segment , 'charmhelpers' , 'contrib' , 'openstack' , 'files' ) ) if os ... | Copy the nrpe checks into place |
56,505 | def write_vaultlocker_conf ( context , priority = 100 ) : charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf" . format ( hookenv . service_name ( ) ) host . mkdir ( os . path . dirname ( charm_vl_path ) , perms = 0o700 ) templating . render ( source = 'vaultlocker.conf.j2' , target = charm_vl_path , context = context ... | Write vaultlocker configuration to disk and install alternative |
56,506 | def vault_relation_complete ( backend = None ) : vault_kv = VaultKVContext ( secret_backend = backend or VAULTLOCKER_BACKEND ) vault_kv ( ) return vault_kv . complete | Determine whether vault relation is complete |
56,507 | def retrieve_secret_id ( url , token ) : import hvac client = hvac . Client ( url = url , token = token ) response = client . _post ( '/v1/sys/wrapping/unwrap' ) if response . status_code == 200 : data = response . json ( ) return data [ 'data' ] [ 'secret_id' ] | Retrieve a response - wrapped secret_id from Vault |
56,508 | def retry_on_exception ( num_retries , base_delay = 0 , exc_type = Exception ) : def _retry_on_exception_inner_1 ( f ) : def _retry_on_exception_inner_2 ( * args , ** kwargs ) : retries = num_retries multiplier = 1 while True : try : return f ( * args , ** kwargs ) except exc_type : if not retries : raise delay = base_... | If the decorated function raises exception exc_type allow num_retries retry attempts before raise the exception . |
56,509 | def _snap_exec ( commands ) : assert type ( commands ) == list retry_count = 0 return_code = None while return_code is None or return_code == SNAP_NO_LOCK : try : return_code = subprocess . check_call ( [ 'snap' ] + commands , env = os . environ ) except subprocess . CalledProcessError as e : retry_count += + 1 if retr... | Execute snap commands . |
56,510 | def snap_remove ( packages , * flags ) : if type ( packages ) is not list : packages = [ packages ] flags = list ( flags ) message = 'Removing snap(s) "%s"' % ', ' . join ( packages ) if flags : message += ' with options "%s"' % ', ' . join ( flags ) log ( message , level = 'INFO' ) return _snap_exec ( [ 'remove' ] + f... | Remove a snap package . |
56,511 | def validate_v2_endpoint_data ( self , endpoints , admin_port , internal_port , public_port , expected ) : self . log . debug ( 'Validating endpoint data...' ) self . log . debug ( 'actual: {}' . format ( repr ( endpoints ) ) ) found = False for ep in endpoints : self . log . debug ( 'endpoint: {}' . format ( repr ( ep... | Validate endpoint data . |
56,512 | def validate_v3_endpoint_data ( self , endpoints , admin_port , internal_port , public_port , expected , expected_num_eps = 3 ) : self . log . debug ( 'Validating v3 endpoint data...' ) self . log . debug ( 'actual: {}' . format ( repr ( endpoints ) ) ) found = [ ] for ep in endpoints : self . log . debug ( 'endpoint: ... | Validate keystone v3 endpoint data . |
56,513 | def convert_svc_catalog_endpoint_data_to_v3 ( self , ep_data ) : self . log . warn ( "Endpoint ID and Region ID validation is limited to not " "null checks after v2 to v3 conversion" ) for svc in ep_data . keys ( ) : assert len ( ep_data [ svc ] ) == 1 , "Unknown data format" svc_ep_data = ep_data [ svc ] [ 0 ] ep_data... | Convert v2 endpoint data into v3 . |
56,514 | def validate_v2_svc_catalog_endpoint_data ( self , expected , actual ) : self . log . debug ( 'Validating service catalog endpoint data...' ) self . log . debug ( 'actual: {}' . format ( repr ( actual ) ) ) for k , v in six . iteritems ( expected ) : if k in actual : ret = self . _validate_dict_data ( expected [ k ] [ ... | Validate service catalog endpoint data . |
56,515 | def validate_v3_svc_catalog_endpoint_data ( self , expected , actual ) : self . log . debug ( 'Validating v3 service catalog endpoint data...' ) self . log . debug ( 'actual: {}' . format ( repr ( actual ) ) ) for k , v in six . iteritems ( expected ) : if k in actual : l_expected = sorted ( v , key = lambda x : x [ 'i... | Validate the keystone v3 catalog endpoint data . |
56,516 | def validate_tenant_data ( self , expected , actual ) : self . log . debug ( 'Validating tenant data...' ) self . log . debug ( 'actual: {}' . format ( repr ( actual ) ) ) for e in expected : found = False for act in actual : a = { 'enabled' : act . enabled , 'description' : act . description , 'name' : act . name , 'i... | Validate tenant data . |
56,517 | def validate_user_data ( self , expected , actual , api_version = None ) : self . log . debug ( 'Validating user data...' ) self . log . debug ( 'actual: {}' . format ( repr ( actual ) ) ) for e in expected : found = False for act in actual : if e [ 'name' ] == act . name : a = { 'enabled' : act . enabled , 'name' : ac... | Validate user data . |
56,518 | def validate_flavor_data ( self , expected , actual ) : self . log . debug ( 'Validating flavor data...' ) self . log . debug ( 'actual: {}' . format ( repr ( actual ) ) ) act = [ a . name for a in actual ] return self . _validate_list_data ( expected , act ) | Validate flavor data . |
56,519 | def tenant_exists ( self , keystone , tenant ) : self . log . debug ( 'Checking if tenant exists ({})...' . format ( tenant ) ) return tenant in [ t . name for t in keystone . tenants . list ( ) ] | Return True if tenant exists . |
56,520 | def keystone_wait_for_propagation ( self , sentry_relation_pairs , api_version ) : for ( sentry , relation_name ) in sentry_relation_pairs : rel = sentry . relation ( 'identity-service' , relation_name ) self . log . debug ( 'keystone relation data: {}' . format ( rel ) ) if rel . get ( 'api_version' ) != str ( api_ver... | Iterate over list of sentry and relation tuples and verify that api_version has the expected value . |
56,521 | def keystone_configure_api_version ( self , sentry_relation_pairs , deployment , api_version ) : self . log . debug ( "Setting keystone preferred-api-version: '{}'" "" . format ( api_version ) ) config = { 'preferred-api-version' : api_version } deployment . d . configure ( 'keystone' , config ) deployment . _auto_wait... | Configure preferred - api - version of keystone in deployment and monitor provided list of relation objects for propagation before returning to caller . |
56,522 | def authenticate_cinder_admin ( self , keystone , api_version = 2 ) : self . log . debug ( 'Authenticating cinder admin...' ) _clients = { 1 : cinder_client . Client , 2 : cinder_clientv2 . Client } return _clients [ api_version ] ( session = keystone . session ) | Authenticates admin user with cinder . |
56,523 | def authenticate_keystone ( self , keystone_ip , username , password , api_version = False , admin_port = False , user_domain_name = None , domain_name = None , project_domain_name = None , project_name = None ) : self . log . debug ( 'Authenticating with keystone...' ) if not api_version : api_version = 2 sess , auth ... | Authenticate with Keystone |
56,524 | def get_keystone_session ( self , keystone_ip , username , password , api_version = False , admin_port = False , user_domain_name = None , domain_name = None , project_domain_name = None , project_name = None ) : ep = self . get_keystone_endpoint ( keystone_ip , api_version = api_version , admin_port = admin_port ) if ... | Return a keystone session object |
56,525 | def get_keystone_endpoint ( self , keystone_ip , api_version = None , admin_port = False ) : port = 5000 if admin_port : port = 35357 base_ep = "http://{}:{}" . format ( keystone_ip . strip ( ) . decode ( 'utf-8' ) , port ) if api_version == 2 : ep = base_ep + "/v2.0" else : ep = base_ep + "/v3" return ep | Return keystone endpoint |
56,526 | def get_default_keystone_session ( self , keystone_sentry , openstack_release = None , api_version = 2 ) : self . log . debug ( 'Authenticating keystone admin...' ) if api_version == 3 or ( openstack_release and openstack_release >= 11 ) : client_class = keystone_client_v3 . Client api_version = 3 else : client_class =... | Return a keystone session object and client object assuming standard default settings |
56,527 | def authenticate_keystone_admin ( self , keystone_sentry , user , password , tenant = None , api_version = None , keystone_ip = None , user_domain_name = None , project_domain_name = None , project_name = None ) : self . log . debug ( 'Authenticating keystone admin...' ) if not keystone_ip : keystone_ip = keystone_sent... | Authenticates admin user with the keystone admin endpoint . |
56,528 | def authenticate_keystone_user ( self , keystone , user , password , tenant ) : self . log . debug ( 'Authenticating keystone user ({})...' . format ( user ) ) ep = keystone . service_catalog . url_for ( service_type = 'identity' , interface = 'publicURL' ) keystone_ip = urlparse . urlparse ( ep ) . hostname return sel... | Authenticates a regular user with the keystone public endpoint . |
56,529 | def authenticate_glance_admin ( self , keystone , force_v1_client = False ) : self . log . debug ( 'Authenticating glance admin...' ) ep = keystone . service_catalog . url_for ( service_type = 'image' , interface = 'adminURL' ) if not force_v1_client and keystone . session : return glance_clientv2 . Client ( "2" , sess... | Authenticates admin user with glance . |
56,530 | def authenticate_heat_admin ( self , keystone ) : self . log . debug ( 'Authenticating heat admin...' ) ep = keystone . service_catalog . url_for ( service_type = 'orchestration' , interface = 'publicURL' ) if keystone . session : return heat_client . Client ( endpoint = ep , session = keystone . session ) else : retur... | Authenticates the admin user with heat . |
56,531 | def authenticate_nova_user ( self , keystone , user , password , tenant ) : self . log . debug ( 'Authenticating nova user ({})...' . format ( user ) ) ep = keystone . service_catalog . url_for ( service_type = 'identity' , interface = 'publicURL' ) if keystone . session : return nova_client . Client ( NOVA_CLIENT_VERS... | Authenticates a regular user with nova - api . |
56,532 | def authenticate_swift_user ( self , keystone , user , password , tenant ) : self . log . debug ( 'Authenticating swift user ({})...' . format ( user ) ) ep = keystone . service_catalog . url_for ( service_type = 'identity' , interface = 'publicURL' ) if keystone . session : return swiftclient . Connection ( session = ... | Authenticates a regular user with swift api . |
56,533 | def create_flavor ( self , nova , name , ram , vcpus , disk , flavorid = "auto" , ephemeral = 0 , swap = 0 , rxtx_factor = 1.0 , is_public = True ) : try : nova . flavors . find ( name = name ) except ( exceptions . NotFound , exceptions . NoUniqueMatch ) : self . log . debug ( 'Creating flavor ({})' . format ( name ) ... | Create the specified flavor . |
56,534 | def glance_create_image ( self , glance , image_name , image_url , download_dir = 'tests' , hypervisor_type = None , disk_format = 'qcow2' , architecture = 'x86_64' , container_format = 'bare' ) : self . log . debug ( 'Creating glance image ({}) from ' '{}...' . format ( image_name , image_url ) ) http_proxy = os . get... | Download an image and upload it to glance validate its status and return an image object pointer . KVM defaults can override for LXD . |
56,535 | def create_cirros_image ( self , glance , image_name , hypervisor_type = None ) : self . log . warn ( '/!\\ DEPRECATION WARNING: use ' 'glance_create_image instead of ' 'create_cirros_image.' ) self . log . debug ( 'Creating glance cirros image ' '({})...' . format ( image_name ) ) http_proxy = os . getenv ( 'AMULET_H... | Download the latest cirros image and upload it to glance validate and return a resource pointer . |
56,536 | def delete_image ( self , glance , image ) : self . log . warn ( '/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_image.' ) self . log . debug ( 'Deleting glance image ({})...' . format ( image ) ) return self . delete_resource ( glance . images , image , msg = 'glance image' ) | Delete the specified image . |
56,537 | def create_instance ( self , nova , image_name , instance_name , flavor ) : self . log . debug ( 'Creating instance ' '({}|{}|{})' . format ( instance_name , image_name , flavor ) ) image = nova . glance . find_image ( image_name ) flavor = nova . flavors . find ( name = flavor ) instance = nova . servers . create ( na... | Create the specified instance . |
56,538 | def delete_instance ( self , nova , instance ) : self . log . warn ( '/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.' ) self . log . debug ( 'Deleting instance ({})...' . format ( instance ) ) return self . delete_resource ( nova . servers , instance , msg = 'nova instance' ) | Delete the specified instance . |
56,539 | def create_or_get_keypair ( self , nova , keypair_name = "testkey" ) : try : _keypair = nova . keypairs . get ( keypair_name ) self . log . debug ( 'Keypair ({}) already exists, ' 'using it.' . format ( keypair_name ) ) return _keypair except Exception : self . log . debug ( 'Keypair ({}) does not exist, ' 'creating it... | Create a new keypair or return pointer if it already exists . |
56,540 | def create_cinder_volume ( self , cinder , vol_name = "demo-vol" , vol_size = 1 , img_id = None , src_vol_id = None , snap_id = None ) : if img_id and not src_vol_id and not snap_id : self . log . debug ( 'Creating cinder volume from glance image...' ) bootable = 'true' elif src_vol_id and not img_id and not snap_id : ... | Create cinder volume optionally from a glance image OR optionally as a clone of an existing volume OR optionally from a snapshot . Wait for the new volume status to reach the expected status validate and return a resource pointer . |
56,541 | def delete_resource ( self , resource , resource_id , msg = "resource" , max_wait = 120 ) : self . log . debug ( 'Deleting OpenStack resource ' '{} ({})' . format ( resource_id , msg ) ) num_before = len ( list ( resource . list ( ) ) ) resource . delete ( resource_id ) tries = 0 num_after = len ( list ( resource . lis... | Delete one openstack resource such as one instance keypair image volume stack etc . and confirm deletion within max wait time . |
56,542 | def resource_reaches_status ( self , resource , resource_id , expected_stat = 'available' , msg = 'resource' , max_wait = 120 ) : tries = 0 resource_stat = resource . get ( resource_id ) . status while resource_stat != expected_stat and tries < ( max_wait / 4 ) : self . log . debug ( '{} status check: ' '{} [{}:{}] {}'... | Wait for an openstack resources status to reach an expected status within a specified time . Useful to confirm that nova instances cinder vols snapshots glance images heat stacks and other resources eventually reach the expected status . |
56,543 | def get_ceph_pools ( self , sentry_unit ) : pools = { } cmd = 'sudo ceph osd lspools' output , code = sentry_unit . run ( cmd ) if code != 0 : msg = ( '{} `{}` returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code , output ) ) amulet . raise_status ( amulet . FAIL , msg = msg ) output = output .... | Return a dict of ceph pools from a single ceph unit with pool name as keys pool id as vals . |
56,544 | def get_ceph_df ( self , sentry_unit ) : cmd = 'sudo ceph df --format=json' output , code = sentry_unit . run ( cmd ) if code != 0 : msg = ( '{} `{}` returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code , output ) ) amulet . raise_status ( amulet . FAIL , msg = msg ) return json . loads ( outpu... | Return dict of ceph df json output including ceph pool state . |
56,545 | def get_ceph_pool_sample ( self , sentry_unit , pool_id = 0 ) : df = self . get_ceph_df ( sentry_unit ) for pool in df [ 'pools' ] : if pool [ 'id' ] == pool_id : pool_name = pool [ 'name' ] obj_count = pool [ 'stats' ] [ 'objects' ] kb_used = pool [ 'stats' ] [ 'kb_used' ] self . log . debug ( 'Ceph {} pool (ID {}): {... | Take a sample of attributes of a ceph pool returning ceph pool name object count and disk space used for the specified pool ID number . |
56,546 | def validate_ceph_pool_samples ( self , samples , sample_type = "resource pool" ) : original , created , deleted = range ( 3 ) if samples [ created ] <= samples [ original ] or samples [ deleted ] >= samples [ created ] : return ( 'Ceph {} samples ({}) ' 'unexpected.' . format ( sample_type , samples ) ) else : self . ... | Validate ceph pool samples taken over time such as pool object counts or pool kb used before adding after adding and after deleting items which affect those pool attributes . The 2nd element is expected to be greater than the 1st ; 3rd is expected to be less than the 2nd . |
56,547 | def rmq_wait_for_cluster ( self , deployment , init_sleep = 15 , timeout = 1200 ) : if init_sleep : time . sleep ( init_sleep ) message = re . compile ( '^Unit is ready and clustered$' ) deployment . _auto_wait_for_status ( message = message , timeout = timeout , include_only = [ 'rabbitmq-server' ] ) | Wait for rmq units extended status to show cluster readiness after an optional initial sleep period . Initial sleep is likely necessary to be effective following a config change as status message may not instantly update to non - ready . |
56,548 | def get_rmq_cluster_status ( self , sentry_unit ) : cmd = 'rabbitmqctl cluster_status' output , _ = self . run_cmd_unit ( sentry_unit , cmd ) self . log . debug ( '{} cluster_status:\n{}' . format ( sentry_unit . info [ 'unit_name' ] , output ) ) return str ( output ) | Execute rabbitmq cluster status command on a unit and return the full output . |
56,549 | def get_rmq_cluster_running_nodes ( self , sentry_unit ) : str_stat = self . get_rmq_cluster_status ( sentry_unit ) if 'running_nodes' in str_stat : pos_start = str_stat . find ( "{running_nodes," ) + 15 pos_end = str_stat . find ( "]}," , pos_start ) + 1 str_run_nodes = str_stat [ pos_start : pos_end ] . replace ( "'"... | Parse rabbitmqctl cluster_status output string return list of running rabbitmq cluster nodes . |
56,550 | def validate_rmq_cluster_running_nodes ( self , sentry_units ) : host_names = self . get_unit_hostnames ( sentry_units ) errors = [ ] for query_unit in sentry_units : query_unit_name = query_unit . info [ 'unit_name' ] running_nodes = self . get_rmq_cluster_running_nodes ( query_unit ) for validate_unit in sentry_units... | Check that all rmq unit hostnames are represented in the cluster_status output of all units . |
56,551 | def rmq_ssl_is_enabled_on_unit ( self , sentry_unit , port = None ) : host = sentry_unit . info [ 'public-address' ] unit_name = sentry_unit . info [ 'unit_name' ] conf_file = '/etc/rabbitmq/rabbitmq.config' conf_contents = str ( self . file_contents_safe ( sentry_unit , conf_file , max_wait = 16 ) ) conf_ssl = 'ssl' i... | Check a single juju rmq unit for ssl and port in the config file . |
56,552 | def validate_rmq_ssl_enabled_units ( self , sentry_units , port = None ) : for sentry_unit in sentry_units : if not self . rmq_ssl_is_enabled_on_unit ( sentry_unit , port = port ) : return ( 'Unexpected condition: ssl is disabled on unit ' '({})' . format ( sentry_unit . info [ 'unit_name' ] ) ) return None | Check that ssl is enabled on rmq juju sentry units . |
56,553 | def validate_rmq_ssl_disabled_units ( self , sentry_units ) : for sentry_unit in sentry_units : if self . rmq_ssl_is_enabled_on_unit ( sentry_unit ) : return ( 'Unexpected condition: ssl is enabled on unit ' '({})' . format ( sentry_unit . info [ 'unit_name' ] ) ) return None | Check that ssl is enabled on listed rmq juju sentry units . |
56,554 | def configure_rmq_ssl_on ( self , sentry_units , deployment , port = None , max_wait = 60 ) : self . log . debug ( 'Setting ssl charm config option: on' ) config = { 'ssl' : 'on' } if port : config [ 'ssl_port' ] = port deployment . d . configure ( 'rabbitmq-server' , config ) self . rmq_wait_for_cluster ( deployment ... | Turn ssl charm config option on with optional non - default ssl port specification . Confirm that it is enabled on every unit . |
56,555 | def configure_rmq_ssl_off ( self , sentry_units , deployment , max_wait = 60 ) : self . log . debug ( 'Setting ssl charm config option: off' ) config = { 'ssl' : 'off' } deployment . d . configure ( 'rabbitmq-server' , config ) self . rmq_wait_for_cluster ( deployment ) tries = 0 ret = self . validate_rmq_ssl_disabled... | Turn ssl charm config option off confirm that it is disabled on every unit . |
56,556 | def connect_amqp_by_unit ( self , sentry_unit , ssl = False , port = None , fatal = True , username = "testuser1" , password = "changeme" ) : host = sentry_unit . info [ 'public-address' ] unit_name = sentry_unit . info [ 'unit_name' ] if ssl and not port : port = 5671 elif not ssl and not port : port = 5672 self . log... | Establish and return a pika amqp connection to the rabbitmq service running on a rmq juju unit . |
56,557 | def publish_amqp_message_by_unit ( self , sentry_unit , message , queue = "test" , ssl = False , username = "testuser1" , password = "changeme" , port = None ) : self . log . debug ( 'Publishing message to {} queue:\n{}' . format ( queue , message ) ) connection = self . connect_amqp_by_unit ( sentry_unit , ssl = ssl ,... | Publish an amqp message to a rmq juju unit . |
56,558 | def get_amqp_message_by_unit ( self , sentry_unit , queue = "test" , username = "testuser1" , password = "changeme" , ssl = False , port = None ) : connection = self . connect_amqp_by_unit ( sentry_unit , ssl = ssl , port = port , username = username , password = password ) channel = connection . channel ( ) method_fra... | Get an amqp message from a rmq juju unit . |
56,559 | def validate_memcache ( self , sentry_unit , conf , os_release , earliest_release = 5 , section = 'keystone_authtoken' , check_kvs = None ) : if os_release < earliest_release : self . log . debug ( 'Skipping memcache checks for deployment. {} <' 'mitaka' . format ( os_release ) ) return _kvs = check_kvs or { 'memcached... | Check Memcache is running and is configured to be used |
56,560 | def acquire ( self , lock ) : unit = hookenv . local_unit ( ) ts = self . requests [ unit ] . get ( lock ) if not ts : self . requests . setdefault ( lock , { } ) self . requests [ unit ] [ lock ] = _timestamp ( ) self . msg ( 'Requested {}' . format ( lock ) ) if self . granted ( lock ) : self . msg ( 'Acquired {}' . ... | Acquire the named lock non - blocking . |
56,561 | def granted ( self , lock ) : unit = hookenv . local_unit ( ) ts = self . requests [ unit ] . get ( lock ) if ts and self . grants . get ( unit , { } ) . get ( lock ) == ts : return True return False | Return True if a previously requested lock has been granted |
56,562 | def request_timestamp ( self , lock ) : ts = self . requests [ hookenv . local_unit ( ) ] . get ( lock , None ) if ts is not None : return datetime . strptime ( ts , _timestamp_format ) | Return the timestamp of our outstanding request for lock or None . |
56,563 | def grant ( self , lock , unit ) : if not hookenv . is_leader ( ) : return False granted = set ( ) for u in self . grants : if lock in self . grants [ u ] : granted . add ( u ) if unit in granted : return True reqs = set ( ) for u in self . requests : if u in granted : continue for _lock , ts in self . requests [ u ] .... | Maybe grant the lock to a unit . |
56,564 | def released ( self , unit , lock , timestamp ) : interval = _utcnow ( ) - timestamp self . msg ( 'Leader released {} from {}, held {}' . format ( lock , unit , interval ) ) | Called on the leader when it has released a lock . |
56,565 | def require ( self , lock , guard_func , * guard_args , ** guard_kw ) : def decorator ( f ) : @ wraps ( f ) def wrapper ( * args , ** kw ) : if self . granted ( lock ) : self . msg ( 'Granted {}' . format ( lock ) ) return f ( * args , ** kw ) if guard_func ( * guard_args , ** guard_kw ) and self . acquire ( lock ) : r... | Decorate a function to be run only when a lock is acquired . |
56,566 | def msg ( self , msg ) : hookenv . log ( 'coordinator.{} {}' . format ( self . _name ( ) , msg ) , level = hookenv . INFO ) | Emit a message . Override to customize log spam . |
56,567 | def deprecate ( warning , date = None , log = None ) : def wrap ( f ) : @ functools . wraps ( f ) def wrapped_f ( * args , ** kwargs ) : try : module = inspect . getmodule ( f ) file = inspect . getsourcefile ( f ) lines = inspect . getsourcelines ( f ) f_name = "{}-{}-{}..{}-{}" . format ( module . __name__ , file , l... | Add a deprecation warning the first time the function is used . The date which is a string in semi - ISO8660 format indicate the year - month that the function is officially going to be removed . |
56,568 | def download ( self , source , dest ) : proto , netloc , path , params , query , fragment = urlparse ( source ) if proto in ( 'http' , 'https' ) : auth , barehost = splituser ( netloc ) if auth is not None : source = urlunparse ( ( proto , barehost , path , params , query , fragment ) ) username , password = splitpassw... | Download an archive file . |
56,569 | def install ( self , source , dest = None , checksum = None , hash_type = 'sha1' ) : url_parts = self . parse_url ( source ) dest_dir = os . path . join ( os . environ . get ( 'CHARM_DIR' ) , 'fetched' ) if not os . path . exists ( dest_dir ) : mkdir ( dest_dir , perms = 0o755 ) dld_file = os . path . join ( dest_dir ,... | Download and install an archive file with optional checksum validation . |
56,570 | def set_trace ( addr = DEFAULT_ADDR , port = DEFAULT_PORT ) : atexit . register ( close_port , port ) try : log ( "Starting a remote python debugger session on %s:%s" % ( addr , port ) ) open_port ( port ) debugger = Rpdb ( addr = addr , port = port ) debugger . set_trace ( sys . _getframe ( ) . f_back ) except Excepti... | Set a trace point using the remote debugger |
56,571 | def device_info ( device ) : status = subprocess . check_output ( [ 'ibstat' , device , '-s' ] ) . splitlines ( ) regexes = { "CA type: (.*)" : "device_type" , "Number of ports: (.*)" : "num_ports" , "Firmware version: (.*)" : "fw_ver" , "Hardware version: (.*)" : "hw_ver" , "Node GUID: (.*)" : "node_guid" , "System im... | Returns a DeviceInfo object with the current device settings |
56,572 | def ipoib_interfaces ( ) : interfaces = [ ] for interface in network_interfaces ( ) : try : driver = re . search ( '^driver: (.+)$' , subprocess . check_output ( [ 'ethtool' , '-i' , interface ] ) , re . M ) . group ( 1 ) if driver in IPOIB_DRIVERS : interfaces . append ( interface ) except Exception : log ( "Skipping ... | Return a list of IPOIB capable ethernet interfaces |
56,573 | def get_audits ( ) : audits = [ TemplatedFile ( '/etc/login.defs' , LoginContext ( ) , template_dir = TEMPLATES_DIR , user = 'root' , group = 'root' , mode = 0o0444 ) ] return audits | Get OS hardening login . defs audits . |
56,574 | def _get_defaults ( modules ) : default = os . path . join ( os . path . dirname ( __file__ ) , 'defaults/%s.yaml' % ( modules ) ) return yaml . safe_load ( open ( default ) ) | Load the default config for the provided modules . |
56,575 | def _get_schema ( modules ) : schema = os . path . join ( os . path . dirname ( __file__ ) , 'defaults/%s.yaml.schema' % ( modules ) ) return yaml . safe_load ( open ( schema ) ) | Load the config schema for the provided modules . |
56,576 | def _get_user_provided_overrides ( modules ) : overrides = os . path . join ( os . environ [ 'JUJU_CHARM_DIR' ] , 'hardening.yaml' ) if os . path . exists ( overrides ) : log ( "Found user-provided config overrides file '%s'" % ( overrides ) , level = DEBUG ) settings = yaml . safe_load ( open ( overrides ) ) if settin... | Load user - provided config overrides . |
56,577 | def _apply_overrides ( settings , overrides , schema ) : if overrides : for k , v in six . iteritems ( overrides ) : if k in schema : if schema [ k ] is None : settings [ k ] = v elif type ( schema [ k ] ) is dict : settings [ k ] = _apply_overrides ( settings [ k ] , overrides [ k ] , schema [ k ] ) else : raise Excep... | Get overrides config overlayed onto modules defaults . |
56,578 | def ensure_permissions ( path , user , group , permissions , maxdepth = - 1 ) : if not os . path . exists ( path ) : log ( "File '%s' does not exist - cannot set permissions" % ( path ) , level = WARNING ) return _user = pwd . getpwnam ( user ) os . chown ( path , _user . pw_uid , grp . getgrnam ( group ) . gr_gid ) os... | Ensure permissions for path . |
56,579 | def create ( sysctl_dict , sysctl_file , ignore = False ) : if type ( sysctl_dict ) is not dict : try : sysctl_dict_parsed = yaml . safe_load ( sysctl_dict ) except yaml . YAMLError : log ( "Error parsing YAML sysctl_dict: {}" . format ( sysctl_dict ) , level = ERROR ) return else : sysctl_dict_parsed = sysctl_dict wit... | Creates a sysctl . conf file from a YAML associative array |
56,580 | def canonical_url ( configs , endpoint_type = PUBLIC ) : scheme = _get_scheme ( configs ) address = resolve_address ( endpoint_type ) if is_ipv6 ( address ) : address = "[{}]" . format ( address ) return '%s://%s' % ( scheme , address ) | Returns the correct HTTP URL to this host given the state of HTTPS configuration hacluster and charm configuration . |
56,581 | def _get_address_override ( endpoint_type = PUBLIC ) : override_key = ADDRESS_MAP [ endpoint_type ] [ 'override' ] addr_override = config ( override_key ) if not addr_override : return None else : return addr_override . format ( service_name = service_name ( ) ) | Returns any address overrides that the user has defined based on the endpoint type . |
56,582 | def resolve_address ( endpoint_type = PUBLIC , override = True ) : resolved_address = None if override : resolved_address = _get_address_override ( endpoint_type ) if resolved_address : return resolved_address vips = config ( 'vip' ) if vips : vips = vips . split ( ) net_type = ADDRESS_MAP [ endpoint_type ] [ 'config' ... | Return unit address depending on net config . |
56,583 | def hugepage_support ( user , group = 'hugetlb' , nr_hugepages = 256 , max_map_count = 65536 , mnt_point = '/run/hugepages/kvm' , pagesize = '2MB' , mount = True , set_shmmax = False ) : group_info = add_group ( group ) gid = group_info . gr_gid add_user_to_group ( user , group ) if max_map_count < 2 * nr_hugepages : m... | Enable hugepages on system . |
56,584 | def ensure_compliance ( self ) : if not self . modules : return try : loaded_modules = self . _get_loaded_modules ( ) non_compliant_modules = [ ] for module in self . modules : if module in loaded_modules : log ( "Module '%s' is enabled but should not be." % ( module ) , level = INFO ) non_compliant_modules . append ( ... | Ensures that the modules are not loaded . |
56,585 | def _get_loaded_modules ( ) : output = subprocess . check_output ( [ 'apache2ctl' , '-M' ] ) if six . PY3 : output = output . decode ( 'utf-8' ) modules = [ ] for line in output . splitlines ( ) : matcher = re . search ( r'^ (\S*)_module (\S*)' , line ) if matcher : modules . append ( matcher . group ( 1 ) ) return mod... | Returns the modules which are enabled in Apache . |
56,586 | def _disable_module ( module ) : try : subprocess . check_call ( [ 'a2dismod' , module ] ) except subprocess . CalledProcessError as e : log ( 'Error occurred disabling module %s. ' 'Output is: %s' % ( module , e . output ) , level = ERROR ) | Disables the specified module in Apache . |
56,587 | def get_template_path ( template_dir , path ) : return os . path . join ( template_dir , os . path . basename ( path ) ) | Returns the template file which would be used to render the path . |
56,588 | def render_and_write ( template_dir , path , context ) : env = Environment ( loader = FileSystemLoader ( template_dir ) ) template_file = os . path . basename ( path ) template = env . get_template ( template_file ) log ( 'Rendering from template: %s' % template . name , level = DEBUG ) rendered_content = template . re... | Renders the specified template into the file . |
56,589 | def get_audits ( ) : audits = [ AptConfig ( [ { 'key' : 'APT::Get::AllowUnauthenticated' , 'expected' : 'false' } ] ) ] settings = get_settings ( 'os' ) clean_packages = settings [ 'security' ] [ 'packages_clean' ] if clean_packages : security_packages = settings [ 'security' ] [ 'packages_list' ] if security_packages ... | Get OS hardening apt audits . |
56,590 | def get_audits ( ) : audits = [ ] settings = utils . get_settings ( 'os' ) if settings [ 'auth' ] [ 'pam_passwdqc_enable' ] : audits . append ( PasswdqcPAM ( '/etc/passwdqc.conf' ) ) if settings [ 'auth' ] [ 'retries' ] : audits . append ( Tally2PAM ( '/usr/share/pam-configs/tally2' ) ) else : audits . append ( Deleted... | Get OS hardening PAM authentication audits . |
56,591 | def install_ansible_support ( from_ppa = True , ppa_location = 'ppa:rquillo/ansible' ) : if from_ppa : charmhelpers . fetch . add_source ( ppa_location ) charmhelpers . fetch . apt_update ( fatal = True ) charmhelpers . fetch . apt_install ( 'ansible' ) with open ( ansible_hosts_path , 'w+' ) as hosts_file : hosts_file... | Installs the ansible package . |
56,592 | def execute ( self , args ) : hook_name = os . path . basename ( args [ 0 ] ) extra_vars = None if hook_name in self . _actions : extra_vars = self . _actions [ hook_name ] ( args [ 1 : ] ) else : super ( AnsibleHooks , self ) . execute ( args ) charmhelpers . contrib . ansible . apply_playbook ( self . playbook_path ,... | Execute the hook followed by the playbook using the hook as tag . |
56,593 | def action ( self , * action_names ) : def action_wrapper ( decorated ) : @ functools . wraps ( decorated ) def wrapper ( argv ) : kwargs = dict ( arg . split ( '=' ) for arg in argv ) try : return decorated ( ** kwargs ) except TypeError as e : if decorated . __doc__ : e . args += ( decorated . __doc__ , ) raise self ... | Decorator registering the decorated function as an action under the given action names
56,594 | def get_logger ( self , name = "deployment-logger" , level = logging . DEBUG ) : log = logging logger = log . getLogger ( name ) fmt = log . Formatter ( "%(asctime)s %(funcName)s " "%(levelname)s: %(message)s" ) handler = log . StreamHandler ( stream = sys . stdout ) handler . setLevel ( level ) handler . setFormatter ... | Get a logger object that will log to stdout . |
56,595 | def _determine_branch_locations ( self , other_services ) : self . log . info ( 'OpenStackAmuletDeployment: determine branch locations' ) base_charms = { 'mysql' : [ 'trusty' ] , 'mongodb' : [ 'trusty' ] , 'nrpe' : [ 'trusty' , 'xenial' ] , } for svc in other_services : if svc . get ( 'location' ) : continue if svc [ ... | Determine the branch locations for the other services . |
56,596 | def _auto_wait_for_status ( self , message = None , exclude_services = None , include_only = None , timeout = None ) : if not timeout : timeout = int ( os . environ . get ( 'AMULET_SETUP_TIMEOUT' , 1800 ) ) self . log . info ( 'Waiting for extended status on units for {}s...' '' . format ( timeout ) ) all_services = se... | Wait for all units to have a specific extended status , except for any defined as excluded . Unless specified via message , any status containing the word 'ready' in any case will be considered a match .
56,597 | def _get_openstack_release ( self ) : for i , os_pair in enumerate ( OPENSTACK_RELEASES_PAIRS ) : setattr ( self , os_pair , i ) releases = { ( 'trusty' , None ) : self . trusty_icehouse , ( 'trusty' , 'cloud:trusty-kilo' ) : self . trusty_kilo , ( 'trusty' , 'cloud:trusty-liberty' ) : self . trusty_liberty , ( 'trusty... | Get openstack release . |
56,598 | def _get_openstack_release_string ( self ) : releases = OrderedDict ( [ ( 'trusty' , 'icehouse' ) , ( 'xenial' , 'mitaka' ) , ( 'yakkety' , 'newton' ) , ( 'zesty' , 'ocata' ) , ( 'artful' , 'pike' ) , ( 'bionic' , 'queens' ) , ( 'cosmic' , 'rocky' ) , ( 'disco' , 'stein' ) , ] ) if self . openstack : os_origin = self .... | Get openstack release string . |
56,599 | def get_ceph_expected_pools ( self , radosgw = False ) : if self . _get_openstack_release ( ) == self . trusty_icehouse : pools = [ 'data' , 'metadata' , 'rbd' , 'cinder-ceph' , 'glance' ] elif ( self . trusty_kilo <= self . _get_openstack_release ( ) <= self . zesty_ocata ) : pools = [ 'rbd' , 'cinder-ceph' , 'glance'... | Return a list of expected ceph pools in a ceph + cinder + glance test scenario based on OpenStack release and whether ceph radosgw is flagged as present or not . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.