idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
245,600
def expected_peer_units():
    """Generator of peer unit names expected to join this unit's peer
    relation, derived from Juju goal-state.

    :raises NotImplementedError: when the Juju version lacks goal-state
        support (introduced in 2.4.0).
    """
    if not has_juju_version("2.4.0"):
        raise NotImplementedError("goal-state")
    units = goal_state()['units']
    return (name for name in units
            if '/' in name and name != local_unit())
Get a generator for units we expect to join peer relation based on goal - state .
245,601
def expected_related_units(reltype=None):
    """Generator of unit names expected to join the given relation type,
    derived from Juju goal-state (defaults to the current relation type).

    :raises NotImplementedError: when Juju < 2.4.4 (no per-relation
        goal-state unit counts).
    """
    if not has_juju_version("2.4.4"):
        raise NotImplementedError("goal-state relation unit count")
    reltype = reltype or relation_type()
    members = goal_state()['relations'][reltype]
    return (name for name in members if '/' in name)
Get a generator for units we expect to join relation based on goal - state .
245,602
def relation_for_unit(unit=None, rid=None):
    """Return the relation settings for ``unit`` (default: the remote
    unit), splitting any ``*-list`` values into Python lists and tagging
    the result with ``__unit__``.
    """
    unit = unit or remote_unit()
    settings = relation_get(unit=unit, rid=rid)
    for name in settings:
        if name.endswith('-list'):
            settings[name] = settings[name].split()
    settings['__unit__'] = unit
    return settings
Get the JSON representation of a unit's relation.
245,603
def relations_for_id(relid=None):
    """Return per-unit relation data for a specific relation ID."""
    # NOTE(review): with no relid this falls back to the full *list*
    # returned by relation_ids(), which related_units() may not expect —
    # confirm intended upstream behaviour.
    relid = relid or relation_ids()
    collected = []
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        collected.append(unit_data)
    return collected
Get relations of a specific relation ID
245,604
def relations_of_type(reltype=None):
    """Return relation data for every relation of the given type
    (default: the current relation type), each tagged with __relid__."""
    reltype = reltype or relation_type()
    results = []
    for rid in relation_ids(reltype):
        for rel in relations_for_id(rid):
            rel['__relid__'] = rid
            results.append(rel)
    return results
Get relations of a specific type
245,605
def metadata():
    """Parse this charm's metadata.yaml and return it as a Python object."""
    path = os.path.join(charm_dir(), 'metadata.yaml')
    with open(path) as md:
        return yaml.safe_load(md)
Get the current charm metadata . yaml contents as a python object
245,606
def relation_types():
    """List the relation names this charm supports, collected from the
    provides/requires/peers sections of metadata.yaml."""
    md = metadata()
    found = []
    for role in ('provides', 'requires', 'peers'):
        section = md.get(role)
        if section:
            found.extend(section.keys())
    return found
Get a list of relation types supported by this charm
245,607
def peer_relation_id():
    """Return the first joined peers relation id, or None when no peers
    relation has been joined yet."""
    peers = metadata().get('peers')
    if peers:
        for name in peers:
            ids = relation_ids(name)
            if ids:
                return ids[0]
    return None
Get the peers relation id if a peers relation has been joined else None .
245,608
def interface_to_relations(interface_name):
    """Return every relation name of this charm that uses the given
    interface, across provides/requires/peers roles."""
    names = []
    for role in ('provides', 'requires', 'peers'):
        names += role_and_interface_to_relations(role, interface_name)
    return names
Given an interface return a list of relation names for the current charm that use that interface .
245,609
def relations():
    """Return a nested mapping of all relation data:
    {reltype: {relid: {unit: settings}}} including the local unit."""
    rels = {}
    for reltype in relation_types():
        by_relid = {}
        for relid in relation_ids(reltype):
            # seed with our own settings, then add each related unit's
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                units[unit] = relation_get(unit=unit, rid=relid)
            by_relid[relid] = units
        rels[reltype] = by_relid
    return rels
Get a nested dictionary of relation data for all related units
245,610
def _port_op(op_name, port, protocol="TCP"):
    """Run the open-port/close-port hook tool for a port (or ICMP)."""
    is_icmp = protocol.upper() == "ICMP"
    # ICMP has no port number, so only the protocol name is passed
    spec = protocol if is_icmp else '{}/{}'.format(port, protocol)
    try:
        subprocess.check_call([op_name, spec])
    except subprocess.CalledProcessError:
        # older Juju rejects ICMP; treat that case as best-effort
        if not is_icmp:
            raise
Open or close a service network port
245,611
def open_ports(start, end, protocol="TCP"):
    """Open the inclusive network port range start-end via open-port."""
    spec = '{}-{}/{}'.format(start, end, protocol)
    subprocess.check_call(['open-port', spec])
Opens a range of service network ports
245,612
def unit_get(attribute):
    """Query the named attribute of the local unit via unit-get.

    Returns None when the tool's output is not valid JSON.
    """
    cmd = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
    except ValueError:
        return None
Get the specified attribute of the local unit via the unit-get tool.
245,613
def storage_get(attribute=None, storage_id=None):
    """Fetch storage attributes via storage-get; None when the output
    cannot be parsed as JSON."""
    cmd = ['storage-get', '--format=json']
    if storage_id:
        cmd.extend(('-s', storage_id))
    if attribute:
        cmd.append(attribute)
    try:
        return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
    except ValueError:
        return None
Get storage attributes
245,614
def storage_list(storage_name=None):
    """List storage instance IDs for the unit, optionally filtered by
    storage name.

    Returns [] when the storage-list tool does not exist (older Juju),
    and None when its output is not valid JSON.
    """
    cmd = ['storage-list', '--format=json']
    if storage_name:
        cmd.append(storage_name)
    try:
        return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
    except ValueError:
        return None
    except OSError as e:
        import errno
        if e.errno == errno.ENOENT:
            # storage-list hook tool is not installed
            return []
        raise
List the storage IDs for the unit
245,615
def charm_dir():
    """Return the root directory of the current charm.

    Prefers JUJU_CHARM_DIR, falling back to the legacy CHARM_DIR
    variable; None when neither is set.
    """
    for var in ('JUJU_CHARM_DIR', 'CHARM_DIR'):
        path = os.environ.get(var)
        if path is not None:
            return path
    return None
Return the root directory of the current charm
245,616
def action_set(values):
    """Set key=value results to be returned when the action completes."""
    pairs = ['{}={}'.format(k, v) for k, v in values.items()]
    subprocess.check_call(['action-set'] + pairs)
Sets the values to be returned after the action finishes
245,617
def status_set(workload_state, message):
    """Set the unit's workload state with a message.

    Falls back to logging when the status-set tool is unavailable
    (pre-status Juju versions).

    :raises ValueError: for an unrecognised workload_state.
    """
    valid = ('maintenance', 'blocked', 'waiting', 'active')
    if workload_state not in valid:
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state))
    try:
        if subprocess.call(['status-set', workload_state, message]) == 0:
            return
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    log('status-set failed: {} {}'.format(workload_state, message),
        level='INFO')
Set the workload state with a message
245,618
def status_get():
    """Return the previously set workload (state, message) pair.

    Returns ('unknown', '') when the status-get tool is unavailable.
    """
    cmd = ['status-get', "--format=json", "--include-data"]
    try:
        raw_status = subprocess.check_output(cmd)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        return ('unknown', "")
    status = json.loads(raw_status.decode("UTF-8"))
    return (status["status"], status["message"])
Retrieve the previously set juju workload state and message
245,619
def application_version_set(version):
    """Report the running application version to Juju (2.0+)."""
    try:
        subprocess.check_call(['application-version-set', version])
    except OSError:
        # hook tool missing (older Juju): at least record it in the log
        log("Application Version: {}".format(version))
Charm authors may trigger this command from any hook to output what version of the application is running. This could be a package version, for instance postgres version 9.5. It could also be a build number or version control revision identifier, for instance git sha 6fb7ba68.
245,620
def payload_register(ptype, klass, pid):
    """Tell Juju, while a hook is running, that a payload has started."""
    subprocess.check_call(['payload-register', ptype, klass, pid])
Used while a hook is running to let Juju know that a payload has been started.
245,621
def resource_get(name):
    """Fetch the local path of the named resource via resource-get.

    Returns False for a falsy name or when the tool reports failure.
    """
    if not name:
        return False
    try:
        out = subprocess.check_output(['resource-get', name])
    except subprocess.CalledProcessError:
        return False
    return out.decode('UTF-8')
used to fetch the resource path of the given name .
245,622
def atstart(callback, *args, **kwargs):
    """Schedule callback(*args, **kwargs) to run before the main hook.

    Callbacks are queued on the module-level _atstart list and executed
    in registration order by _run_atstart().
    """
    global _atstart
    _atstart.append((callback, args, kwargs))
Schedule a callback to run before the main hook .
245,623
def _run_atstart():
    """Run queued atstart callbacks in registration order, then clear
    the queue. Hook frameworks call this before the main hook body."""
    global _atstart
    for cb, cb_args, cb_kwargs in _atstart:
        cb(*cb_args, **cb_kwargs)
    # clear in place so other references to the list stay valid
    del _atstart[:]
Hook frameworks must invoke this before running the main hook body .
245,624
def _run_atexit():
    """Run queued atexit callbacks in reverse registration order, then
    clear the queue. Only called after a successful hook body."""
    global _atexit
    for cb, cb_args, cb_kwargs in reversed(_atexit):
        cb(*cb_args, **cb_kwargs)
    # clear in place so other references to the list stay valid
    del _atexit[:]
Hook frameworks must invoke this after the main hook body has successfully completed . Do not invoke it if the hook fails .
245,625
def network_get(endpoint, relation_id=None):
    """Retrieve network details for a relation endpoint via network-get.

    :raises NotImplementedError: on Juju < 2.2, or < 2.3 when a
        relation_id is requested (the -r flag arrived in 2.3).
    """
    if not has_juju_version('2.2'):
        raise NotImplementedError(juju_version())
    if relation_id and not has_juju_version('2.3'):
        raise NotImplementedError  # -r option not supported yet
    cmd = ['network-get', endpoint, '--format', 'yaml']
    if relation_id:
        cmd.extend(['-r', relation_id])
    response = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    return yaml.safe_load(response.decode('UTF-8').strip())
Retrieve the network details for a relation endpoint
245,626
def add_metric(*args, **kwargs):
    """Record metric values (collect-metrics hook only).

    Values come as keyword arguments, or as "key=value" positional
    strings for metric names containing dashes. Logs instead of failing
    when the add-metric tool is unavailable.
    """
    pairs = list(args)
    pairs.extend('{}={}'.format(k, v) for k, v in kwargs.items())
    try:
        subprocess.check_call(['add-metric'] + sorted(pairs))
        return
    except EnvironmentError as e:
        if e.errno != errno.ENOENT:
            raise
    log('add-metric failed: {}'.format(' '.join(pairs)), level='INFO')
Add metric values . Values may be expressed with keyword arguments . For metric names containing dashes these may be expressed as one or more key = value positional arguments . May only be called from the collect - metrics hook .
245,627
def iter_units_for_relation_name(relation_name):
    """Yield RelatedUnit(rid, unit) for every unit on every relation
    with the given relation name."""
    # lightweight record type for (relation id, unit name) pairs
    RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
    for rid in relation_ids(relation_name):
        for unit in related_units(rid):
            yield RelatedUnit(rid, unit)
Iterate through all units in a relation
245,628
def ingress_address(rid=None, unit=None):
    """Return the relation's ingress-address, falling back to
    private-address when the former is not published."""
    settings = relation_get(rid=rid, unit=unit)
    return settings.get('ingress-address') or settings.get('private-address')
Retrieve the ingress - address from a relation when available . Otherwise return the private - address .
245,629
def egress_subnets(rid=None, unit=None):
    """Return the egress subnets for a relation as a list of CIDRs.

    Falls back to ingress-address then private-address, converting a
    bare address to a /32 (IPv4) or /128 (IPv6) range.
    """
    def _to_range(addr):
        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
            return addr + '/32'
        if ':' in addr and '/' not in addr:
            return addr + '/128'
        return addr

    settings = relation_get(rid=rid, unit=unit)
    if 'egress-subnets' in settings:
        return [s.strip()
                for s in settings['egress-subnets'].split(',')
                if s.strip()]
    for key in ('ingress-address', 'private-address'):
        if key in settings:
            return [_to_range(settings[key])]
    return []
Retrieve the egress - subnets from a relation .
245,630
def unit_doomed(unit=None):
    """Return True when the unit (default: local unit) is being removed
    from the model, according to goal-state.

    :raises NotImplementedError: on Juju < 2.4.1.
    """
    if not has_juju_version("2.4.1"):
        raise NotImplementedError("is_doomed")
    if unit is None:
        unit = local_unit()
    units = goal_state().get('units', {})
    if unit not in units:
        # already gone from goal-state: definitely on its way out
        return True
    return units[unit]['status'] in ('dying', 'dead')
Determines if the unit is being removed from the model
245,631
def env_proxy_settings(selected_settings=None):
    """Collect proxy settings from the process environment.

    For each selected scheme the plain variable (e.g. HTTP_PROXY) is
    read first and then overridden by the Juju-provided
    JUJU_CHARM_HTTP_PROXY variant; both upper- and lower-case keys are
    emitted. Returns None when nothing is set.
    """
    SUPPORTED_SETTINGS = {
        'http': 'HTTP_PROXY',
        'https': 'HTTPS_PROXY',
        'no_proxy': 'NO_PROXY',
        'ftp': 'FTP_PROXY',
    }
    if selected_settings is None:
        selected_settings = SUPPORTED_SETTINGS
    proxy_settings = {}
    for scheme, var in SUPPORTED_SETTINGS.items():
        if scheme not in selected_settings:
            continue
        # charm-scoped variable wins over the plain environment variable
        for value in (os.getenv(var),
                      os.getenv('JUJU_CHARM_{}'.format(var))):
            if value:
                proxy_settings[var] = value
                proxy_settings[var.lower()] = value
    if 'no_proxy' in proxy_settings:
        if _contains_range(proxy_settings['no_proxy']):
            log(RANGE_WARNING, level=WARNING)
    return proxy_settings if proxy_settings else None
Get proxy settings from process environment variables .
245,632
def load_previous(self, path=None):
    """Load the previous config snapshot from ``path`` (default:
    self.path) and back-fill any keys missing from the live config.

    A parse failure is logged; self._prev_dict is then left as-is.
    """
    self.path = path or self.path
    with open(self.path) as f:
        try:
            self._prev_dict = json.load(f)
        except ValueError as e:
            log('Unable to parse previous config data - {}'.format(str(e)),
                level=ERROR)
        # NOTE(review): if parsing failed and _prev_dict is still None
        # the deepcopy below raises — presumably callers guarantee a
        # prior successful save; confirm.
        for key, value in copy.deepcopy(self._prev_dict).items():
            if key not in self:
                self[key] = value
Load previous copy of config from disk .
245,633
def changed(self, key):
    """Return True when key's current value differs from the previous
    snapshot (always True when no snapshot has been loaded)."""
    return (self._prev_dict is None or
            self.previous(key) != self.get(key))
Return True if the current value for this key is different from the previous value .
245,634
def save(self):
    """Persist this config to self.path as JSON, mode 0600 (the file
    may contain secrets such as passwords)."""
    with open(self.path, 'w') as fh:
        # tighten permissions before any data hits the file
        os.fchmod(fh.fileno(), 0o600)
        json.dump(self, fh)
Save this config to disk .
245,635
def hook(self, *hook_names):
    """Decorator that registers the wrapped function as a hook handler.

    The function is registered under each name in *hook_names and,
    unconditionally, under its own __name__ (plus a dashed variant when
    the name contains underscores).
    """
    def wrapper(decorated):
        for hook_name in hook_names:
            self.register(hook_name, decorated)
        # the original used for/else here; with no break the else branch
        # always runs, so these registrations are unconditional
        self.register(decorated.__name__, decorated)
        if '_' in decorated.__name__:
            self.register(decorated.__name__.replace('_', '-'), decorated)
        return decorated
    return wrapper
Decorator that registers the wrapped function as a handler for the named hooks.
245,636
def shutdown(self):
    """Detach the remote debugger: restore the original stdio streams,
    close the socket and let the program continue."""
    sys.stdout = self.old_stdout
    sys.stdin = self.old_stdin
    self.skt.close()  # socket that carried the debug session
    self.set_continue()  # resume normal execution
Revert stdin and stdout, and close the socket.
245,637
def start():
    """Record the action start timestamp and, when the collectd charm's
    profiling helper is installed, capture a snapshot of the current
    profile data."""
    action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
    collector = '/usr/local/bin/collect-profile-data'
    if os.path.exists(collector):
        subprocess.check_output([collector])
If the collectd charm is also installed tell it to send a snapshot of the current profile data .
245,638
def get_os_codename_install_source(src):
    """Derive the OpenStack release codename from an installation
    source string ('distro', 'cloud:...', 'deb ...', 'ppa:...',
    'snap...'); '' for src=None, None when nothing matches."""
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
    rel = ''
    if src is None:
        return rel
    if src in ('distro', 'distro-proposed', 'proposed'):
        try:
            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:
            error_out('Could not derive openstack release for '
                      'this Ubuntu release: %s' % ubuntu_rel)
        return rel
    if src.startswith('cloud:'):
        # e.g. cloud:bionic-rocky/updates -> rocky
        return src.split(':')[1].split('-')[1].split('/')[0]
    if src.startswith(('deb', 'ppa', 'snap')):
        for codename in OPENSTACK_CODENAMES.values():
            if codename in src:
                return codename
Derive OpenStack release codename from a given installation source .
245,639
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
    """Map an OpenStack codename back to its version number, erroring
    out when the codename is unknown."""
    for version, name in version_map.items():
        if name == codename:
            return version
    error_out('Could not derive OpenStack version for '
              'codename: %s' % codename)
Determine OpenStack version number from codename .
245,640
def get_os_version_codename_swift(codename):
    """Return the newest swift version shipped for an OpenStack
    codename, erroring out when the codename is unknown."""
    for name, versions in SWIFT_CODENAMES.items():
        if name == codename:
            return versions[-1]
    error_out('Could not derive swift version for '
              'codename: %s' % codename)
Determine OpenStack version number of swift from codename .
245,641
def get_swift_codename(version):
    """Determine the OpenStack codename that corresponds to a swift
    package version.

    Swift versions are not unique across releases, so an ambiguous
    version is disambiguated against the local apt policy; as a last
    resort the major.minor prefix is matched.
    """
    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
    if len(codenames) > 1:
        # Same swift version in several releases: pick the one whose
        # Ubuntu release (or codename) shows up in apt-cache policy.
        for codename in reversed(codenames):
            releases = UBUNTU_OPENSTACK_RELEASE
            release = [k for k, v in six.iteritems(releases) if codename in v]
            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
            if six.PY3:
                ret = ret.decode('UTF-8')
            # NOTE(review): release may be an empty list here, in which
            # case release[0] raises IndexError — confirm every codename
            # appears in UBUNTU_OPENSTACK_RELEASE values.
            if codename in ret or release[0] in ret:
                return codename
    elif len(codenames) == 1:
        return codenames[0]
    # Fall back to matching on the major.minor version prefix.
    match = re.match(r'^(\d+)\.(\d+)', version)
    if match:
        major_minor_version = match.group(0)
        for codename, versions in six.iteritems(SWIFT_CODENAMES):
            for release_version in versions:
                if release_version.startswith(major_minor_version):
                    return codename
    return None
Determine OpenStack codename that corresponds to swift version .
245,642
def get_os_codename_package(package, fatal=True):
    """Derive the OpenStack release codename from an installed package.

    Snap-based installs are resolved via ``snap list``; deb installs via
    the apt cache, PACKAGE_CODENAMES, then the version maps. With
    fatal=False, failures return None instead of calling error_out().
    """
    if snap_install_requested():
        cmd = ['snap', 'list', package]
        try:
            out = subprocess.check_output(cmd)
            if six.PY3:
                out = out.decode('UTF-8')
        except subprocess.CalledProcessError:
            return None
        lines = out.split('\n')
        for line in lines:
            if package in line:
                # second column of `snap list` output is the version
                return line.split()[1]

    import apt_pkg as apt  # deferred: apt bindings only needed here
    cache = apt_cache()
    try:
        pkg = cache[package]
    except Exception:
        if not fatal:
            return None
        e = 'Could not determine version of package with no installation ' \
            'candidate: %s' % package
        error_out(e)
    if not pkg.current_ver:
        if not fatal:
            return None
        e = 'Could not determine version of uninstalled package: %s' % package
        error_out(e)

    vers = apt.upstream_version(pkg.current_ver.ver_str)
    # swift needs three version components to disambiguate releases
    if 'swift' in pkg.name:
        match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
    else:
        match = re.match(r'^(\d+)\.(\d+)', vers)
    if match:
        vers = match.group(0)

    # Prefer the per-package codename table keyed on major version.
    major_vers = vers.split('.')[0]
    if (package in PACKAGE_CODENAMES and
            major_vers in PACKAGE_CODENAMES[package]):
        return PACKAGE_CODENAMES[package][major_vers]
    else:
        try:
            if 'swift' in pkg.name:
                return get_swift_codename(vers)
            else:
                return OPENSTACK_CODENAMES[vers]
        except KeyError:
            if not fatal:
                return None
            e = 'Could not determine OpenStack codename for version %s' % vers
            error_out(e)
Derive OpenStack release codename from an installed package .
245,643
def get_os_version_package(pkg, fatal=True):
    """Derive the OpenStack version number from an installed package,
    or None when the codename cannot be determined."""
    codename = get_os_codename_package(pkg, fatal=fatal)
    if not codename:
        return None
    if 'swift' in pkg:
        # swift maps codename -> list of versions; return the newest
        for cname, versions in SWIFT_CODENAMES.items():
            if cname == codename:
                return versions[-1]
    else:
        for version, cname in OPENSTACK_CODENAMES.items():
            if cname == codename:
                return version
Derive OpenStack version number from an installed package .
245,644
def os_release(package, base='essex', reset_cache=False):
    """Return the OpenStack release codename, cached in a module-level
    global. Resolution order: installed package, install source, base."""
    global _os_rel
    if reset_cache:
        reset_os_release()
    if not _os_rel:
        _os_rel = (
            get_os_codename_package(package, fatal=False) or
            get_os_codename_install_source(config('openstack-origin')) or
            base)
    return _os_rel
Returns OpenStack release codename from a cached global .
245,645
def import_key(keyid):
    """Import a GPG key (ASCII-armored blob or key id), erroring out
    via error_out() on failure."""
    try:
        return fetch_import_key(keyid)
    except GPGKeyError as e:
        error_out("Could not import key: {}".format(str(e)))
Import a key either ASCII armored or a GPG key id .
245,646
def get_source_and_pgp_key(source_and_key):
    """Split a 'source|key' string into (source, key).

    The key part may itself contain '|' characters (e.g. an ASCII-armor
    blob), so the string is split at most once; the original used
    maxsplit=2, which made a two-pipe input raise ValueError and return
    the whole unsplit string as the source.

    :returns: (source, key) where key is None when absent or empty.
    """
    try:
        source, key = source_and_key.split('|', 1)
        return source, key or None
    except ValueError:
        # no '|' at all: the whole string is the source
        return source_and_key, None
Look for a pgp key ID or ascii - armor key in the given input .
245,647
def configure_installation_source(source_plus_key):
    """Configure an apt installation source, optionally with an
    attached PGP key; snap sources are handled elsewhere and skipped."""
    if source_plus_key.startswith('snap'):
        return
    source, key = get_source_and_pgp_key(source_plus_key)
    try:
        fetch_add_source(source, key, fail_invalid=True)
    except SourceConfigError as se:
        error_out(str(se))
Configure an installation source .
245,648
def config_value_changed(option):
    """Return True when the config option's value differs from the one
    recorded at the last call; records the current value either way."""
    hook_data = unitdata.HookData()
    with hook_data():
        db = unitdata.kv()
        current = config(option)
        saved = db.get(option)
        db.set(option, current)
        # first observation never counts as a change
        return False if saved is None else current != saved
Determine if config value changed since last call to this function .
245,649
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """Write an rc file in the charm-delivered directory exporting the
    given environment variables, so charm scripts run outside the hook
    environment can source it for current config.

    Fixes the original's list comprehension used purely for its side
    effects (writing lines) and drops the unneeded six indirection.
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    rc_dir = os.path.dirname(juju_rc_path)
    if not os.path.exists(rc_dir):
        os.mkdir(rc_dir)
    with open(juju_rc_path, 'wt') as rc_script:
        rc_script.write("#!/bin/bash\n")
        for name, value in env_vars.items():
            # 'script_path' is a positional knob, not an env var
            if name != "script_path":
                rc_script.write('export %s=%s\n' % (name, value))
Write an rc file in the charm - delivered directory containing exported environment variables provided by env_vars . Any charm scripts run outside the juju hook environment can source this scriptrc to obtain updated config information necessary to perform health checks or service changes .
245,650
def openstack_upgrade_available(package):
    """Return True when the configured installation source offers a
    newer OpenStack version than the installed package."""
    import apt_pkg as apt
    src = config('openstack-origin')
    cur_vers = get_os_version_package(package)
    if not cur_vers:
        return False
    if "swift" in package:
        codename = get_os_codename_install_source(src)
        avail_vers = get_os_version_codename_swift(codename)
    else:
        avail_vers = get_os_version_install_source(src)
    apt.init()
    # version_compare > 0 means avail_vers is newer
    return apt.version_compare(avail_vers, cur_vers) >= 1
Determines if an OpenStack upgrade is available from installation source based on version of installed package .
245,651
def ensure_block_device(block_device):
    """Resolve a block-device spec to a real device path, creating a
    loopback device for non-/dev paths ('path|size' or default size).
    Errors out for missing input or a path that is not a block device."""
    if block_device in ('None', 'none', None):
        error_out('prepare_storage(): Missing required input: '
                  'block_device=%s.' % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        # loopback file, optionally "path|size"
        parts = block_device.split('|')
        if len(parts) == 2:
            bdev, size = parts
        else:
            bdev, size = block_device, DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev)
    return bdev
Confirm block_device create as loopback if necessary .
245,652
def os_requires_version(ostack_release, pkg):
    """Decorator: refuse to run the hook on OpenStack releases older
    than ostack_release (codenames compare alphabetically in release
    order)."""
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args):
            if os_release(pkg) < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            f(*args)
        return wrapped_f
    return wrap
Decorator for hook to specify minimum supported release
245,653
def os_workload_status(configs, required_interfaces, charm_func=None):
    """Decorator: after the wrapped hook runs, recompute and publish the
    workload status from the completed contexts."""
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            f(*args, **kwargs)
            set_os_workload_status(configs, required_interfaces, charm_func)
        return wrapped_f
    return wrap
Decorator to set workload status based on complete contexts
245,654
def set_os_workload_status(configs, required_interfaces, charm_func=None,
                           services=None, ports=None):
    """Compute the charm's workload status and publish it via
    status-set."""
    state, message = _determine_os_workload_status(
        configs, required_interfaces, charm_func, services, ports)
    status_set(state, message)
Set the state of the workload status for the charm .
245,655
def _determine_os_workload_status(configs, required_interfaces,
                                  charm_func=None, services=None,
                                  ports=None):
    """Determine the workload (state, message) for the charm.

    Checks cascade in priority order: paused -> incomplete interface
    contexts -> charm-specific check -> services/ports running; when
    nothing objects, the unit is 'active'.
    """
    state, message = _ows_check_if_paused(services, ports)

    if state is None:
        state, message = _ows_check_generic_interfaces(
            configs, required_interfaces)

    # the charm-specific hook may refine any non-maintenance result
    if state != 'maintenance' and charm_func:
        state, message = _ows_check_charm_func(
            state, message, lambda: charm_func(configs))

    if state is None:
        state, message = _ows_check_services_running(services, ports)

    if state is None:
        state = 'active'
        message = "Unit is ready"
        juju_log(message, 'INFO')

    return state, message
Determine the state of the workload status for the charm .
245,656
def _ows_check_generic_interfaces(configs, required_interfaces):
    """Derive workload state from incomplete interface contexts.

    Returns ('blocked', msg) when a required relation is missing (or
    departing/broken in the current hook), ('waiting', msg) when related
    but incomplete, and (None, None) when everything is satisfied.
    """
    incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
    state = None
    message = None
    missing_relations = set()
    incomplete_relations = set()

    for generic_interface, relations_states in incomplete_rel_data.items():
        related_interface = None
        missing_data = {}
        # find the first interface that is at least related
        for interface, relation_state in relations_states.items():
            if relation_state.get('related'):
                related_interface = interface
                missing_data = relation_state.get('missing_data')
                break
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            missing_relations.add(generic_interface)
        else:
            if not missing_data:
                # related with no missing data: either the relation is
                # being torn down in this very hook, or it has no units
                _hook_name = hook_name()
                if (('departed' in _hook_name or 'broken' in _hook_name) and
                        related_interface in _hook_name):
                    state = 'blocked'
                    missing_relations.add(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface),
                             "WARN")
                else:
                    juju_log("{} relations's interface, {}, is related but has"
                             " no units in the relation."
                             "".format(generic_interface, related_interface),
                             "INFO")
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            # 'blocked' always wins over 'waiting'
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in missing_relations:
                incomplete_relations.add(generic_interface)

    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    return state, message
Check the complete contexts to determine the workload status .
245,657
def _ows_check_services_running(services, ports):
    """Check that expected services are running and expected ports are
    listened on; returns ('blocked', msg) on any failure, else
    (None, None)."""
    messages = []
    state = None
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, running = _check_running_services(services)
        if not all(running):
            messages.append(
                "Services not running that should be: {}".format(
                    ", ".join(_filter_tuples(services_running, False))))
            state = 'blocked'
        # also verify that the ports the services declare are listened on
        map_not_open, ports_open = (_check_listening_on_services_ports(services))
        if not all(ports_open):
            # NOTE(review): the loop variable 'open_ports' shadows the
            # module-level open_ports() helper within this comprehension.
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in map_not_open.items()}
            message = ", ".join(["{}: [{}]".format(s, sp)
                                 for s, sp in message_parts.items()])
            messages.append(
                "Services with ports not open that should be: {}".format(message))
            state = 'blocked'

    if ports is not None:
        # standalone port list, independent of any service mapping
        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
        if not all(ports_open_bools):
            messages.append("Ports which should be open, but are not: {}".format(
                ", ".join([str(p) for p, v in ports_open if not v])))
            state = 'blocked'

    if state is not None:
        message = "; ".join(messages)
        return state, message

    return None, None
Check that the services that should be running are actually running and that any ports specified are being listened to .
245,658
def _check_listening_on_ports_list(ports):
    """Probe each port for a listener on 0.0.0.0.

    :returns: (zip of (port, listening) pairs, list of listening bools)
    """
    listening = [port_has_listener('0.0.0.0', port) for port in ports]
    return zip(ports, listening), listening
Check that the ports list given are being listened to
245,659
def workload_state_compare(current_workload_state, workload_state):
    """Return the higher-priority of two workload states; unrecognised
    states are treated as 'unknown' (lowest priority)."""
    hierarchy = {
        'unknown': -1,
        'active': 0,
        'maintenance': 1,
        'waiting': 2,
        'blocked': 3,
    }
    current = (current_workload_state
               if current_workload_state in hierarchy else 'unknown')
    proposed = workload_state if workload_state in hierarchy else 'unknown'
    # ties resolve to the proposed state, matching the original
    return current if hierarchy[current] > hierarchy[proposed] else proposed
Return highest priority of two states
245,660
def incomplete_relation_data(configs, required_interfaces):
    """Map each service type whose required interfaces have no complete
    context to that interface set's incomplete context data."""
    complete = set(configs.complete_contexts())
    result = {}
    for svc_type, interfaces in required_interfaces.items():
        if not complete.intersection(interfaces):
            result[svc_type] = configs.get_incomplete_context_data(interfaces)
    return result
Check complete contexts against required_interfaces Return dictionary of incomplete relation data .
245,661
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform an action-managed OpenStack upgrade.

    :returns: True only when an upgrade was available, permitted by the
        action-managed-upgrade config, and completed without error.
    """
    if not openstack_upgrade_available(package):
        action_set({'outcome': 'no upgrade available.'})
        return False
    if not config('action-managed-upgrade'):
        action_set({'outcome': 'action-managed-upgrade config is '
                               'False, skipped upgrade.'})
        return False
    juju_log('Upgrading OpenStack release')
    try:
        upgrade_callback(configs=configs)
        action_set({'outcome': 'success, upgrade completed.'})
        return True
    except Exception:
        action_set({'outcome': 'upgrade failed, see traceback.'})
        action_set({'traceback': traceback.format_exc()})
        action_fail('do_openstack_upgrade resulted in an '
                    'unexpected error')
        return False
Perform action - managed OpenStack upgrade .
245,662
def manage_payload_services(action, services=None, charm_func=None):
    """Run a pause/resume/start/stop action against all services, then
    an optional charm-specific callable.

    :returns: (success, messages) — success is False when any service
        op or the charm function failed.
    :raises RuntimeError: for an unrecognised action.
    """
    actions = {'pause': service_pause, 'resume': service_resume,
               'start': service_start, 'stop': service_stop}
    action = action.lower()
    if action not in actions.keys():
        raise RuntimeError("action: {} must be one of: {}".format(
            action, ', '.join(actions.keys())))
    services = _extract_services_list_helper(services)
    messages = []
    success = True
    if services:
        for service in services.keys():
            if not actions[action](service):
                success = False
                messages.append("{} didn't {} cleanly.".format(service,
                                                               action))
    if charm_func:
        try:
            message = charm_func()
            if message:
                messages.append(message)
        except Exception as e:
            success = False
            messages.append(str(e))
    return success, messages
Run an action against all services .
245,663
def pausable_restart_on_change(restart_map, stopstart=False,
                               restart_functions=None):
    """restart_on_change decorator that is a no-op while the unit is
    paused.

    restart_map may be a dict or a callable returning one; a callable is
    evaluated lazily, once, on the first non-paused invocation.
    """
    def wrap(f):
        # dict-in-closure so wrapped_f can mutate it (py2-safe nonlocal)
        __restart_map_cache = {'cache': None}

        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            if is_unit_paused_set():
                # paused: run the function but skip any restarts
                return f(*args, **kwargs)
            if __restart_map_cache['cache'] is None:
                __restart_map_cache['cache'] = \
                    restart_map() if callable(restart_map) else restart_map
            # otherwise, normal restart_on_change functionality
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)),
                __restart_map_cache['cache'],
                stopstart, restart_functions)
        return wrapped_f
    return wrap
A restart_on_change decorator that checks to see if the unit is paused . If it is paused then the decorated function doesn t fire .
245,664
def ordered(orderme):
    """Recursively convert a dict into an OrderedDict sorted by key.

    The six.iteritems indirection was unnecessary — dict.items() works
    identically on Python 2 and 3 here; sorting still uses the key only,
    since values may not be mutually comparable.

    :raises ValueError: when the argument is not a dict.
    """
    if not isinstance(orderme, dict):
        raise ValueError('argument must be a dict type')
    result = OrderedDict()
    for k, v in sorted(orderme.items(), key=lambda x: x[0]):
        result[k] = ordered(v) if isinstance(v, dict) else v
    return result
Converts the provided dictionary into a collections . OrderedDict .
245,665
def config_flags_parser(config_flags):
    """Parse a config-flags string into an OrderedDict.

    A string containing a ':' before any '=' is treated as inline YAML;
    otherwise it is parsed as comma-separated key=value pairs, where
    values themselves may contain commas (only the segment after the
    last comma of the next chunk is the following key).

    :raises OSContextError: on '==' or an unparseable key position.
    """
    # If we find a colon before an equals sign, assume the whole thing
    # is YAML rather than key=value pairs.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return ordered(yaml.safe_load(config_flags))

    if config_flags.find('==') >= 0:
        juju_log("config_flags is not in expected format (key=value)",
                 level=ERROR)
        raise OSContextError

    # strip the set from the characters we pull off key/value boundaries
    post_strippers = ' ,'
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = OrderedDict()
    for i in range(0, limit - 1):
        current = split[i]
        next = split[i + 1]
        # value ends at the last comma of the next chunk, unless this is
        # the final pair or the chunk has no comma at all
        vindex = next.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            value = next
        else:
            value = next[:vindex]

        if i == 0:
            key = current
        else:
            # the text after the last comma of the previous chunk is the key
            index = current.rfind(',')
            if index < 0:
                juju_log("Invalid config value(s) at index %s" % (i),
                         level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
    return flags
Parses config flags string into dict .
245,666
def os_application_version_set(package):
    """Report the package's upstream version to Juju, falling back to
    the OpenStack release codename when no version is found."""
    version = get_upstream_version(package)
    if version:
        application_version_set(version)
    else:
        application_version_set(os_release(package))
Set version of application for Juju 2 . 0 and later
245,667
def enable_memcache(source=None, release=None, package=None):
    """Determine whether memcache should be enabled on the local unit.

    The release is taken from ``release`` if given, otherwise derived
    from ``package``, and finally from the install ``source``.  Memcache
    is enabled for Mitaka and later releases.

    :param source: install source, e.g. cloud:xenial-newton
    :param release: explicit release name, e.g. 'mitaka'
    :param package: package to derive the release from
    :returns: True if memcache should be enabled
    """
    _release = release or os_release(package, base='icehouse')
    _release = _release or get_os_codename_install_source(source)
    return CompareOpenStackReleases(_release) >= 'mitaka'
Determine if memcache should be enabled on the local unit
245,668
def token_cache_pkgs(source=None, release=None):
    """Determine additional packages needed for token caching.

    :param source: install source to pass to enable_memcache
    :param release: release to pass to enable_memcache
    :returns: list of extra packages (empty if memcache is disabled)
    """
    if enable_memcache(source=source, release=release):
        return ['memcached', 'python-memcache']
    return []
Determine additional packages needed for token caching
245,669
def snap_install_requested():
    """Determine whether installation from snaps was requested.

    True when openstack-origin looks like ``snap:track/channel[/branch]``
    and the channel part is a valid snap channel ('stable' is assumed
    when no channel is given).

    :returns: True if installing from snaps
    """
    origin = config('openstack-origin') or ""
    if not origin.startswith('snap:'):
        return False
    _src = origin[5:]
    channel = _src.split('/')[1] if '/' in _src else 'stable'
    return valid_snap_channel(channel)
Determine if installing from snaps
245,670
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
    """Build a dict of snap install information from an origin string.

    :param snaps: list of snap names to install
    :param src: origin string, e.g. ``snap:track/channel``
    :param mode: confinement mode for the snaps (default 'classic')
    :returns: dict mapping snap name -> {'channel': ..., 'mode': ...};
        empty dict when ``src`` is not a snap origin
    """
    if not src.startswith('snap:'):
        juju_log("Snap source is not a snap origin", 'WARN')
        return {}

    channel = '--channel={}'.format(src[5:])
    return {name: {'channel': channel, 'mode': mode} for name in snaps}
Generate a dictionary of snap install information from origin
245,671
def install_os_snaps(snaps, refresh=False):
    """Install or refresh OpenStack snaps with channel and mode flags.

    :param snaps: dict of snap name -> {'channel': ..., 'mode': ...},
        as produced by get_snaps_install_info_from_origin
    :param refresh: refresh already-installed snaps instead of installing
    """
    def _ensure_flag(flag):
        # Accept both 'flag' and '--flag' spellings.
        return flag if flag.startswith('--') else '--{}'.format(flag)

    action = snap_refresh if refresh else snap_install
    for name in snaps.keys():
        action(name,
               _ensure_flag(snaps[name]['channel']),
               _ensure_flag(snaps[name]['mode']))
Install OpenStack snaps from channel and with mode
245,672
def series_upgrade_complete(resume_unit_helper=None, configs=None):
    """Run common series-upgrade completion tasks.

    Clears the paused and upgrading unit flags; when ``configs`` is
    supplied its files are rewritten and, if given, the
    ``resume_unit_helper`` callback is invoked with it.

    :param resume_unit_helper: callable taking ``configs`` that resumes
        the unit's services
    :param configs: template/config registry with a ``write_all`` method
    """
    clear_unit_paused()
    clear_unit_upgrading()
    if configs:
        configs.write_all()
        if resume_unit_helper:
            resume_unit_helper(configs)
Run common series upgrade complete tasks .
245,673
def get_certificate_request(json_encode=True):
    """Generate a certificate request based on the network configuration.

    For each endpoint type (internal, admin, public) the local address,
    the binding's primary IP and any VIP on that network are collected
    and added to the request: under a config-supplied override CN when
    one is set, otherwise folded into the hostname CN's SAN list.
    Endpoint types without a local network binding are skipped.

    :param json_encode: JSON-encode the resulting request payload
    :returns: certificate request dict (see CertRequest.get_request)
    """
    req = CertRequest(json_encode=json_encode)
    # Always request a cert for the machine's hostname first.
    req.add_hostname_cn()
    for net_type in [INTERNAL, ADMIN, PUBLIC]:
        # Optional per-endpoint hostname override from charm config.
        net_config = config(ADDRESS_MAP[net_type]['override'])
        try:
            net_addr = resolve_address(endpoint_type=net_type)
            ip = network_get_primary_address(
                ADDRESS_MAP[net_type]['binding'])
            addresses = [net_addr, ip]
            vip = get_vip_in_network(resolve_network_cidr(ip))
            if vip:
                addresses.append(vip)
            if net_config:
                # Explicit hostname override -> its own CN entry.
                req.add_entry(net_type, net_config, addresses)
            else:
                # No override -> add the IPs to the hostname CN's SANs.
                req.add_hostname_cn_ip(addresses)
        except NoNetworkBinding:
            log("Skipping request for certificate for ip in {} space, no "
                "local address found".format(net_type), WARNING)
    return req.get_request()
Generate a certificate request based on the network configuration.
245,674
def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
    """Create cert/key symlinks so per-address names resolve to the
    hostname certificate (for SAN records).

    :param ssl_dir: directory containing the certs and keys
    :param custom_hostname_link: extra hostname alias to link
    """
    hostname = get_hostname(unit_get('private-address'))
    hostname_cert = os.path.join(ssl_dir, 'cert_{}'.format(hostname))
    hostname_key = os.path.join(ssl_dir, 'key_{}'.format(hostname))

    def _link(alias):
        # Point cert_<alias>/key_<alias> at the hostname's cert/key,
        # unless the cert link already exists.
        cert = os.path.join(ssl_dir, 'cert_{}'.format(alias))
        key = os.path.join(ssl_dir, 'key_{}'.format(alias))
        if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
            os.symlink(hostname_cert, cert)
            os.symlink(hostname_key, key)

    for net_type in [INTERNAL, ADMIN, PUBLIC]:
        try:
            _link(resolve_address(endpoint_type=net_type))
        except NoNetworkBinding:
            log("Skipping creating cert symlink for ip in {} space, no "
                "local address found".format(net_type), WARNING)

    if custom_hostname_link:
        _link(custom_hostname_link)
Create symlinks for SAN records
245,675
def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
    """Install certificate/key bundles into ``ssl_dir``.

    :param ssl_dir: target directory for the cert/key files
    :param certs: dict of CN -> {'cert': ..., 'key': ...}
    :param chain: optional CA chain appended to each certificate
    :param user: owner for the written files
    :param group: group for the written files
    """
    for cn, bundle in certs.items():
        cert_data = bundle['cert']
        if chain:
            # Append chain so clients can verify certs issued by an
            # intermediate CA.
            cert_data = cert_data + os.linesep + chain
        write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)),
                   owner=user, group=group,
                   content=cert_data, perms=0o640)
        write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)),
                   owner=user, group=group,
                   content=bundle['key'], perms=0o640)
Install the certs passed into the ssl dir and append the chain if provided .
245,676
def process_certificates(service_name, relation_id, unit,
                         custom_hostname_link=None, user='root',
                         group='root'):
    """Process certificates supplied over the certificates relation.

    Reads this unit's processed certificate requests from the relation
    data, installs the CA plus the certs and keys under
    /etc/apache2/ssl/<service_name>, and creates SAN symlinks.

    :param service_name: service the certificates belong to
    :param relation_id: relation id to read data from
    :param unit: remote unit to read data from
    :param custom_hostname_link: extra hostname alias to link
    :param user: owner for installed files
    :param group: group for installed files
    :returns: True if certificates were found and installed, else False
    """
    data = relation_get(rid=relation_id, unit=unit)
    ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
    mkdir(path=ssl_dir)
    name = local_unit().replace('/', '_')
    certs = data.get('{}.processed_requests'.format(name))
    chain = data.get('chain')
    ca = data.get('ca')
    if certs:
        certs = json.loads(certs)
        # Robustness fix: only install the CA when one was supplied --
        # previously a missing 'ca' key raised AttributeError on
        # None.encode().
        if ca:
            install_ca_cert(ca.encode())
        install_certs(ssl_dir, certs, chain, user=user, group=group)
        create_ip_cert_links(ssl_dir,
                             custom_hostname_link=custom_hostname_link)
        return True
    return False
Process the certificates supplied down the relation
245,677
def get_requests_for_local_unit(relation_name=None):
    """Collect certificate bundles targeted at this unit.

    Scans every unit on every relation of ``relation_name`` for
    processed certificate requests addressed to this unit.

    :param relation_name: relation to scan (default 'certificates')
    :returns: list of dicts with 'ca', 'chain' and 'certs' keys
    """
    relation_name = relation_name or 'certificates'
    local_name = local_unit().replace('/', '_')
    raw_certs_key = '{}.processed_requests'.format(local_name)
    bundles = []
    for rid in relation_ids(relation_name):
        for unit in related_units(rid):
            data = relation_get(rid=rid, unit=unit)
            if data.get(raw_certs_key):
                bundles.append({
                    'ca': data['ca'],
                    'chain': data.get('chain'),
                    'certs': json.loads(data[raw_certs_key]),
                })
    return bundles
Extract any certificates data targeted at this unit down relation_name .
245,678
def get_bundle_for_cn(cn, relation_name=None):
    """Extract the certificate bundle for a given CN.

    :param cn: Canonical Name to look up
    :param relation_name: relation to check for data (default 'certificates')
    :returns: dict with 'cert', 'key', 'chain' and 'ca' keys, or {} when
        no matching bundle was found
    """
    for entry in get_requests_for_local_unit(relation_name):
        if cn in entry['certs']:
            bundle = entry['certs'][cn]
            return {'cert': bundle['cert'],
                    'key': bundle['key'],
                    'chain': entry['chain'],
                    'ca': entry['ca']}
    return {}
Extract certificates for the given cn .
245,679
def add_entry(self, net_type, cn, addresses):
    """Add a certificate request entry to the batch.

    :param net_type: network space type the request relates to
    :param cn: Canonical Name for the certificate
    :param addresses: addresses for the SAN list
    """
    entry = {'cn': cn, 'addresses': addresses}
    self.entries.append(entry)
Add a request to the batch
245,680
def add_hostname_cn(self):
    """Add a request entry for this machine's hostname.

    SANs include the unit's private address and, when one exists, the
    VIP on that network.
    """
    ip = unit_get('private-address')
    addresses = [ip]
    # Include a configured VIP on this network in the SANs, if any.
    vip = get_vip_in_network(resolve_network_cidr(ip))
    if vip:
        addresses.append(vip)
    self.hostname_entry = {
        'cn': get_hostname(ip),
        'addresses': addresses,
    }
Add a request for the hostname of the machine
245,681
def add_hostname_cn_ip(self, addresses):
    """Add addresses to the hostname entry's SAN list, skipping
    duplicates.

    :param addresses: addresses to merge into the SAN list
    """
    sans = self.hostname_entry['addresses']
    for addr in addresses:
        if addr not in sans:
            sans.append(addr)
Add an address to the SAN list for the hostname request
245,682
def get_request(self):
    """Generate the request from the batched-up entries.

    Each CN maps to a sorted, de-duplicated SAN list.  When
    ``json_encode`` was requested the payload is serialised with
    stable key ordering.

    :returns: dict with a single 'cert_requests' key
    """
    if self.hostname_entry:
        self.entries.append(self.hostname_entry)
    request = {}
    for entry in self.entries:
        # De-duplicate and sort SANs for a deterministic request.
        request[entry['cn']] = {'sans': sorted(set(entry['addresses']))}
    if self.json_encode:
        return {'cert_requests': json.dumps(request, sort_keys=True)}
    return {'cert_requests': request}
Generate request from the batched up entries
245,683
def get_audits():
    """Get OS hardening sysctl audits.

    :returns: list of audits to run
    """
    settings = utils.get_settings('os')
    audits = [
        SysctlConf(),
        # The sysctl configuration should only be readable by root.
        FilePermissionAudit('/etc/sysctl.conf', user='root',
                            group='root', mode=0o0440),
    ]
    if not settings['security']['kernel_enable_module_loading']:
        audits.append(ModulesTemplate())
    return audits
Get OS hardening sysctl audits .
245,684
def _stat(file):
    """Get the Ownership information for a file via stat(1).

    :param file: path of the file to stat
    :returns: Ownership of (user, group, octal-mode) strings
    :raises subprocess.CalledProcessError: if stat fails (e.g. missing file)
    """
    # '%U %G %a' -> "owner group octal-mode"
    out = subprocess.check_output(['stat', '-c', '%U %G %a', file])
    return Ownership(*out.decode('utf-8').strip().split(' '))
Get the Ownership information from a file .
245,685
def _config_ini ( path ) : conf = configparser . ConfigParser ( ) conf . read ( path ) return dict ( conf )
Parse an ini file
245,686
def _validate_file_mode(mode, file_name, optional=False):
    """Validate that a file has the expected permission mode.

    :param mode: expected octal mode string, e.g. '600'
    :param file_name: path of the file to check
    :param optional: skip (rather than fail) when the file is missing
    :raises AssertionError: if the file is missing (and not optional)
        or has the wrong mode
    """
    try:
        ownership = _stat(file_name)
    except subprocess.CalledProcessError as e:
        print("Error reading file: {}".format(e))
        if not optional:
            assert False, "Specified file does not exist: {}".format(file_name)
        # Bugfix: previously execution fell through here and hit a
        # NameError on ``ownership`` when an optional file was missing.
        return
    assert mode == ownership.mode, \
        "{} has an incorrect mode: {} should be {}".format(
            file_name, ownership.mode, mode)
    print("Validate mode of {}: PASS".format(file_name))
Validate that a specified file has the specified permissions .
245,687
def _config_section(config, section):
    """Read the audited service's configuration file and return one section.

    :param config: audit options with 'config_path' and 'config_file' keys
    :param section: name of the section to return
    :returns: the section mapping, or None if absent
    """
    path = os.path.join(config.get('config_path'),
                        config.get('config_file'))
    return _config_ini(path).get(section)
Read the configuration file and return a section .
245,688
def validate_file_permissions(config):
    """Verify that permissions on configuration files are secure enough.

    ``config['files']`` maps file names (globs allowed) to per-file
    options; a file's 'mode' falls back to the top-level 'permissions'
    setting, and 'optional' to the top-level 'optional' setting.

    :param config: audit configuration dict
    :raises RuntimeError: on unknown per-file option keys
    :raises AssertionError: when a file fails validation
    """
    files = config.get('files', {})
    for file_name, options in files.items():
        for key in options.keys():
            if key not in ["owner", "group", "mode"]:
                raise RuntimeError(
                    "Invalid ownership configuration: {}".format(key))
        mode = options.get('mode', config.get('permissions', '600'))
        # Bugfix: the global fallback used to be the *string* 'False',
        # which is truthy and silently marked every file optional.
        optional = options.get('optional', config.get('optional', False))
        if '*' in file_name:
            for file in glob.glob(file_name):
                # Skip globbed files that also have an explicit entry
                # of their own -- they are validated separately.
                if file not in files.keys():
                    if os.path.isfile(file):
                        _validate_file_mode(mode, file, optional)
        else:
            if os.path.isfile(file_name):
                _validate_file_mode(mode, file_name, optional)
Verify that permissions on configuration files are secure enough .
245,689
def validate_uses_tls_for_keystone(audit_options):
    """Verify that TLS is used to communicate with Keystone.

    :param audit_options: audit configuration dict
    :raises AssertionError: if the section is missing or TLS is not used
    """
    section = _config_section(audit_options, 'keystone_authtoken')
    assert section is not None, "Missing section 'keystone_authtoken'"
    uses_tls = (not section.get('insecure') and
                "https://" in section.get("auth_uri"))
    assert uses_tls, "TLS is not used for Keystone"
Verify that TLS is used to communicate with Keystone .
245,690
def validate_uses_tls_for_glance(audit_options):
    """Verify that TLS is used to communicate with Glance.

    :param audit_options: audit configuration dict
    :raises AssertionError: if the section is missing or TLS is not used
    """
    section = _config_section(audit_options, 'glance')
    assert section is not None, "Missing section 'glance'"
    uses_tls = (not section.get('insecure') and
                "https://" in section.get("api_servers"))
    assert uses_tls, "TLS is not used for Glance"
Verify that TLS is used to communicate with Glance .
245,691
def is_ready(self):
    """Return True when the required data is available from any unit.

    Logs at DEBUG level when the relation is still incomplete.
    """
    ready = bool(self.get(self.name, []))
    if not ready:
        hookenv.log(
            'Incomplete relation: {}'.format(self.__class__.__name__),
            hookenv.DEBUG)
    return ready
Returns True if all of the required_keys are available from any units .
245,692
def _is_ready ( self , unit_data ) : return set ( unit_data . keys ( ) ) . issuperset ( set ( self . required_keys ) )
Helper method that tests a set of relation data and returns True if all of the required_keys are present .
245,693
def service_restart(service_name):
    """Restart a service if it is running, otherwise start it.

    Wraps host.service_restart to prevent spurious "unknown service"
    messages in the logs; unavailable services are silently skipped.

    :param service_name: name of the service to (re)start
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
Wrapper around host . service_restart to prevent spurious unknown service messages in the logs .
245,694
def manage(self):
    """Handle the current hook by doing The Right Thing with the
    registered services.

    On 'stop' hooks all services are stopped; otherwise services are
    reconfigured (started or stopped as their readiness changes) and
    provided data is set on relations.  hookenv atstart callbacks run
    first and atexit callbacks run at the end.
    """
    hookenv._run_atstart()
    try:
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.reconfigure_services()
            self.provide_data()
    except SystemExit as x:
        # A clean sys.exit() from a handler still gets its atexit
        # callbacks run.  NOTE(review): the SystemExit is swallowed
        # here and the call below also fires, so atexit callbacks
        # appear to run twice on a clean early exit -- confirm whether
        # a `raise` was intended after this block.
        if x.code is None or x.code == 0:
            hookenv._run_atexit()
    hookenv._run_atexit()
Handle the current hook by doing The Right Thing with the registered services .
245,695
def provide_data(self):
    """Set the relation data for each provider in every service's
    ``provided_data`` list.

    A provider's ``provide_data()`` is invoked once per relation id of
    its relation; providers with the newer two-argument signature are
    passed the remote service name and this service's readiness.  Any
    returned data is written back onto the relation.
    """
    for service_name, service in self.services.items():
        service_ready = self.is_ready(service_name)
        for provider in service.get('provided_data', []):
            for relid in hookenv.relation_ids(provider.name):
                units = hookenv.related_units(relid)
                if not units:
                    continue
                remote_service = units[0].split('/')[0]
                # Legacy providers take no arguments; newer ones take
                # (remote_service, service_ready).
                takes_args = len(getargspec(provider.provide_data).args) > 1
                if takes_args:
                    data = provider.provide_data(remote_service,
                                                 service_ready)
                else:
                    data = provider.provide_data()
                if data:
                    hookenv.relation_set(relid, data)
Set the relation data for each provider in the provided_data list .
245,696
def reconfigure_services(self, *service_names):
    """Update all files for one or more registered services, and
    (re)start or stop them depending on readiness.

    With no arguments, every registered service is processed.
    """
    for name in service_names or self.services.keys():
        if self.is_ready(name):
            self.fire_event('data_ready', name)
            self.fire_event('start', name, default=[
                service_restart,
                manage_ports])
            self.save_ready(name)
        else:
            # Only fire data_lost if the service was previously ready.
            if self.was_ready(name):
                self.fire_event('data_lost', name)
            self.fire_event('stop', name, default=[
                manage_ports,
                service_stop])
            self.save_lost(name)
Update all files for one or more registered services and if ready optionally restart them .
245,697
def stop_services(self, *service_names):
    """Stop one or more registered services, by name.

    With no arguments, stops all registered services.
    """
    names = service_names or self.services.keys()
    for name in names:
        self.fire_event('stop', name, default=[manage_ports, service_stop])
Stop one or more registered services by name .
245,698
def get_service(self, service_name):
    """Given the name of a registered service, return its definition.

    :param service_name: name the service was registered under
    :raises KeyError: if the service is unregistered (or empty)
    """
    definition = self.services.get(service_name)
    if not definition:
        raise KeyError('Service not registered: %s' % service_name)
    return definition
Given the name of a registered service return its service definition .
245,699
def fire_event(self, event_name, service_name, default=None):
    """Fire a data_ready, data_lost, start, or stop event on a service.

    Callbacks registered for the event (or ``default`` when none are)
    are invoked in order.  ManagerCallback instances receive the full
    (manager, service_name, event_name) context; plain callables get
    only the service name.
    """
    service = self.get_service(service_name)
    handlers = service.get(event_name, default)
    if not handlers:
        return
    if not isinstance(handlers, Iterable):
        # Allow a single bare callable instead of a list.
        handlers = [handlers]
    for handler in handlers:
        if isinstance(handler, ManagerCallback):
            handler(self, service_name, event_name)
        else:
            handler(service_name)