def avail_images(kwargs=None, call=None):
'''
Return a dict of all available VM images on the cloud provider.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not isinstance(kwargs, dict):
kwargs = {}
if 'owner' in kwargs:
owner = kwargs['owner']
else:
provider = get_configured_provider()
owner = config.get_cloud_config_value(
'owner', provider, __opts__, default='amazon'
)
ret = {}
params = {'Action': 'DescribeImages',
'Owner': owner}
images = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for image in images:
ret[image['imageId']] = image
return ret
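# Illustrative usage (not part of the original module): the DescribeImages
# scope can be narrowed with an 'owner' kwarg when calling this as a
# salt-cloud function, e.g.
#
#   salt-cloud -f avail_images my-ec2-config owner=self
#   salt-cloud --list-images my-ec2-config
#
# With no owner given, the provider-level 'owner' setting (default 'amazon')
# is used.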
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
ret = config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
if ret not in ('public_ips', 'private_ips'):
log.warning(
'Invalid ssh_interface: %s. '
'Allowed options are ("public_ips", "private_ips"). '
'Defaulting to "public_ips".', ret
)
ret = 'public_ips'
return ret
def get_ssh_gateway_config(vm_):
'''
Return the ssh_gateway configuration.
'''
ssh_gateway = config.get_cloud_config_value(
'ssh_gateway', vm_, __opts__, default=None,
search_global=False
)
# Check to see if an SSH gateway will be used.
if not isinstance(ssh_gateway, six.string_types):
return None
# Create dictionary of configuration items
# ssh_gateway
ssh_gateway_config = {'ssh_gateway': ssh_gateway}
# ssh_gateway_port
ssh_gateway_config['ssh_gateway_port'] = config.get_cloud_config_value(
'ssh_gateway_port', vm_, __opts__, default=None,
search_global=False
)
# ssh_gateway_username
ssh_gateway_config['ssh_gateway_user'] = config.get_cloud_config_value(
'ssh_gateway_username', vm_, __opts__, default=None,
search_global=False
)
# ssh_gateway_private_key
ssh_gateway_config['ssh_gateway_key'] = config.get_cloud_config_value(
'ssh_gateway_private_key', vm_, __opts__, default=None,
search_global=False
)
# ssh_gateway_password
ssh_gateway_config['ssh_gateway_password'] = config.get_cloud_config_value(
'ssh_gateway_password', vm_, __opts__, default=None,
search_global=False
)
# ssh_gateway_command
ssh_gateway_config['ssh_gateway_command'] = config.get_cloud_config_value(
'ssh_gateway_command', vm_, __opts__, default=None,
search_global=False
)
# Check if private key exists
key_filename = ssh_gateway_config['ssh_gateway_key']
if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined ssh_gateway_private_key \'{0}\' does not exist'
.format(key_filename)
)
elif (
key_filename is None and
not ssh_gateway_config['ssh_gateway_password']
):
raise SaltCloudConfigError(
'No authentication method. Please define: '
' ssh_gateway_password or ssh_gateway_private_key'
)
return ssh_gateway_config
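# Sketch of the structure returned above (illustrative only; host, user and
# key path below are made up):
#
#   {'ssh_gateway': 'bastion.example.com',
#    'ssh_gateway_port': 22,
#    'ssh_gateway_user': 'ec2-user',
#    'ssh_gateway_key': '/etc/salt/bastion.pem',
#    'ssh_gateway_password': None,
#    'ssh_gateway_command': None}
#
# Either ssh_gateway_private_key or ssh_gateway_password must be configured,
# otherwise the SaltCloudConfigError above is raised.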
def avail_locations(call=None):
'''
List all available locations
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
params = {'Action': 'DescribeRegions'}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for region in result:
ret[region['regionName']] = {
'name': region['regionName'],
'endpoint': region['regionEndpoint'],
}
return ret
def get_availability_zone(vm_):
'''
Return the availability zone to use
'''
avz = config.get_cloud_config_value(
'availability_zone', vm_, __opts__, search_global=False
)
if avz is None:
return None
zones = _list_availability_zones(vm_)
# Validate user-specified AZ
if avz not in zones:
raise SaltCloudException(
'The specified availability zone isn\'t valid in this region: '
'{0}\n'.format(
avz
)
)
# check specified AZ is available
elif zones[avz] != 'available':
raise SaltCloudException(
'The specified availability zone isn\'t currently available: '
'{0}\n'.format(
avz
)
)
return avz
def get_imageid(vm_):
'''
Returns the ImageId to use
'''
image = config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
)
if image.startswith('ami-'):
return image
# a poor man's cache
if not hasattr(get_imageid, 'images'):
get_imageid.images = {}
elif image in get_imageid.images:
return get_imageid.images[image]
params = {'Action': 'DescribeImages',
'Filter.0.Name': 'name',
'Filter.0.Value.0': image}
# Query AWS, sort by 'creationDate' and get the last imageId
_t = lambda x: datetime.datetime.strptime(x['creationDate'], '%Y-%m-%dT%H:%M:%S.%fZ')
image_id = sorted(aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'),
key=_t)[-1]['imageId']
get_imageid.images[image] = image_id
return image_id
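# Illustrative behaviour (example values are hypothetical): literal AMI ids
# are returned untouched, while names are resolved via DescribeImages once
# and then served from the per-process cache on the function object:
#
#   get_imageid({'image': 'ami-0123456789abcdef0'})  # passed through as-is
#   get_imageid({'image': 'my-golden-image'})        # newest creationDate wins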
def _get_subnetname_id(subnetname):
'''
Returns the SubnetId of a SubnetName to use
'''
params = {'Action': 'DescribeSubnets'}
for subnet in aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'):
tags = subnet.get('tagSet', {}).get('item', {})
if not isinstance(tags, list):
tags = [tags]
for tag in tags:
if tag['key'] == 'Name' and tag['value'] == subnetname:
log.debug(
'AWS Subnet ID of %s is %s',
subnetname, subnet['subnetId']
)
return subnet['subnetId']
return None
def get_subnetid(vm_):
'''
Returns the SubnetId to use
'''
subnetid = config.get_cloud_config_value(
'subnetid', vm_, __opts__, search_global=False
)
if subnetid:
return subnetid
subnetname = config.get_cloud_config_value(
'subnetname', vm_, __opts__, search_global=False
)
if subnetname:
return _get_subnetname_id(subnetname)
return None
def _get_securitygroupname_id(securitygroupname_list):
'''
Returns the SecurityGroupId of a SecurityGroupName to use
'''
securitygroupid_set = set()
if not isinstance(securitygroupname_list, list):
securitygroupname_list = [securitygroupname_list]
params = {'Action': 'DescribeSecurityGroups'}
for sg in aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'):
if sg['groupName'] in securitygroupname_list:
log.debug(
'AWS SecurityGroup ID of %s is %s',
sg['groupName'], sg['groupId']
)
securitygroupid_set.add(sg['groupId'])
return list(securitygroupid_set)
def securitygroupid(vm_):
'''
Returns the SecurityGroupId
'''
securitygroupid_set = set()
securitygroupid_list = config.get_cloud_config_value(
'securitygroupid',
vm_,
__opts__,
search_global=False
)
# If the value is None, the set stays empty
# If it is a list or set, union() merges the group ids in directly
# Note that a bare string would be iterated character by character by set(),
# so security group ids should always be supplied as a list
if securitygroupid_list:
securitygroupid_set = securitygroupid_set.union(set(securitygroupid_list))
securitygroupname_list = config.get_cloud_config_value(
'securitygroupname', vm_, __opts__, search_global=False
)
if securitygroupname_list:
if not isinstance(securitygroupname_list, list):
securitygroupname_list = [securitygroupname_list]
params = {'Action': 'DescribeSecurityGroups'}
for sg in aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'):
if sg['groupName'] in securitygroupname_list:
log.debug(
'AWS SecurityGroup ID of %s is %s',
sg['groupName'], sg['groupId']
)
securitygroupid_set.add(sg['groupId'])
return list(securitygroupid_set)
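# Illustrative profile snippet (hypothetical values) showing the two ways a
# security group can be referenced; ids are used directly, while names are
# resolved to ids via DescribeSecurityGroups:
#
#   securitygroupid:
#     - sg-0123456789abcdef0
#   securitygroupname:
#     - my-web-sg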
def get_provider(vm_=None):
'''
Extract the provider name from vm
'''
if vm_ is None:
provider = __active_provider_name__ or 'ec2'
else:
provider = vm_.get('provider', 'ec2')
if ':' in provider:
prov_comps = provider.split(':')
provider = prov_comps[0]
return provider
def _list_availability_zones(vm_=None):
'''
List all availability zones in the current region
'''
ret = {}
params = {'Action': 'DescribeAvailabilityZones',
'Filter.0.Name': 'region-name',
'Filter.0.Value.0': get_location(vm_)}
result = aws.query(params,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
for zone in result:
ret[zone['zoneName']] = zone['zoneState']
return ret
def _request_eip(interface, vm_):
'''
Request and return Elastic IP
'''
params = {'Action': 'AllocateAddress'}
params['Domain'] = interface.setdefault('domain', 'vpc')
eips = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
for eip in eips:
if 'allocationId' in eip:
return eip['allocationId']
return None
def _create_eni_if_necessary(interface, vm_):
'''
Create an Elastic Network Interface (ENI) if necessary and return a network interface specification
'''
if 'NetworkInterfaceId' in interface and interface['NetworkInterfaceId'] is not None:
return {'DeviceIndex': interface['DeviceIndex'],
'NetworkInterfaceId': interface['NetworkInterfaceId']}
params = {'Action': 'DescribeSubnets'}
subnet_query = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
if 'SecurityGroupId' not in interface and 'securitygroupname' in interface:
interface['SecurityGroupId'] = _get_securitygroupname_id(interface['securitygroupname'])
if 'SubnetId' not in interface and 'subnetname' in interface:
interface['SubnetId'] = _get_subnetname_id(interface['subnetname'])
subnet_id = _get_subnet_id_for_interface(subnet_query, interface)
if not subnet_id:
raise SaltCloudConfigError(
'No such subnet <{0}>'.format(interface.get('SubnetId'))
)
params = {'SubnetId': subnet_id}
for k in 'Description', 'PrivateIpAddress', 'SecondaryPrivateIpAddressCount':
if k in interface:
params[k] = interface[k]
for k in 'PrivateIpAddresses', 'SecurityGroupId':
if k in interface:
params.update(_param_from_config(k, interface[k]))
if 'AssociatePublicIpAddress' in interface:
# Associating a public address in a VPC only works when the interface is not
# created beforehand, but as a part of the machine creation request.
for k in ('DeviceIndex', 'AssociatePublicIpAddress', 'NetworkInterfaceId'):
if k in interface:
params[k] = interface[k]
params['DeleteOnTermination'] = interface.get('delete_interface_on_terminate', True)
return params
params['Action'] = 'CreateNetworkInterface'
result = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
eni_desc = result[1]
if not eni_desc or not eni_desc.get('networkInterfaceId'):
raise SaltCloudException('Failed to create interface: {0}'.format(result))
eni_id = eni_desc.get('networkInterfaceId')
log.debug(
'Created network interface %s inst %s',
eni_id, interface['DeviceIndex']
)
associate_public_ip = interface.get('AssociatePublicIpAddress', False)
if isinstance(associate_public_ip, six.string_types):
# Assume id of EIP as value
_associate_eip_with_interface(eni_id, associate_public_ip, vm_=vm_)
if interface.get('associate_eip'):
_associate_eip_with_interface(eni_id, interface.get('associate_eip'), vm_=vm_)
elif interface.get('allocate_new_eip'):
_new_eip = _request_eip(interface, vm_)
_associate_eip_with_interface(eni_id, _new_eip, vm_=vm_)
elif interface.get('allocate_new_eips'):
addr_list = _list_interface_private_addrs(eni_desc)
eip_list = []
for idx, addr in enumerate(addr_list):
eip_list.append(_request_eip(interface, vm_))
for idx, addr in enumerate(addr_list):
_associate_eip_with_interface(eni_id, eip_list[idx], addr, vm_=vm_)
if 'Name' in interface:
tag_params = {'Action': 'CreateTags',
'ResourceId.0': eni_id,
'Tag.0.Key': 'Name',
'Tag.0.Value': interface['Name']}
tag_response = aws.query(tag_params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
if 'error' in tag_response:
log.error('Failed to set name of interface %s', eni_id)
return {'DeviceIndex': interface['DeviceIndex'],
'NetworkInterfaceId': eni_id}
def _list_interface_private_addrs(eni_desc):
'''
Returns a list of all of the private IP addresses attached to a
network interface. The 'primary' address will be listed first.
'''
primary = eni_desc.get('privateIpAddress')
if not primary:
return None
addresses = [primary]
lst = eni_desc.get('privateIpAddressesSet', {}).get('item', [])
if not isinstance(lst, list):
return addresses
for entry in lst:
if entry.get('primary') == 'true':
continue
if entry.get('privateIpAddress'):
addresses.append(entry.get('privateIpAddress'))
return addresses
def _modify_eni_properties(eni_id, properties=None, vm_=None):
'''
Change properties of the interface
with id eni_id to the values in properties dict
'''
if not isinstance(properties, dict):
raise SaltCloudException(
'ENI properties must be a dictionary'
)
params = {'Action': 'ModifyNetworkInterfaceAttribute',
'NetworkInterfaceId': eni_id}
for k, v in six.iteritems(properties):
params[k] = v
result = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
if isinstance(result, dict) and result.get('error'):
raise SaltCloudException(
'Could not change interface <{0}> attributes <\'{1}\'>'.format(
eni_id, properties
)
)
else:
return result
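# Illustrative call (hypothetical id): the properties dict maps directly onto
# ModifyNetworkInterfaceAttribute query parameters, for example disabling
# source/destination checking on an interface:
#
#   _modify_eni_properties('eni-0123456789abcdef0',
#                          {'SourceDestCheck.Value': 'false'}, vm_=vm_)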
def _associate_eip_with_interface(eni_id, eip_id, private_ip=None, vm_=None):
'''
Accept the id of a network interface, and the id of an elastic ip
address, and associate the two of them, such that traffic sent to the
elastic ip address will be forwarded (NATted) to this network interface.
Optionally specify the private (10.x.x.x) IP address that traffic should
be NATted to - useful if you have multiple IP addresses assigned to an
interface.
'''
params = {'Action': 'AssociateAddress',
'NetworkInterfaceId': eni_id,
'AllocationId': eip_id}
if private_ip:
params['PrivateIpAddress'] = private_ip
result = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
if not result[2].get('associationId'):
raise SaltCloudException(
'Could not associate elastic ip address '
'<{0}> with network interface <{1}>'.format(
eip_id, eni_id
)
)
log.debug(
'Associated ElasticIP address %s with interface %s',
eip_id, eni_id
)
return result[2].get('associationId')
def _param_from_config(key, data):
'''
Return EC2 API parameters based on the given config data.
Examples:
1. List of dictionaries
>>> data = [
... {'DeviceIndex': 0, 'SubnetId': 'subid0',
... 'AssociatePublicIpAddress': True},
... {'DeviceIndex': 1,
... 'SubnetId': 'subid1',
... 'PrivateIpAddress': '192.168.1.128'}
... ]
>>> _param_from_config('NetworkInterface', data)
... {'NetworkInterface.0.SubnetId': 'subid0',
... 'NetworkInterface.0.DeviceIndex': 0,
... 'NetworkInterface.1.SubnetId': 'subid1',
... 'NetworkInterface.1.PrivateIpAddress': '192.168.1.128',
... 'NetworkInterface.0.AssociatePublicIpAddress': 'true',
... 'NetworkInterface.1.DeviceIndex': 1}
2. List of nested dictionaries
>>> data = [
... {'DeviceName': '/dev/sdf',
... 'Ebs': {
... 'SnapshotId': 'dummy0',
... 'VolumeSize': 200,
... 'VolumeType': 'standard'}},
... {'DeviceName': '/dev/sdg',
... 'Ebs': {
... 'SnapshotId': 'dummy1',
... 'VolumeSize': 100,
... 'VolumeType': 'standard'}}
... ]
>>> _param_from_config('BlockDeviceMapping', data)
... {'BlockDeviceMapping.0.Ebs.VolumeType': 'standard',
... 'BlockDeviceMapping.1.Ebs.SnapshotId': 'dummy1',
... 'BlockDeviceMapping.0.Ebs.VolumeSize': 200,
... 'BlockDeviceMapping.0.Ebs.SnapshotId': 'dummy0',
... 'BlockDeviceMapping.1.Ebs.VolumeType': 'standard',
... 'BlockDeviceMapping.1.DeviceName': '/dev/sdg',
... 'BlockDeviceMapping.1.Ebs.VolumeSize': 100,
... 'BlockDeviceMapping.0.DeviceName': '/dev/sdf'}
3. Dictionary of dictionaries
>>> data = { 'Arn': 'dummyarn', 'Name': 'Tester' }
>>> _param_from_config('IamInstanceProfile', data)
{'IamInstanceProfile.Arn': 'dummyarn', 'IamInstanceProfile.Name': 'Tester'}
'''
param = {}
if isinstance(data, dict):
for k, v in six.iteritems(data):
param.update(_param_from_config('{0}.{1}'.format(key, k), v))
elif isinstance(data, list) or isinstance(data, tuple):
for idx, conf_item in enumerate(data):
prefix = '{0}.{1}'.format(key, idx)
param.update(_param_from_config(prefix, conf_item))
else:
if isinstance(data, bool):
# convert boolean True/False to 'true'/'false'
param.update({key: six.text_type(data).lower()})
else:
param.update({key: data})
return param
def request_instance(vm_=None, call=None):
'''
Put together all of the information necessary to request an instance on EC2,
and then fire off the request for the instance.
Returns data about the instance
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The request_instance action must be called with -a or --action.'
)
location = vm_.get('location', get_location(vm_))
# do we launch a regular vm or a spot instance?
# see http://goo.gl/hYZ13f for more information on EC2 API
spot_config = get_spot_config(vm_)
if spot_config is not None:
if 'spot_price' not in spot_config:
raise SaltCloudSystemExit(
'Spot instance config for {0} requires a spot_price '
'attribute.'.format(vm_['name'])
)
params = {'Action': 'RequestSpotInstances',
'InstanceCount': '1',
'Type': spot_config['type']
if 'type' in spot_config else 'one-time',
'SpotPrice': spot_config['spot_price']}
# All of the necessary launch parameters for a VM when using
# spot instances are the same except for the prefix below
# being tacked on.
spot_prefix = 'LaunchSpecification.'
# regular EC2 instance
else:
# WARNING! EXPERIMENTAL!
# This allows more than one instance to be spun up in a single call.
# The first instance will be called by the name provided, but all other
# instances will be nameless (or more specifically, they will use the
# InstanceId as the name). This interface is expected to change, so
# use at your own risk.
min_instance = config.get_cloud_config_value(
'min_instance', vm_, __opts__, search_global=False, default=1
)
max_instance = config.get_cloud_config_value(
'max_instance', vm_, __opts__, search_global=False, default=1
)
params = {'Action': 'RunInstances',
'MinCount': min_instance,
'MaxCount': max_instance}
# Normal instances should have no prefix.
spot_prefix = ''
image_id = get_imageid(vm_)
params[spot_prefix + 'ImageId'] = image_id
userdata = None
userdata_file = config.get_cloud_config_value(
'userdata_file', vm_, __opts__, search_global=False, default=None
)
if userdata_file is None:
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
else:
log.trace('userdata_file: %s', userdata_file)
if os.path.exists(userdata_file):
with salt.utils.files.fopen(userdata_file, 'r') as fh_:
userdata = salt.utils.stringutils.to_unicode(fh_.read())
userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
if userdata is not None:
try:
params[spot_prefix + 'UserData'] = base64.b64encode(
salt.utils.stringutils.to_bytes(userdata)
)
except Exception as exc:
log.exception('Failed to encode userdata: %s', exc)
vm_size = config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
)
params[spot_prefix + 'InstanceType'] = vm_size
ex_keyname = keyname(vm_)
if ex_keyname:
params[spot_prefix + 'KeyName'] = ex_keyname
ex_securitygroup = securitygroup(vm_)
if ex_securitygroup:
if not isinstance(ex_securitygroup, list):
params[spot_prefix + 'SecurityGroup.1'] = ex_securitygroup
else:
for counter, sg_ in enumerate(ex_securitygroup):
params[spot_prefix + 'SecurityGroup.{0}'.format(counter)] = sg_
ex_iam_profile = iam_profile(vm_)
if ex_iam_profile:
try:
if ex_iam_profile.startswith('arn:aws:iam:'):
params[
spot_prefix + 'IamInstanceProfile.Arn'
] = ex_iam_profile
else:
params[
spot_prefix + 'IamInstanceProfile.Name'
] = ex_iam_profile
except AttributeError:
raise SaltCloudConfigError(
'\'iam_profile\' should be a string value.'
)
az_ = get_availability_zone(vm_)
if az_ is not None:
params[spot_prefix + 'Placement.AvailabilityZone'] = az_
tenancy_ = get_tenancy(vm_)
if tenancy_ is not None:
if spot_config is not None:
raise SaltCloudConfigError(
'Spot instance config for {0} does not support '
'specifying tenancy.'.format(vm_['name'])
)
params['Placement.Tenancy'] = tenancy_
subnetid_ = get_subnetid(vm_)
if subnetid_ is not None:
params[spot_prefix + 'SubnetId'] = subnetid_
ex_securitygroupid = securitygroupid(vm_)
if ex_securitygroupid:
if not isinstance(ex_securitygroupid, list):
params[spot_prefix + 'SecurityGroupId.1'] = ex_securitygroupid
else:
for counter, sg_ in enumerate(ex_securitygroupid):
params[
spot_prefix + 'SecurityGroupId.{0}'.format(counter)
] = sg_
placementgroup_ = get_placementgroup(vm_)
if placementgroup_ is not None:
params[spot_prefix + 'Placement.GroupName'] = placementgroup_
blockdevicemappings_holder = block_device_mappings(vm_)
if blockdevicemappings_holder:
for _bd in blockdevicemappings_holder:
if 'tag' in _bd:
_bd.pop('tag')
ex_blockdevicemappings = blockdevicemappings_holder
if ex_blockdevicemappings:
params.update(_param_from_config(spot_prefix + 'BlockDeviceMapping',
ex_blockdevicemappings))
network_interfaces = config.get_cloud_config_value(
'network_interfaces',
vm_,
__opts__,
search_global=False
)
if network_interfaces:
eni_devices = []
for interface in network_interfaces:
log.debug('Create network interface: %s', interface)
_new_eni = _create_eni_if_necessary(interface, vm_)
eni_devices.append(_new_eni)
params.update(_param_from_config(spot_prefix + 'NetworkInterface',
eni_devices))
set_ebs_optimized = config.get_cloud_config_value(
'ebs_optimized', vm_, __opts__, search_global=False
)
if set_ebs_optimized is not None:
if not isinstance(set_ebs_optimized, bool):
raise SaltCloudConfigError(
'\'ebs_optimized\' should be a boolean value.'
)
params[spot_prefix + 'EbsOptimized'] = set_ebs_optimized
set_del_root_vol_on_destroy = config.get_cloud_config_value(
'del_root_vol_on_destroy', vm_, __opts__, search_global=False
)
set_termination_protection = config.get_cloud_config_value(
'termination_protection', vm_, __opts__, search_global=False
)
if set_termination_protection is not None:
if not isinstance(set_termination_protection, bool):
raise SaltCloudConfigError(
'\'termination_protection\' should be a boolean value.'
)
params.update(_param_from_config(spot_prefix + 'DisableApiTermination',
set_termination_protection))
if set_del_root_vol_on_destroy and not isinstance(set_del_root_vol_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_root_vol_on_destroy\' should be a boolean value.'
)
vm_['set_del_root_vol_on_destroy'] = set_del_root_vol_on_destroy
if set_del_root_vol_on_destroy:
# first make sure to look up the root device name
# as Ubuntu and CentOS (and most likely other OSs)
# use different device identifiers
log.info('Attempting to look up root device name for image id %s on '
'VM %s', image_id, vm_['name'])
rd_params = {
'Action': 'DescribeImages',
'ImageId.1': image_id
}
try:
rd_data = aws.query(rd_params,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
if 'error' in rd_data:
return rd_data['error']
log.debug('EC2 Response: \'%s\'', rd_data)
except Exception as exc:
log.error(
'Error getting root device name for image id %s for '
'VM %s: \n%s', image_id, vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
raise
# make sure we have a response
if not rd_data:
err_msg = 'There was an error querying EC2 for the root device ' \
'of image id {0}. Empty response.'.format(image_id)
raise SaltCloudSystemExit(err_msg)
# pull the root device name from the result and use it when
# launching the new VM
rd_name = None
rd_type = None
if 'blockDeviceMapping' in rd_data[0]:
# Some AMIs do not have a root volume. Ignore such cases
if rd_data[0]['blockDeviceMapping'] is not None:
item = rd_data[0]['blockDeviceMapping']['item']
if isinstance(item, list):
item = item[0]
rd_name = item['deviceName']
# Grab the volume type
rd_type = item['ebs'].get('volumeType', None)
log.info('Found root device name: %s', rd_name)
if rd_name is not None:
if ex_blockdevicemappings:
dev_list = [
dev['DeviceName'] for dev in ex_blockdevicemappings
]
else:
dev_list = []
if rd_name in dev_list:
# Device already listed, just grab the index
dev_index = dev_list.index(rd_name)
else:
dev_index = len(dev_list)
# Add the device name in since it wasn't already there
params[
'{0}BlockDeviceMapping.{1}.DeviceName'.format(
spot_prefix, dev_index
)
] = rd_name
# Set the termination value
termination_key = '{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(spot_prefix, dev_index)
params[termination_key] = six.text_type(set_del_root_vol_on_destroy).lower()
# Use default volume type if not specified
if ex_blockdevicemappings and dev_index < len(ex_blockdevicemappings) and \
'Ebs.VolumeType' not in ex_blockdevicemappings[dev_index]:
type_key = '{0}BlockDeviceMapping.{1}.Ebs.VolumeType'.format(spot_prefix, dev_index)
params[type_key] = rd_type
set_del_all_vols_on_destroy = config.get_cloud_config_value(
'del_all_vols_on_destroy', vm_, __opts__, search_global=False, default=False
)
if set_del_all_vols_on_destroy and not isinstance(set_del_all_vols_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_all_vols_on_destroy\' should be a boolean value.'
)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event'](
'requesting', params, list(params)
),
'location': location,
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
provider = get_provider(vm_)
try:
data = aws.query(params,
'instancesSet',
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if 'error' in data:
return data['error']
except Exception as exc:
log.error(
'Error creating %s on EC2 when trying to run the initial '
'deployment: \n%s', vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
raise
# if we're using spot instances, we need to wait for the spot request
# to become active before we continue
if spot_config:
sir_id = data[0]['spotInstanceRequestId']
vm_['spotRequestId'] = sir_id
def __query_spot_instance_request(sir_id, location):
params = {'Action': 'DescribeSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if not data:
log.error(
'There was an error while querying EC2. Empty response'
)
# Trigger a failure in the wait for spot instance method
return False
if isinstance(data, dict) and 'error' in data:
log.warning('There was an error in the query. %s', data['error'])
# Trigger a failure in the wait for spot instance method
return False
log.debug('Returned query data: %s', data)
state = data[0].get('state')
if state == 'active':
return data
if state == 'open':
# Still waiting for an active state
log.info('Spot instance status: %s', data[0]['status']['message'])
return None
if state in ['cancelled', 'failed', 'closed']:
# Request will never be active, fail
log.error('Spot instance request resulted in state \'%s\'. '
'Nothing else we can do here.', state)
return False
__utils__['cloud.fire_event'](
'event',
'waiting for spot instance',
'salt/cloud/{0}/waiting_for_spot'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
data = _wait_for_spot_instance(
__query_spot_instance_request,
update_args=(sir_id, location),
timeout=config.get_cloud_config_value(
'wait_for_spot_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_spot_interval', vm_, __opts__, default=30),
interval_multiplier=config.get_cloud_config_value(
'wait_for_spot_interval_multiplier',
vm_,
__opts__,
default=1),
max_failures=config.get_cloud_config_value(
'wait_for_spot_max_failures',
vm_,
__opts__,
default=10),
)
log.debug('wait_for_spot_instance data %s', data)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# Cancel the existing spot instance request
params = {'Action': 'CancelSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
log.debug('Canceled spot instance request %s. Data '
'returned: %s', sir_id, data)
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
return data, vm_
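# Illustrative spot configuration (hypothetical price): defining spot_config
# in a profile switches the request above to RequestSpotInstances, with every
# launch parameter prefixed by 'LaunchSpecification.':
#
#   spot_config:
#     spot_price: 0.10
#     type: one-time        # or 'persistent'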
def query_instance(vm_=None, call=None):
'''
Query an instance upon creation from the EC2 API
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The query_instance action must be called with -a or --action.'
)
instance_id = vm_['instance_id']
location = vm_.get('location', get_location(vm_))
__utils__['cloud.fire_event'](
'event',
'querying instance',
'salt/cloud/{0}/querying'.format(vm_['name']),
args={'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('The new VM instance_id is %s', instance_id)
params = {'Action': 'DescribeInstances',
'InstanceId.1': instance_id}
provider = get_provider(vm_)
attempts = 0
while attempts < aws.AWS_MAX_RETRIES:
data, requesturl = aws.query(params, # pylint: disable=unbalanced-tuple-unpacking
location=location,
provider=provider,
opts=__opts__,
return_url=True,
sigver='4')
log.debug('The query returned: %s', data)
if isinstance(data, dict) and 'error' in data:
log.warning(
'There was an error in the query. %s attempts '
'remaining: %s', attempts, data['error']
)
elif isinstance(data, list) and not data:
log.warning(
'Query returned an empty list. %s attempts '
'remaining.', attempts
)
else:
break
aws.sleep_exponential_backoff(attempts)
attempts += 1
continue
else:
raise SaltCloudSystemExit(
'An error occurred while creating VM: {0}'.format(data['error'])
)
def __query_ip_address(params, url): # pylint: disable=W0613
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if not data:
log.error(
'There was an error while querying EC2. Empty response'
)
# Trigger a failure in the wait for IP function
return False
if isinstance(data, dict) and 'error' in data:
log.warning('There was an error in the query. %s', data['error'])
# Trigger a failure in the wait for IP function
return False
log.debug('Returned query data: %s', data)
if ssh_interface(vm_) == 'public_ips':
if 'ipAddress' in data[0]['instancesSet']['item']:
return data
else:
log.error(
'Public IP not detected.'
)
if ssh_interface(vm_) == 'private_ips':
if 'privateIpAddress' in data[0]['instancesSet']['item']:
return data
else:
log.error(
'Private IP not detected.'
)
try:
data = salt.utils.cloud.wait_for_ip(
__query_ip_address,
update_args=(params, requesturl),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
if 'reactor' in vm_ and vm_['reactor'] is True:
__utils__['cloud.fire_event'](
'event',
'instance queried',
'salt/cloud/{0}/query_reactor'.format(vm_['name']),
args={'data': data},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return data
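# The IP polling above can be tuned per profile (values shown are the
# defaults used in the code):
#
#   wait_for_ip_timeout: 600             # seconds
#   wait_for_ip_interval: 10             # seconds between queries
#   wait_for_ip_interval_multiplier: 1   # back-off multiplier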
def wait_for_instance(
vm_=None,
data=None,
ip_address=None,
display_ssh_output=True,
call=None,
):
'''
Wait for an instance upon creation from the EC2 API, to become available
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The wait_for_instance action must be called with -a or --action.'
)
if vm_ is None:
vm_ = {}
if data is None:
data = {}
ssh_gateway_config = vm_.get(
'gateway', get_ssh_gateway_config(vm_)
)
__utils__['cloud.fire_event'](
'event',
'waiting for ssh',
'salt/cloud/{0}/waiting_for_ssh'.format(vm_['name']),
args={'ip_address': ip_address},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
ssh_connect_timeout = config.get_cloud_config_value(
'ssh_connect_timeout', vm_, __opts__, 900 # 15 minutes
)
ssh_port = config.get_cloud_config_value(
'ssh_port', vm_, __opts__, 22
)
if config.get_cloud_config_value('win_installer', vm_, __opts__):
username = config.get_cloud_config_value(
'win_username', vm_, __opts__, default='Administrator'
)
win_passwd = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
win_deploy_auth_retries = config.get_cloud_config_value(
'win_deploy_auth_retries', vm_, __opts__, default=10
)
win_deploy_auth_retry_delay = config.get_cloud_config_value(
'win_deploy_auth_retry_delay', vm_, __opts__, default=1
)
use_winrm = config.get_cloud_config_value(
'use_winrm', vm_, __opts__, default=False
)
winrm_verify_ssl = config.get_cloud_config_value(
'winrm_verify_ssl', vm_, __opts__, default=True
)
if win_passwd and win_passwd == 'auto':
log.debug('Waiting for auto-generated Windows EC2 password')
while True:
password_data = get_password_data(
name=vm_['name'],
kwargs={
'key_file': vm_['private_key'],
},
call='action',
)
win_passwd = password_data.get('password', None)
if win_passwd is None:
log.debug(password_data)
# The wait is this long because the password is unlikely to
# be generated for at least 4 minutes
time.sleep(60)
else:
logging_data = password_data
logging_data['password'] = 'XXX-REDACTED-XXX'
logging_data['passwordData'] = 'XXX-REDACTED-XXX'
log.debug(logging_data)
vm_['win_password'] = win_passwd
break
# SMB is used whether psexec or winrm is chosen
if not salt.utils.cloud.wait_for_port(ip_address,
port=445,
timeout=ssh_connect_timeout):
raise SaltCloudSystemExit(
'Failed to connect to remote windows host'
)
# If not using winrm keep same psexec behavior
if not use_winrm:
log.debug('Trying to authenticate via SMB using psexec')
if not salt.utils.cloud.validate_windows_cred(ip_address,
username,
win_passwd,
retries=win_deploy_auth_retries,
retry_delay=win_deploy_auth_retry_delay):
raise SaltCloudSystemExit(
'Failed to authenticate against remote windows host (smb)'
)
# If using winrm
else:
# Default HTTPS port can be changed in cloud configuration
winrm_port = config.get_cloud_config_value(
'winrm_port', vm_, __opts__, default=5986
)
# Wait for winrm port to be available
if not salt.utils.cloud.wait_for_port(ip_address,
port=winrm_port,
timeout=ssh_connect_timeout):
raise SaltCloudSystemExit(
'Failed to connect to remote windows host (winrm)'
)
log.debug('Trying to authenticate via Winrm using pywinrm')
if not salt.utils.cloud.wait_for_winrm(ip_address,
winrm_port,
username,
win_passwd,
timeout=ssh_connect_timeout,
verify=winrm_verify_ssl):
raise SaltCloudSystemExit(
'Failed to authenticate against remote windows host'
)
elif salt.utils.cloud.wait_for_port(ip_address,
port=ssh_port,
timeout=ssh_connect_timeout,
gateway=ssh_gateway_config
):
# If a known_hosts_file is configured, this instance will not be
# accessible until it has a host key. Since this is provided on
# supported instances by cloud-init, and is viewable to us only in the
# console output (which may take several minutes to become available),
# we have some more waiting to do here.
known_hosts_file = config.get_cloud_config_value(
'known_hosts_file', vm_, __opts__, default=None
)
if known_hosts_file:
console = {}
while 'output_decoded' not in console:
console = get_console_output(
instance_id=vm_['instance_id'],
call='action',
location=get_location(vm_)
)
pprint.pprint(console)
time.sleep(5)
output = salt.utils.stringutils.to_unicode(console['output_decoded'])
comps = output.split('-----BEGIN SSH HOST KEY KEYS-----')
if len(comps) < 2:
# Fail; there are no host keys
return False
comps = comps[1].split('-----END SSH HOST KEY KEYS-----')
keys = ''
for line in comps[0].splitlines():
if not line:
continue
keys += '\n{0} {1}'.format(ip_address, line)
with salt.utils.files.fopen(known_hosts_file, 'a') as fp_:
fp_.write(salt.utils.stringutils.to_str(keys))
fp_.close()
for user in vm_['usernames']:
if salt.utils.cloud.wait_for_passwd(
host=ip_address,
port=ssh_port,
username=user,
ssh_timeout=config.get_cloud_config_value(
'wait_for_passwd_timeout', vm_, __opts__, default=1 * 60
),
key_filename=vm_['key_filename'],
display_ssh_output=display_ssh_output,
gateway=ssh_gateway_config,
maxtries=config.get_cloud_config_value(
'wait_for_passwd_maxtries', vm_, __opts__, default=15
),
known_hosts_file=config.get_cloud_config_value(
'known_hosts_file', vm_, __opts__,
default='/dev/null'
),
):
__opts__['ssh_username'] = user
vm_['ssh_username'] = user
break
else:
raise SaltCloudSystemExit(
'Failed to authenticate against remote ssh'
)
else:
raise SaltCloudSystemExit(
'Failed to connect to remote ssh'
)
if 'reactor' in vm_ and vm_['reactor'] is True:
__utils__['cloud.fire_event'](
'event',
'ssh is available',
'salt/cloud/{0}/ssh_ready_reactor'.format(vm_['name']),
args={'ip_address': ip_address},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return vm_
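# The Windows bootstrap path above is driven by profile settings such as
# (installer path and credentials below are hypothetical):
#
#   win_installer: /srv/salt/win/Salt-Minion-Setup.exe
#   win_username: Administrator
#   win_password: auto        # fetch the auto-generated EC2 password
#   use_winrm: True
#   winrm_port: 5986
#   winrm_verify_ssl: True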
def create(vm_=None, call=None):
'''
Create a single VM from a data dict
'''
if call:
raise SaltCloudSystemExit(
'You cannot create an instance with -a or -f.'
)
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'ec2',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
# Check for private_key and keyfile name for bootstrapping new instances
deploy = config.get_cloud_config_value(
'deploy', vm_, __opts__, default=True
)
win_password = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
key_filename = config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False, default=None
)
if deploy:
# The private_key and keyname settings are only needed for bootstrapping
# new instances when deploy is True
_validate_key_path_and_mode(key_filename)
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'ec2', vm_['driver']
)
vm_['key_filename'] = key_filename
# wait_for_instance requires private_key
vm_['private_key'] = key_filename
# Get SSH Gateway config early to verify the private_key,
# if used, exists or not. We don't want to deploy an instance
# and not be able to access it via the gateway.
vm_['gateway'] = get_ssh_gateway_config(vm_)
location = get_location(vm_)
vm_['location'] = location
log.info('Creating Cloud VM %s in %s', vm_['name'], location)
vm_['usernames'] = salt.utils.cloud.ssh_usernames(
vm_,
__opts__,
default_users=(
'ec2-user', # Amazon Linux, Fedora, RHEL; FreeBSD
'centos', # CentOS AMIs from AWS Marketplace
'ubuntu', # Ubuntu
'admin', # Debian GNU/Linux
'bitnami', # BitNami AMIs
'root' # Last resort, default user on RHEL 5, SUSE
)
)
if 'instance_id' in vm_:
# This was probably created via another process, and doesn't have
# things like salt keys created yet, so let's create them now.
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
__opts__
)
)
else:
# Put together all of the information required to request the instance,
# and then fire off the request for it
if keyname(vm_) is None:
raise SaltCloudSystemExit(
'The required \'keyname\' configuration setting is missing from the '
'\'ec2\' driver.'
)
data, vm_ = request_instance(vm_, location)
# If data is a str, it's an error
if isinstance(data, six.string_types):
log.error('Error requesting instance: %s', data)
return {}
# Pull the instance ID, valid for both spot and normal instances
# Multiple instances may have been spun up, get all their IDs
vm_['instance_id_list'] = []
for instance in data:
vm_['instance_id_list'].append(instance['instanceId'])
vm_['instance_id'] = vm_['instance_id_list'].pop()
if vm_['instance_id_list']:
# Multiple instances were spun up, get one now, and queue the rest
queue_instances(vm_['instance_id_list'])
# Wait for vital information, such as IP addresses, to be available
# for the new instance
data = query_instance(vm_)
# Now that the instance is available, tag it appropriately. Should
# mitigate race conditions with tags
tags = config.get_cloud_config_value('tag',
vm_,
__opts__,
{},
search_global=False)
if not isinstance(tags, dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
for value in six.itervalues(tags):
if not isinstance(value, six.string_types):
raise SaltCloudConfigError(
'\'tag\' values must be strings. Try quoting the values. '
'e.g. "2013-09-19T20:09:46Z".'
)
tags['Name'] = vm_['name']
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/{0}/tagging'.format(vm_['name']),
args={'tags': tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
salt.utils.cloud.wait_for_fun(
set_tags,
timeout=30,
name=vm_['name'],
tags=tags,
instance_id=vm_['instance_id'],
call='action',
location=location
)
# Once instance tags are set, tag the spot request if configured
if 'spot_config' in vm_ and 'tag' in vm_['spot_config']:
if not isinstance(vm_['spot_config']['tag'], dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
for value in six.itervalues(vm_['spot_config']['tag']):
if not isinstance(value, six.string_types):
raise SaltCloudConfigError(
'\'tag\' values must be strings. Try quoting the values. '
'e.g. "2013-09-19T20:09:46Z".'
)
spot_request_tags = {}
if 'spotRequestId' not in vm_:
raise SaltCloudConfigError('Failed to find spotRequestId')
sir_id = vm_['spotRequestId']
spot_request_tags['Name'] = vm_['name']
for k, v in six.iteritems(vm_['spot_config']['tag']):
spot_request_tags[k] = v
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/spot_request_{0}/tagging'.format(sir_id),
args={'tags': spot_request_tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
salt.utils.cloud.wait_for_fun(
set_tags,
timeout=30,
name=vm_['name'],
tags=spot_request_tags,
instance_id=sir_id,
call='action',
location=location
)
network_interfaces = config.get_cloud_config_value(
'network_interfaces',
vm_,
__opts__,
search_global=False
)
if network_interfaces:
_update_enis(network_interfaces, data, vm_)
# At this point, the node is created and tagged, and now needs to be
# bootstrapped, once the necessary port is available.
log.info('Created node %s', vm_['name'])
instance = data[0]['instancesSet']['item']
# Wait for the necessary port to become available to bootstrap
if ssh_interface(vm_) == 'private_ips':
ip_address = instance['privateIpAddress']
log.info('Salt node data. Private_ip: %s', ip_address)
else:
ip_address = instance['ipAddress']
log.info('Salt node data. Public_ip: %s', ip_address)
vm_['ssh_host'] = ip_address
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
salt_ip_address = instance['privateIpAddress']
log.info('Salt interface set to: %s', salt_ip_address)
else:
salt_ip_address = instance['ipAddress']
log.debug('Salt interface set to: %s', salt_ip_address)
vm_['salt_host'] = salt_ip_address
if deploy:
display_ssh_output = config.get_cloud_config_value(
'display_ssh_output', vm_, __opts__, default=True
)
vm_ = wait_for_instance(
vm_, data, ip_address, display_ssh_output
)
# The instance is booted and accessible, let's Salt it!
ret = instance.copy()
# Get ANY defined volumes settings, merging data, in the following order
# 1. VM config
# 2. Profile config
# 3. Global configuration
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
__utils__['cloud.fire_event'](
'event',
'attaching volumes',
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
args={'volumes': volumes},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Create and attach volumes to node %s', vm_['name'])
created = create_attach_volumes(
vm_['name'],
{
'volumes': volumes,
'zone': ret['placement']['availabilityZone'],
'instance_id': ret['instanceId'],
'del_all_vols_on_destroy': vm_.get('del_all_vols_on_destroy', False)
},
call='action'
)
ret['Attached Volumes'] = created
# Associate instance with a ssm document, if present
ssm_document = config.get_cloud_config_value(
'ssm_document', vm_, __opts__, None, search_global=False
)
if ssm_document:
log.debug('Associating with ssm document: %s', ssm_document)
assoc = ssm_create_association(
vm_['name'],
{'ssm_document': ssm_document},
instance_id=vm_['instance_id'],
call='action'
)
if isinstance(assoc, dict) and assoc.get('error', None):
log.error(
'Failed to associate instance %s with ssm document %s',
vm_['instance_id'], ssm_document
)
return {}
for key, value in six.iteritems(__utils__['cloud.bootstrap'](vm_, __opts__)):
ret.setdefault(key, value)
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(instance)
)
event_data = {
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
'instance_id': vm_['instance_id'],
}
if volumes:
event_data['volumes'] = volumes
if ssm_document:
event_data['ssm_document'] = ssm_document
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', event_data, list(event_data)),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# Ensure that the latest node data is returned
node = _get_node(instance_id=vm_['instance_id'])
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
ret.update(node)
# Add any block device tags specified
ex_blockdevicetags = {}
blockdevicemappings_holder = block_device_mappings(vm_)
if blockdevicemappings_holder:
for _bd in blockdevicemappings_holder:
if 'tag' in _bd:
ex_blockdevicetags[_bd['DeviceName']] = _bd['tag']
block_device_volume_id_map = {}
if ex_blockdevicetags:
for _device, _map in six.iteritems(ret['blockDeviceMapping']):
bd_items = []
if isinstance(_map, dict):
bd_items.append(_map)
else:
for mapitem in _map:
bd_items.append(mapitem)
for blockitem in bd_items:
if blockitem['deviceName'] in ex_blockdevicetags and 'Name' not in ex_blockdevicetags[blockitem['deviceName']]:
ex_blockdevicetags[blockitem['deviceName']]['Name'] = vm_['name']
if blockitem['deviceName'] in ex_blockdevicetags:
block_device_volume_id_map[blockitem[ret['rootDeviceType']]['volumeId']] = ex_blockdevicetags[blockitem['deviceName']]
if block_device_volume_id_map:
for volid, tags in six.iteritems(block_device_volume_id_map):
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/block_volume_{0}/tagging'.format(str(volid)),
args={'tags': tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.wait_for_fun'](
set_tags,
timeout=30,
name=vm_['name'],
tags=tags,
resource_id=volid,
call='action',
location=location
)
return ret
def queue_instances(instances):
'''
Queue a set of instances to be provisioned later. Expects a list.
Currently this only queries node data, and then places it in the cloud
cache (if configured). If the salt-cloud-reactor is being used, these
instances will be automatically provisioned using that.
For more information about the salt-cloud-reactor, see:
https://github.com/saltstack-formulas/salt-cloud-reactor
'''
for instance_id in instances:
node = _get_node(instance_id=instance_id)
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
'''
Create and attach volumes to created node
'''
if call != 'action':
raise SaltCloudSystemExit(
'The create_attach_volumes action must be called with '
'-a or --action.'
)
if 'instance_id' not in kwargs:
kwargs['instance_id'] = _get_node(name)['instanceId']
if isinstance(kwargs['volumes'], six.string_types):
volumes = salt.utils.yaml.safe_load(kwargs['volumes'])
else:
volumes = kwargs['volumes']
ret = []
for volume in volumes:
created = False
volume_name = '{0} on {1}'.format(volume['device'], name)
volume_dict = {
'volume_name': volume_name,
'zone': kwargs['zone']
}
if 'volume_id' in volume:
volume_dict['volume_id'] = volume['volume_id']
elif 'snapshot' in volume:
volume_dict['snapshot'] = volume['snapshot']
elif 'size' in volume:
volume_dict['size'] = volume['size']
else:
raise SaltCloudConfigError(
'Cannot create volume. Please define one of \'volume_id\', '
'\'snapshot\', or \'size\''
)
if 'tags' in volume:
volume_dict['tags'] = volume['tags']
if 'type' in volume:
volume_dict['type'] = volume['type']
if 'iops' in volume:
volume_dict['iops'] = volume['iops']
if 'encrypted' in volume:
volume_dict['encrypted'] = volume['encrypted']
if 'kmskeyid' in volume:
volume_dict['kmskeyid'] = volume['kmskeyid']
if 'volume_id' not in volume_dict:
created_volume = create_volume(volume_dict, call='function', wait_to_finish=wait_to_finish)
created = True
if 'volumeId' in created_volume:
volume_dict['volume_id'] = created_volume['volumeId']
attach = attach_volume(
name,
{'volume_id': volume_dict['volume_id'],
'device': volume['device']},
instance_id=kwargs['instance_id'],
call='action'
)
# Update the delvol parameter for this volume
delvols_on_destroy = kwargs.get('del_all_vols_on_destroy', None)
if attach and created and delvols_on_destroy is not None:
_toggle_delvol(instance_id=kwargs['instance_id'],
device=volume['device'],
value=delvols_on_destroy)
if attach:
msg = (
'{0} attached to {1} (aka {2}) as device {3}'.format(
volume_dict['volume_id'],
kwargs['instance_id'],
name,
volume['device']
)
)
log.info(msg)
ret.append(msg)
return ret
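# Illustrative 'volumes' entry (hypothetical values): each item needs a
# 'device' plus one of 'volume_id', 'snapshot' or 'size', and may carry the
# optional 'type', 'iops', 'encrypted', 'kmskeyid' and 'tags' keys handled
# above:
#
#   volumes:
#     - device: /dev/sdf
#       size: 100
#       type: gp2
#       tags:
#         purpose: data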
def stop(name, call=None):
'''
Stop a node
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Stopping node %s', name)
instance_id = _get_node(name)['instanceId']
__utils__['cloud.fire_event'](
'event',
'stopping instance',
'salt/cloud/{0}/stopping'.format(name),
args={'name': name, 'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {'Action': 'StopInstances',
'InstanceId.1': instance_id}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return result
def set_tags(name=None,
tags=None,
call=None,
location=None,
instance_id=None,
resource_id=None,
kwargs=None): # pylint: disable=W0613
'''
Set tags for a resource. Normally a VM name or instance_id is passed in,
but a resource_id may be passed instead. If both are passed in, the
instance_id will be used.
CLI Examples:
.. code-block:: bash
salt-cloud -a set_tags mymachine tag1=somestuff tag2='Other stuff'
salt-cloud -a set_tags resource_id=vol-3267ab32 tag=somestuff
'''
if kwargs is None:
kwargs = {}
if location is None:
location = get_location()
if instance_id is None:
if 'resource_id' in kwargs:
resource_id = kwargs['resource_id']
del kwargs['resource_id']
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
del kwargs['instance_id']
if resource_id is None:
if instance_id is None:
instance_id = _get_node(name=name, instance_id=None, location=location)['instanceId']
else:
instance_id = resource_id
# This second check is a safety, in case the above still failed to produce
# a usable ID
if instance_id is None:
return {
'Error': 'A valid instance_id or resource_id was not specified.'
}
params = {'Action': 'CreateTags',
'ResourceId.1': instance_id}
log.debug('Tags to set for %s: %s', name, tags)
if kwargs and not tags:
tags = kwargs
for idx, (tag_k, tag_v) in enumerate(six.iteritems(tags)):
params['Tag.{0}.Key'.format(idx)] = tag_k
params['Tag.{0}.Value'.format(idx)] = tag_v
attempts = 0
while attempts < aws.AWS_MAX_RETRIES:
aws.query(params,
setname='tagSet',
location=location,
provider=get_provider(),
opts=__opts__,
sigver='4')
settags = get_tags(
instance_id=instance_id, call='action', location=location
)
log.debug('Setting the tags returned: %s', settags)
failed_to_set_tags = False
for tag in settags:
if tag['key'] not in tags:
# We were not setting this tag
continue
if tag.get('value') is None and tags.get(tag['key']) == '':
# This is a correctly set tag with no value
continue
if six.text_type(tags.get(tag['key'])) != six.text_type(tag['value']):
# Not set to the proper value!?
log.debug(
'Setting the tag %s returned %s instead of %s',
tag['key'], tags.get(tag['key']), tag['value']
)
failed_to_set_tags = True
break
if failed_to_set_tags:
log.warning('Failed to set tags. Remaining attempts %s', attempts)
attempts += 1
aws.sleep_exponential_backoff(attempts)
continue
return settags
raise SaltCloudSystemExit(
'Failed to set tags on {0}!'.format(name)
)
def get_tags(name=None,
instance_id=None,
call=None,
location=None,
kwargs=None,
resource_id=None): # pylint: disable=W0613
'''
Retrieve tags for a resource. Normally a VM name or instance_id is passed
in, but a resource_id may be passed instead. If both are passed in, the
instance_id will be used.
CLI Examples:
.. code-block:: bash
salt-cloud -a get_tags mymachine
salt-cloud -a get_tags resource_id=vol-3267ab32
'''
if location is None:
location = get_location()
if instance_id is None:
if resource_id is None:
if name:
instance_id = _get_node(name)['instanceId']
elif 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
elif 'resource_id' in kwargs:
instance_id = kwargs['resource_id']
else:
instance_id = resource_id
params = {'Action': 'DescribeTags',
'Filter.1.Name': 'resource-id',
'Filter.1.Value': instance_id}
return aws.query(params,
setname='tagSet',
location=location,
provider=get_provider(),
opts=__opts__,
sigver='4')
def del_tags(name=None,
kwargs=None,
call=None,
instance_id=None,
resource_id=None): # pylint: disable=W0613
'''
Delete tags for a resource. Normally a VM name or instance_id is passed in,
but a resource_id may be passed instead. If both are passed in, the
instance_id will be used.
CLI Examples:
.. code-block:: bash
salt-cloud -a del_tags mymachine tags=mytag,
salt-cloud -a del_tags mymachine tags=tag1,tag2,tag3
salt-cloud -a del_tags resource_id=vol-3267ab32 tags=tag1,tag2,tag3
'''
if kwargs is None:
kwargs = {}
if 'tags' not in kwargs:
raise SaltCloudSystemExit(
'A tag or tags must be specified using tags=list,of,tags'
)
if not name and 'resource_id' in kwargs:
instance_id = kwargs['resource_id']
del kwargs['resource_id']
if not instance_id:
instance_id = _get_node(name)['instanceId']
params = {'Action': 'DeleteTags',
'ResourceId.1': instance_id}
for idx, tag in enumerate(kwargs['tags'].split(',')):
params['Tag.{0}.Key'.format(idx)] = tag
aws.query(params,
setname='tagSet',
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if resource_id:
return get_tags(resource_id=resource_id)
else:
return get_tags(instance_id=instance_id)
def rename(name, kwargs, call=None):
'''
Properly rename a node. Pass in the new name as "newname".
CLI Example:
.. code-block:: bash
salt-cloud -a rename mymachine newname=yourmachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The rename action must be called with -a or --action.'
)
log.info('Renaming %s to %s', name, kwargs['newname'])
set_tags(name, {'Name': kwargs['newname']}, call='action')
salt.utils.cloud.rename_key(
__opts__['pki_dir'], name, kwargs['newname']
)
def destroy(name, call=None):
'''
Destroy a node. Will check termination protection and warn if enabled.
CLI Example:
.. code-block:: bash
salt-cloud --destroy mymachine
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
node_metadata = _get_node(name)
instance_id = node_metadata['instanceId']
sir_id = node_metadata.get('spotInstanceRequestId')
protected = show_term_protect(
name=name,
instance_id=instance_id,
call='action',
quiet=True
)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name, 'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if protected == 'true':
raise SaltCloudSystemExit(
'This instance has been protected from being destroyed. '
'Use the following command to disable protection:\n\n'
'salt-cloud -a disable_term_protect {0}'.format(
name
)
)
ret = {}
# Default behavior is to rename EC2 VMs when destroyed
# via salt-cloud, unless explicitly set to False.
rename_on_destroy = config.get_cloud_config_value('rename_on_destroy',
get_configured_provider(),
__opts__,
search_global=False)
if rename_on_destroy is not False:
newname = '{0}-DEL{1}'.format(name, uuid.uuid4().hex)
rename(name, kwargs={'newname': newname}, call='action')
log.info(
'Machine will be identified as %s until it has been '
'cleaned up.', newname
)
ret['newname'] = newname
params = {'Action': 'TerminateInstances',
'InstanceId.1': instance_id}
location = get_location()
provider = get_provider()
result = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
log.info(result)
ret.update(result[0])
# If this instance is part of a spot instance request, we
# need to cancel it as well
if sir_id is not None:
params = {'Action': 'CancelSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
result = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
ret['spotInstance'] = result[0]
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name, 'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.cachedir_index_del'](name)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return ret |
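# A small illustration (not part of the driver) of the throw-away name that
# rename_on_destroy produces before termination, so the minion key and cached
# metadata no longer collide with a future VM of the same name.
import uuid

def _deletion_name(name):
    return '{0}-DEL{1}'.format(name, uuid.uuid4().hex)

# _deletion_name('mymachine') -> 'mymachine-DEL' followed by 32 hex characters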
def reboot(name, call=None):
'''
Reboot a node.
CLI Example:
.. code-block:: bash
salt-cloud -a reboot mymachine
'''
instance_id = _get_node(name)['instanceId']
params = {'Action': 'RebootInstances',
'InstanceId.1': instance_id}
result = aws.query(params,
setname='tagSet',
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if result == []:
log.info('Complete')
return {'Reboot': 'Complete'} |
def show_image(kwargs, call=None):
'''
Show the details from EC2 concerning an AMI
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_image action must be called with -f or --function.'
)
params = {'ImageId.1': kwargs['image'],
'Action': 'DescribeImages'}
result = aws.query(params,
setname='tagSet',
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
log.info(result)
return result |
def show_instance(name=None, instance_id=None, call=None, kwargs=None):
'''
    Show the details from EC2 concerning an instance.
Can be called as an action (which requires a name):
.. code-block:: bash
salt-cloud -a show_instance myinstance
...or as a function (which requires either a name or instance_id):
.. code-block:: bash
salt-cloud -f show_instance my-ec2 name=myinstance
salt-cloud -f show_instance my-ec2 instance_id=i-d34db33f
'''
if not name and call == 'action':
raise SaltCloudSystemExit(
'The show_instance action requires a name.'
)
if call == 'function':
name = kwargs.get('name', None)
instance_id = kwargs.get('instance_id', None)
if not name and not instance_id:
raise SaltCloudSystemExit(
'The show_instance function requires '
'either a name or an instance_id'
)
node = _get_node(name=name, instance_id=instance_id)
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node |
def _extract_instance_info(instances):
'''
Given an instance query, return a dict of all instance data
'''
ret = {}
for instance in instances:
# items could be type dict or list (for stopped EC2 instances)
if isinstance(instance['instancesSet']['item'], list):
for item in instance['instancesSet']['item']:
name = _extract_name_tag(item)
ret[name] = item
ret[name]['name'] = name
ret[name].update(
dict(
id=item['instanceId'],
image=item['imageId'],
size=item['instanceType'],
state=item['instanceState']['name'],
private_ips=item.get('privateIpAddress', []),
public_ips=item.get('ipAddress', [])
)
)
else:
item = instance['instancesSet']['item']
name = _extract_name_tag(item)
ret[name] = item
ret[name]['name'] = name
ret[name].update(
dict(
id=item['instanceId'],
image=item['imageId'],
size=item['instanceType'],
state=item['instanceState']['name'],
private_ips=item.get('privateIpAddress', []),
public_ips=item.get('ipAddress', [])
)
)
return ret |
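# Illustrative helper only: EC2 describe calls return a bare dict when a
# reservation holds a single instance and a list when it holds several.
# Normalising to a list first lets both shapes share one code path.
def _as_list(item_or_list):
    return item_or_list if isinstance(item_or_list, list) else [item_or_list]

# for item in _as_list(instance['instancesSet']['item']):
#     ...handle every instance dict the same way...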
def _list_nodes_full(location=None):
'''
    Return a list of the VMs that are in this location
'''
provider = __active_provider_name__ or 'ec2'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
params = {'Action': 'DescribeInstances'}
instances = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if 'error' in instances:
raise SaltCloudSystemExit(
'An error occurred while listing nodes: {0}'.format(
instances['error']['Errors']['Error']['Message']
)
)
ret = _extract_instance_info(instances)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret |
def list_nodes_min(location=None, call=None):
'''
Return a list of the VMs that are on the provider. Only a list of VM names,
and their state, is returned. This is the minimum amount of information
needed to check for existing VMs.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
ret = {}
params = {'Action': 'DescribeInstances'}
instances = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if 'error' in instances:
raise SaltCloudSystemExit(
'An error occurred while listing nodes: {0}'.format(
instances['error']['Errors']['Error']['Message']
)
)
for instance in instances:
if isinstance(instance['instancesSet']['item'], list):
items = instance['instancesSet']['item']
else:
items = [instance['instancesSet']['item']]
for item in items:
state = item['instanceState']['name']
name = _extract_name_tag(item)
id = item['instanceId']
ret[name] = {'state': state, 'id': id}
return ret |
def list_nodes_select(call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(get_location()), __opts__['query.selection'], call,
) |
def show_term_protect(name=None, instance_id=None, call=None, quiet=False):
'''
Show the details from EC2 concerning an instance's termination protection state
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_term_protect action must be called with -a or --action.'
)
if not instance_id:
instance_id = _get_node(name)['instanceId']
params = {'Action': 'DescribeInstanceAttribute',
'InstanceId': instance_id,
'Attribute': 'disableApiTermination'}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
return_root=True,
opts=__opts__,
sigver='4')
disable_protect = False
for item in result:
if 'value' in item:
disable_protect = item['value']
break
log.log(
logging.DEBUG if quiet is True else logging.INFO,
'Termination Protection is %s for %s',
disable_protect == 'true' and 'enabled' or 'disabled', name
)
return disable_protect |
def show_detailed_monitoring(name=None, instance_id=None, call=None, quiet=False):
'''
Show the details from EC2 regarding cloudwatch detailed monitoring.
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_detailed_monitoring action must be called with -a or --action.'
)
location = get_location()
if six.text_type(name).startswith('i-') and (len(name) == 10 or len(name) == 19):
instance_id = name
if not name and not instance_id:
raise SaltCloudSystemExit(
            'The show_detailed_monitoring action must be provided with '
            'a name or instance ID'
)
matched = _get_node(name=name, instance_id=instance_id, location=location)
log.log(
logging.DEBUG if quiet is True else logging.INFO,
'Detailed Monitoring is %s for %s', matched['monitoring'], name
)
return matched['monitoring'] |
def _toggle_term_protect(name, value):
'''
Enable or Disable termination protection on a node
'''
instance_id = _get_node(name)['instanceId']
params = {'Action': 'ModifyInstanceAttribute',
'InstanceId': instance_id,
'DisableApiTermination.Value': value}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
return_root=True,
opts=__opts__,
sigver='4')
return show_term_protect(name=name, instance_id=instance_id, call='action') |
def disable_detailed_monitoring(name, call=None):
'''
    Disable detailed CloudWatch monitoring on a node
    CLI Example:
    .. code-block:: bash
        salt-cloud -a disable_detailed_monitoring mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The disable_detailed_monitoring action must be called with '
'-a or --action.'
)
instance_id = _get_node(name)['instanceId']
params = {'Action': 'UnmonitorInstances',
'InstanceId.1': instance_id}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
return_root=True,
opts=__opts__,
sigver='4')
return show_detailed_monitoring(name=name, instance_id=instance_id, call='action') |
def show_delvol_on_destroy(name, kwargs=None, call=None):
'''
    Show the delete-on-termination setting for all/specified EBS volumes attached to an instance
CLI Example:
.. code-block:: bash
salt-cloud -a show_delvol_on_destroy mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_delvol_on_destroy action must be called '
'with -a or --action.'
)
if not kwargs:
kwargs = {}
instance_id = kwargs.get('instance_id', None)
device = kwargs.get('device', None)
volume_id = kwargs.get('volume_id', None)
if instance_id is None:
instance_id = _get_node(name)['instanceId']
params = {'Action': 'DescribeInstances',
'InstanceId.1': instance_id}
data = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
if not isinstance(blockmap['item'], list):
blockmap['item'] = [blockmap['item']]
items = []
for idx, item in enumerate(blockmap['item']):
device_name = item['deviceName']
if device is not None and device != device_name:
continue
if volume_id is not None and volume_id != item['ebs']['volumeId']:
continue
info = {
'device_name': device_name,
'volume_id': item['ebs']['volumeId'],
'deleteOnTermination': item['ebs']['deleteOnTermination']
}
items.append(info)
return items |
def keepvol_on_destroy(name, kwargs=None, call=None):
'''
Do not delete all/specified EBS volumes upon instance termination
CLI Example:
.. code-block:: bash
salt-cloud -a keepvol_on_destroy mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The keepvol_on_destroy action must be called with -a or --action.'
)
if not kwargs:
kwargs = {}
device = kwargs.get('device', None)
volume_id = kwargs.get('volume_id', None)
return _toggle_delvol(name=name, device=device,
volume_id=volume_id, value='false') |
def register_image(kwargs=None, call=None):
'''
Create an ami from a snapshot
CLI Example:
.. code-block:: bash
salt-cloud -f register_image my-ec2-config ami_name=my_ami description="my description"
root_device_name=/dev/xvda snapshot_id=snap-xxxxxxxx
'''
if call != 'function':
log.error(
            'The register_image function must be called with -f or --function.'
)
return False
if 'ami_name' not in kwargs:
log.error('ami_name must be specified to register an image.')
return False
block_device_mapping = kwargs.get('block_device_mapping', None)
if not block_device_mapping:
if 'snapshot_id' not in kwargs:
log.error('snapshot_id or block_device_mapping must be specified to register an image.')
return False
if 'root_device_name' not in kwargs:
log.error('root_device_name or block_device_mapping must be specified to register an image.')
return False
block_device_mapping = [{
'DeviceName': kwargs['root_device_name'],
'Ebs': {
'VolumeType': kwargs.get('volume_type', 'gp2'),
'SnapshotId': kwargs['snapshot_id'],
}
}]
if not isinstance(block_device_mapping, list):
block_device_mapping = [block_device_mapping]
params = {'Action': 'RegisterImage',
'Name': kwargs['ami_name']}
params.update(_param_from_config('BlockDeviceMapping', block_device_mapping))
if 'root_device_name' in kwargs:
params['RootDeviceName'] = kwargs['root_device_name']
if 'description' in kwargs:
params['Description'] = kwargs['description']
if 'virtualization_type' in kwargs:
params['VirtualizationType'] = kwargs['virtualization_type']
if 'architecture' in kwargs:
params['Architecture'] = kwargs['architecture']
log.debug(params)
data = aws.query(params,
return_url=True,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
r_data = {}
for d in data[0]:
for k, v in d.items():
r_data[k] = v
return r_data |
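# The function above delegates the nested mapping to _param_from_config. The
# stand-in below is hypothetical and only shows the flattened, indexed query
# parameters such a mapping roughly corresponds to; it is not the module's
# actual helper.
def _flatten_block_device_mapping(mapping_list):
    params = {}
    for index, mapping in enumerate(mapping_list, start=1):
        prefix = 'BlockDeviceMapping.{0}'.format(index)
        params['{0}.DeviceName'.format(prefix)] = mapping['DeviceName']
        for key, value in mapping.get('Ebs', {}).items():
            params['{0}.Ebs.{1}'.format(prefix, key)] = value
    return params

# _flatten_block_device_mapping([{'DeviceName': '/dev/xvda',
#                                 'Ebs': {'SnapshotId': 'snap-xxxxxxxx',
#                                         'VolumeType': 'gp2'}}])
# -> {'BlockDeviceMapping.1.DeviceName': '/dev/xvda',
#     'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-xxxxxxxx',
#     'BlockDeviceMapping.1.Ebs.VolumeType': 'gp2'}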
def create_volume(kwargs=None, call=None, wait_to_finish=False):
'''
Create a volume.
zone
The availability zone used to create the volume. Required. String.
size
The size of the volume, in GiBs. Defaults to ``10``. Integer.
snapshot
        The snapshot-id from which to create the volume. String.
type
The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned
IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for
Magnetic volumes. String.
iops
The number of I/O operations per second (IOPS) to provision for the volume,
with a maximum ratio of 50 IOPS/GiB. Only valid for Provisioned IOPS SSD
volumes. Integer.
This option will only be set if ``type`` is also specified as ``io1``.
encrypted
Specifies whether the volume will be encrypted. Boolean.
If ``snapshot`` is also given in the list of kwargs, then this value is ignored
since volumes that are created from encrypted snapshots are also automatically
encrypted.
tags
The tags to apply to the volume during creation. Dictionary.
call
The ``create_volume`` function must be called with ``-f`` or ``--function``.
String.
wait_to_finish
Whether or not to wait for the volume to be available. Boolean. Defaults to
``False``.
CLI Examples:
.. code-block:: bash
salt-cloud -f create_volume my-ec2-config zone=us-east-1b
        salt-cloud -f create_volume my-ec2-config zone=us-east-1b tags='{"tag1": "val1", "tag2": "val2"}'
'''
if call != 'function':
log.error(
'The create_volume function must be called with -f or --function.'
)
return False
if 'zone' not in kwargs:
log.error('An availability zone must be specified to create a volume.')
return False
if 'size' not in kwargs and 'snapshot' not in kwargs:
# This number represents GiB
kwargs['size'] = '10'
params = {'Action': 'CreateVolume',
'AvailabilityZone': kwargs['zone']}
if 'size' in kwargs:
params['Size'] = kwargs['size']
if 'snapshot' in kwargs:
params['SnapshotId'] = kwargs['snapshot']
if 'type' in kwargs:
params['VolumeType'] = kwargs['type']
if 'iops' in kwargs and kwargs.get('type', 'standard') == 'io1':
params['Iops'] = kwargs['iops']
# You can't set `encrypted` if you pass a snapshot
if 'encrypted' in kwargs and 'snapshot' not in kwargs:
params['Encrypted'] = kwargs['encrypted']
if 'kmskeyid' in kwargs:
params['KmsKeyId'] = kwargs['kmskeyid']
if 'kmskeyid' in kwargs and 'encrypted' not in kwargs:
log.error(
'If a KMS Key ID is specified, encryption must be enabled'
)
return False
log.debug(params)
data = aws.query(params,
return_url=True,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
r_data = {}
for d in data[0]:
for k, v in six.iteritems(d):
r_data[k] = v
volume_id = r_data['volumeId']
# Allow tags to be set upon creation
if 'tags' in kwargs:
if isinstance(kwargs['tags'], six.string_types):
tags = salt.utils.yaml.safe_load(kwargs['tags'])
else:
tags = kwargs['tags']
if isinstance(tags, dict):
new_tags = set_tags(tags=tags,
resource_id=volume_id,
call='action',
location=get_location())
r_data['tags'] = new_tags
# Waits till volume is available
if wait_to_finish:
salt.utils.cloud.run_func_until_ret_arg(fun=describe_volumes,
kwargs={'volume_id': volume_id},
fun_call=call,
argument_being_watched='status',
required_argument_response='available')
return r_data |
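# A condensed sketch of how the documented kwargs map onto CreateVolume query
# parameters (illustration only; the function above also handles logging,
# KMS keys, tagging and the optional wait loop).
def _create_volume_params(zone, size=None, snapshot=None, volume_type=None,
                          iops=None, encrypted=None):
    params = {'Action': 'CreateVolume', 'AvailabilityZone': zone}
    if size is not None:
        params['Size'] = size
    if snapshot is not None:
        params['SnapshotId'] = snapshot
    if volume_type is not None:
        params['VolumeType'] = volume_type
    if iops is not None and volume_type == 'io1':
        params['Iops'] = iops
    # Volumes created from encrypted snapshots inherit encryption, so only
    # set the flag for brand new volumes.
    if encrypted is not None and snapshot is None:
        params['Encrypted'] = encrypted
    return params

# _create_volume_params('us-east-1b', size='10', volume_type='gp2')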
def attach_volume(name=None, kwargs=None, instance_id=None, call=None):
'''
Attach a volume to an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The attach_volume action must be called with -a or --action.'
)
if not kwargs:
kwargs = {}
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
if name and not instance_id:
instance_id = _get_node(name)['instanceId']
if not name and not instance_id:
log.error('Either a name or an instance_id is required.')
return False
if 'volume_id' not in kwargs:
log.error('A volume_id is required.')
return False
if 'device' not in kwargs:
log.error('A device is required (ex. /dev/sdb1).')
return False
params = {'Action': 'AttachVolume',
'VolumeId': kwargs['volume_id'],
'InstanceId': instance_id,
'Device': kwargs['device']}
log.debug(params)
vm_ = get_configured_provider()
data = salt.utils.cloud.wait_for_ip(
__attach_vol_to_instance,
update_args=(params, kwargs, instance_id),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
return data |
def describe_volumes(kwargs=None, call=None):
'''
Describe a volume (or volumes)
volume_id
One or more volume IDs. Multiple IDs must be separated by ",".
TODO: Add all of the filters.
'''
if call != 'function':
log.error(
'The describe_volumes function must be called with -f '
'or --function.'
)
return False
if not kwargs:
kwargs = {}
params = {'Action': 'DescribeVolumes'}
    if 'volume_id' in kwargs:
        volume_ids = kwargs['volume_id'].split(',')
        for volume_index, volume_id in enumerate(volume_ids):
            params['VolumeId.{0}'.format(volume_index)] = volume_id
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data |
def create_keypair(kwargs=None, call=None):
'''
Create an SSH keypair
'''
if call != 'function':
log.error(
'The create_keypair function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
params = {'Action': 'CreateKeyPair',
'KeyName': kwargs['keyname']}
data = aws.query(params,
return_url=True,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
keys = [x for x in data[0] if 'requestId' not in x]
return (keys, data[1]) |
def import_keypair(kwargs=None, call=None):
'''
Import an SSH public key.
.. versionadded:: 2015.8.3
'''
if call != 'function':
log.error(
'The import_keypair function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
if 'file' not in kwargs:
log.error('A public key file is required.')
return False
params = {'Action': 'ImportKeyPair',
'KeyName': kwargs['keyname']}
    public_key_file = kwargs['file']
    public_key = None
    if os.path.exists(public_key_file):
        with salt.utils.files.fopen(public_key_file, 'r') as fh_:
            public_key = salt.utils.stringutils.to_unicode(fh_.read())
    if public_key is not None:
        params['PublicKeyMaterial'] = base64.b64encode(
            salt.utils.stringutils.to_bytes(public_key)
        )
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data |
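# Sketch of the key material preparation above: the public key file is read
# and Base64-encoded before being sent as PublicKeyMaterial (reading bytes
# keeps this working on Python 3). The path below is a placeholder.
import base64

def _public_key_material(path):
    with open(path, 'rb') as fh_:
        return base64.b64encode(fh_.read())

# params['PublicKeyMaterial'] = _public_key_material('/root/.ssh/id_rsa.pub')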
def delete_keypair(kwargs=None, call=None):
'''
Delete an SSH keypair
'''
if call != 'function':
log.error(
'The delete_keypair function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
params = {'Action': 'DeleteKeyPair',
'KeyName': kwargs['keyname']}
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data |
def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
'''
Create a snapshot.
volume_id
The ID of the Volume from which to create a snapshot.
description
The optional description of the snapshot.
    CLI Examples:
.. code-block:: bash
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826 \\
description="My Snapshot Description"
'''
if call != 'function':
raise SaltCloudSystemExit(
'The create_snapshot function must be called with -f '
'or --function.'
)
if kwargs is None:
kwargs = {}
volume_id = kwargs.get('volume_id', None)
description = kwargs.get('description', '')
if volume_id is None:
raise SaltCloudSystemExit(
'A volume_id must be specified to create a snapshot.'
)
params = {'Action': 'CreateSnapshot',
'VolumeId': volume_id,
'Description': description}
log.debug(params)
data = aws.query(params,
return_url=True,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')[0]
r_data = {}
for d in data:
for k, v in six.iteritems(d):
r_data[k] = v
if 'snapshotId' in r_data:
snapshot_id = r_data['snapshotId']
# Waits till volume is available
if wait_to_finish:
salt.utils.cloud.run_func_until_ret_arg(fun=describe_snapshots,
kwargs={'snapshot_id': snapshot_id},
fun_call=call,
argument_being_watched='status',
required_argument_response='completed')
return r_data |
def copy_snapshot(kwargs=None, call=None):
'''
Copy a snapshot
'''
if call != 'function':
log.error(
'The copy_snapshot function must be called with -f or --function.'
)
return False
    if not kwargs:
        kwargs = {}
    if 'source_region' not in kwargs:
log.error('A source_region must be specified to copy a snapshot.')
return False
if 'source_snapshot_id' not in kwargs:
log.error('A source_snapshot_id must be specified to copy a snapshot.')
return False
if 'description' not in kwargs:
kwargs['description'] = ''
params = {'Action': 'CopySnapshot'}
if 'source_region' in kwargs:
params['SourceRegion'] = kwargs['source_region']
if 'source_snapshot_id' in kwargs:
params['SourceSnapshotId'] = kwargs['source_snapshot_id']
if 'description' in kwargs:
params['Description'] = kwargs['description']
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data |
def describe_snapshots(kwargs=None, call=None):
'''
Describe a snapshot (or snapshots)
snapshot_id
One or more snapshot IDs. Multiple IDs must be separated by ",".
owner
Return the snapshots owned by the specified owner. Valid values
include: self, amazon, <AWS Account ID>. Multiple values must be
separated by ",".
restorable_by
One or more AWS accounts IDs that can create volumes from the snapshot.
Multiple aws account IDs must be separated by ",".
TODO: Add all of the filters.
'''
if call != 'function':
log.error(
            'The describe_snapshots function must be called with -f '
'or --function.'
)
return False
    if not kwargs:
        kwargs = {}
    params = {'Action': 'DescribeSnapshots'}
    # AWS expects the non-plural parameter name (snapshot_id); accept
    # snapshot_ids as an alias for convenience.
    if 'snapshot_ids' in kwargs:
kwargs['snapshot_id'] = kwargs['snapshot_ids']
if 'snapshot_id' in kwargs:
snapshot_ids = kwargs['snapshot_id'].split(',')
for snapshot_index, snapshot_id in enumerate(snapshot_ids):
params['SnapshotId.{0}'.format(snapshot_index)] = snapshot_id
if 'owner' in kwargs:
owners = kwargs['owner'].split(',')
for owner_index, owner in enumerate(owners):
params['Owner.{0}'.format(owner_index)] = owner
if 'restorable_by' in kwargs:
restorable_bys = kwargs['restorable_by'].split(',')
for restorable_by_index, restorable_by in enumerate(restorable_bys):
params[
'RestorableBy.{0}'.format(restorable_by_index)
] = restorable_by
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data |
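# A compact sketch of the comma-separated-kwarg convention used above: each
# value is split on ',' and expanded into numbered query parameters
# (mirroring the 0-based indexing the function uses).
def _indexed_params(prefix, csv_value):
    return {'{0}.{1}'.format(prefix, index): value
            for index, value in enumerate(csv_value.split(','))}

# _indexed_params('SnapshotId', 'snap-aaaa,snap-bbbb')
# -> {'SnapshotId.0': 'snap-aaaa', 'SnapshotId.1': 'snap-bbbb'}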
def get_console_output(
name=None,
location=None,
instance_id=None,
call=None,
kwargs=None,
):
'''
Show the console output from the instance.
By default, returns decoded data, not the Base64-encoded data that is
actually returned from the EC2 API.
'''
if call != 'action':
raise SaltCloudSystemExit(
'The get_console_output action must be called with '
'-a or --action.'
)
if location is None:
location = get_location()
if not instance_id:
instance_id = _get_node(name)['instanceId']
if kwargs is None:
kwargs = {}
if instance_id is None:
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
del kwargs['instance_id']
params = {'Action': 'GetConsoleOutput',
'InstanceId': instance_id}
ret = {}
data = aws.query(params,
return_root=True,
location=location,
provider=get_provider(),
opts=__opts__,
sigver='4')
for item in data:
if next(six.iterkeys(item)) == 'output':
ret['output_decoded'] = binascii.a2b_base64(next(six.itervalues(item)))
else:
ret[next(six.iterkeys(item))] = next(six.itervalues(item))
return ret |
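# What the decoding step above amounts to: GetConsoleOutput returns the
# console text Base64-encoded, and the action decodes it into
# 'output_decoded' before returning. Minimal illustration:
import binascii

def _decode_console_output(encoded):
    return binascii.a2b_base64(encoded)

# _decode_console_output('aGVsbG8gd29ybGQ=') -> b'hello world'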
def get_password_data(
name=None,
kwargs=None,
instance_id=None,
call=None,
):
'''
Return password data for a Windows instance.
By default only the encrypted password data will be returned. However, if a
key_file is passed in, then a decrypted password will also be returned.
Note that the key_file references the private key that was used to generate
the keypair associated with this instance. This private key will _not_ be
transmitted to Amazon; it is only used internally inside of Salt Cloud to
decrypt data _after_ it has been received from Amazon.
CLI Examples:
.. code-block:: bash
salt-cloud -a get_password_data mymachine
salt-cloud -a get_password_data mymachine key_file=/root/ec2key.pem
Note: PKCS1_v1_5 was added in PyCrypto 2.5
'''
if call != 'action':
raise SaltCloudSystemExit(
'The get_password_data action must be called with '
'-a or --action.'
)
if not instance_id:
instance_id = _get_node(name)['instanceId']
if kwargs is None:
kwargs = {}
if instance_id is None:
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
del kwargs['instance_id']
params = {'Action': 'GetPasswordData',
'InstanceId': instance_id}
ret = {}
data = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for item in data:
ret[next(six.iterkeys(item))] = next(six.itervalues(item))
if not HAS_M2 and not HAS_PYCRYPTO:
return ret
if 'key' not in kwargs:
if 'key_file' in kwargs:
with salt.utils.files.fopen(kwargs['key_file'], 'r') as kf_:
kwargs['key'] = salt.utils.stringutils.to_unicode(kf_.read())
if 'key' in kwargs:
pwdata = ret.get('passwordData', None)
if pwdata is not None:
rsa_key = kwargs['key']
pwdata = base64.b64decode(pwdata)
if HAS_M2:
key = RSA.load_key_string(rsa_key.encode('ascii'))
password = key.private_decrypt(pwdata, RSA.pkcs1_padding)
else:
dsize = Crypto.Hash.SHA.digest_size
sentinel = Crypto.Random.new().read(15 + dsize)
key_obj = Crypto.PublicKey.RSA.importKey(rsa_key)
key_obj = PKCS1_v1_5.new(key_obj)
password = key_obj.decrypt(pwdata, sentinel)
ret['password'] = salt.utils.stringutils.to_unicode(password)
return ret |
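# Offline sketch of the PyCrypto decryption branch above: the Windows
# password comes back Base64-encoded and RSA-encrypted with the instance's
# keypair, so the matching private key (PEM text) decrypts it locally.
# Assumes PyCrypto/pycryptodome is installed; illustration only.
import base64

import Crypto.Hash.SHA
import Crypto.Random
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA


def _decrypt_password_data(password_data_b64, private_key_pem):
    pwdata = base64.b64decode(password_data_b64)
    sentinel = Crypto.Random.new().read(15 + Crypto.Hash.SHA.digest_size)
    cipher = PKCS1_v1_5.new(RSA.importKey(private_key_pem))
    return cipher.decrypt(pwdata, sentinel)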
def update_pricing(kwargs=None, call=None):
'''
Download most recent pricing information from AWS and convert to a local
JSON file.
CLI Examples:
.. code-block:: bash
salt-cloud -f update_pricing my-ec2-config
salt-cloud -f update_pricing my-ec2-config type=linux
.. versionadded:: 2015.8.0
'''
sources = {
'linux': 'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js',
'rhel': 'https://a0.awsstatic.com/pricing/1/ec2/rhel-od.min.js',
'sles': 'https://a0.awsstatic.com/pricing/1/ec2/sles-od.min.js',
'mswin': 'https://a0.awsstatic.com/pricing/1/ec2/mswin-od.min.js',
'mswinsql': 'https://a0.awsstatic.com/pricing/1/ec2/mswinSQL-od.min.js',
'mswinsqlweb': 'https://a0.awsstatic.com/pricing/1/ec2/mswinSQLWeb-od.min.js',
}
if kwargs is None:
kwargs = {}
if 'type' not in kwargs:
for source in sources:
_parse_pricing(sources[source], source)
else:
_parse_pricing(sources[kwargs['type']], kwargs['type']) |
def _parse_pricing(url, name):
'''
Download and parse an individual pricing file from AWS
.. versionadded:: 2015.8.0
'''
price_js = http.query(url, text=True)
items = []
current_item = ''
price_js = re.sub(JS_COMMENT_RE, '', price_js['text'])
price_js = price_js.strip().rstrip(');').lstrip('callback(')
for keyword in (
'vers',
'config',
'rate',
'valueColumns',
'currencies',
'instanceTypes',
'type',
'ECU',
'storageGB',
'name',
'vCPU',
'memoryGiB',
'storageGiB',
'USD',
):
price_js = price_js.replace(keyword, '"{0}"'.format(keyword))
for keyword in ('region', 'price', 'size'):
price_js = price_js.replace(keyword, '"{0}"'.format(keyword))
price_js = price_js.replace('"{0}"s'.format(keyword), '"{0}s"'.format(keyword))
price_js = price_js.replace('""', '"')
# Turn the data into something that's easier/faster to process
regions = {}
price_json = salt.utils.json.loads(price_js)
for region in price_json['config']['regions']:
sizes = {}
for itype in region['instanceTypes']:
for size in itype['sizes']:
sizes[size['size']] = size
regions[region['region']] = sizes
outfile = os.path.join(
__opts__['cachedir'], 'ec2-pricing-{0}.p'.format(name)
)
    with salt.utils.files.fopen(outfile, 'wb') as fho:
salt.utils.msgpack.dump(regions, fho)
return True |
def show_pricing(kwargs=None, call=None):
'''
Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_pricing my-ec2-config profile=my-profile
If pricing sources have not been cached, they will be downloaded. Once they
have been cached, they will not be updated automatically. To manually update
all prices, use the following command:
.. code-block:: bash
salt-cloud -f update_pricing <provider>
.. versionadded:: 2015.8.0
'''
profile = __opts__['profiles'].get(kwargs['profile'], {})
if not profile:
return {'Error': 'The requested profile was not found'}
# Make sure the profile belongs to ec2
provider = profile.get('provider', '0:0')
comps = provider.split(':')
if len(comps) < 2 or comps[1] != 'ec2':
return {'Error': 'The requested profile does not belong to EC2'}
image_id = profile.get('image', None)
image_dict = show_image({'image': image_id}, 'function')
image_info = image_dict[0]
# Find out what platform it is
if image_info.get('imageOwnerAlias', '') == 'amazon':
if image_info.get('platform', '') == 'windows':
image_description = image_info.get('description', '')
if 'sql' in image_description.lower():
if 'web' in image_description.lower():
name = 'mswinsqlweb'
else:
name = 'mswinsql'
else:
name = 'mswin'
elif image_info.get('imageLocation', '').strip().startswith('amazon/suse'):
name = 'sles'
else:
name = 'linux'
elif image_info.get('imageOwnerId', '') == '309956199498':
name = 'rhel'
else:
name = 'linux'
pricefile = os.path.join(
__opts__['cachedir'], 'ec2-pricing-{0}.p'.format(name)
)
if not os.path.isfile(pricefile):
update_pricing({'type': name}, 'function')
    with salt.utils.files.fopen(pricefile, 'rb') as fhi:
ec2_price = salt.utils.stringutils.to_unicode(
salt.utils.msgpack.load(fhi))
region = get_location(profile)
size = profile.get('size', None)
if size is None:
return {'Error': 'The requested profile does not contain a size'}
try:
raw = ec2_price[region][size]
except KeyError:
return {'Error': 'The size ({0}) in the requested profile does not have '
'a price associated with it for the {1} region'.format(size, region)}
ret = {}
if kwargs.get('raw', False):
ret['_raw'] = raw
ret['per_hour'] = 0
for col in raw.get('valueColumns', []):
ret['per_hour'] += decimal.Decimal(col['prices'].get('USD', 0))
ret['per_hour'] = decimal.Decimal(ret['per_hour'])
ret['per_day'] = ret['per_hour'] * 24
ret['per_week'] = ret['per_day'] * 7
ret['per_month'] = ret['per_day'] * 30
ret['per_year'] = ret['per_week'] * 52
return {profile['profile']: ret} |
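# The arithmetic behind the estimate above in isolation: the hourly price is
# summed from the cached valueColumns, then scaled out to day, week, month
# and year using the same factors as the function.
import decimal

def _price_estimate(per_hour):
    per_hour = decimal.Decimal(per_hour)
    per_day = per_hour * 24
    return {'per_hour': per_hour,
            'per_day': per_day,
            'per_week': per_day * 7,
            'per_month': per_day * 30,
            'per_year': per_day * 7 * 52}

# _price_estimate('0.0116')['per_month'] -> Decimal('8.3520')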
def ssm_create_association(name=None, kwargs=None, instance_id=None, call=None):
'''
Associates the specified SSM document with the specified instance
http://docs.aws.amazon.com/ssm/latest/APIReference/API_CreateAssociation.html
CLI Examples:
.. code-block:: bash
salt-cloud -a ssm_create_association ec2-instance-name ssm_document=ssm-document-name
'''
if call != 'action':
raise SaltCloudSystemExit(
'The ssm_create_association action must be called with '
'-a or --action.'
)
if not kwargs:
kwargs = {}
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
if name and not instance_id:
instance_id = _get_node(name)['instanceId']
if not name and not instance_id:
log.error('Either a name or an instance_id is required.')
return False
if 'ssm_document' not in kwargs:
        log.error('An ssm_document is required.')
return False
params = {'Action': 'CreateAssociation',
'InstanceId': instance_id,
'Name': kwargs['ssm_document']}
result = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
product='ssm',
opts=__opts__,
sigver='4')
log.info(result)
return result |
def _build_machine_uri(machine, cwd):
'''
returns string used to fetch id names from the sdb store.
the cwd and machine name are concatenated with '?' which should
never collide with a Salt node id -- which is important since we
will be storing both in the same table.
'''
key = '{}?{}'.format(machine, os.path.abspath(cwd))
return _build_sdb_uri(key) |
def _update_vm_info(name, vm_):
''' store the vm_ information keyed by name '''
__utils__['sdb.sdb_set'](_build_sdb_uri(name), vm_, __opts__)
# store machine-to-name mapping, too
if vm_['machine']:
__utils__['sdb.sdb_set'](
_build_machine_uri(vm_['machine'], vm_.get('cwd', '.')),
name,
__opts__) |
def get_vm_info(name):
'''
get the information for a VM.
:param name: salt_id name
:return: dictionary of {'machine': x, 'cwd': y, ...}.
'''
try:
vm_ = __utils__['sdb.sdb_get'](_build_sdb_uri(name), __opts__)
except KeyError:
raise SaltInvocationError(
'Probable sdb driver not found. Check your configuration.')
if vm_ is None or 'machine' not in vm_:
raise SaltInvocationError(
'No Vagrant machine defined for Salt_id {}'.format(name))
return vm_ |
def get_machine_id(machine, cwd):
'''
returns the salt_id name of the Vagrant VM
:param machine: the Vagrant machine name
:param cwd: the path to Vagrantfile
:return: salt_id name
'''
name = __utils__['sdb.sdb_get'](_build_machine_uri(machine, cwd), __opts__)
return name |
def _erase_vm_info(name):
'''
    erase the information for a VM that we are destroying.
    some sdb drivers (such as the SQLite driver we expect to use)
    do not have a `delete` method, so if the delete fails, we have
    to replace the record with a blank entry.
'''
try:
# delete the machine record
vm_ = get_vm_info(name)
if vm_['machine']:
key = _build_machine_uri(vm_['machine'], vm_.get('cwd', '.'))
try:
__utils__['sdb.sdb_delete'](key, __opts__)
except KeyError:
# no delete method found -- load a blank value
__utils__['sdb.sdb_set'](key, None, __opts__)
except Exception:
pass
uri = _build_sdb_uri(name)
try:
# delete the name record
__utils__['sdb.sdb_delete'](uri, __opts__)
except KeyError:
# no delete method found -- load an empty dictionary
__utils__['sdb.sdb_set'](uri, {}, __opts__)
except Exception:
pass |
def _vagrant_ssh_config(vm_):
'''
get the information for ssh communication from the new VM
:param vm_: the VM's info as we have it now
:return: dictionary of ssh stuff
'''
machine = vm_['machine']
log.info('requesting vagrant ssh-config for VM %s', machine or '(default)')
cmd = 'vagrant ssh-config {}'.format(machine)
reply = __salt__['cmd.shell'](cmd,
runas=vm_.get('runas'),
cwd=vm_.get('cwd'),
ignore_retcode=True)
ssh_config = {}
for line in reply.split('\n'): # build a dictionary of the text reply
tokens = line.strip().split()
if len(tokens) == 2: # each two-token line becomes a key:value pair
ssh_config[tokens[0]] = tokens[1]
log.debug('ssh_config=%s', repr(ssh_config))
return ssh_config |
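# Example of the parsing done above on a typical ``vagrant ssh-config`` reply
# (the sample text below is illustrative, not captured output):
_SAMPLE_SSH_CONFIG = '''Host default
  HostName 127.0.0.1
  User vagrant
  Port 2222
  IdentityFile /projects/project_1/.vagrant/private_key
'''

_parsed = {}
for _line in _SAMPLE_SSH_CONFIG.split('\n'):
    _tokens = _line.strip().split()
    if len(_tokens) == 2:
        _parsed[_tokens[0]] = _tokens[1]
# _parsed -> {'Host': 'default', 'HostName': '127.0.0.1', 'User': 'vagrant',
#             'Port': '2222',
#             'IdentityFile': '/projects/project_1/.vagrant/private_key'}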
def list_domains():
'''
Return a list of the salt_id names of all available Vagrant VMs on
this host without regard to the path where they are defined.
CLI Example:
.. code-block:: bash
salt '*' vagrant.list_domains --log-level=info
The log shows information about all known Vagrant environments
on this machine. This data is cached and may not be completely
up-to-date.
'''
vms = []
cmd = 'vagrant global-status'
reply = __salt__['cmd.shell'](cmd)
log.info('--->\n%s', reply)
for line in reply.split('\n'): # build a list of the text reply
tokens = line.strip().split()
try:
_ = int(tokens[0], 16) # valid id numbers are hexadecimal
except (ValueError, IndexError):
continue # skip lines without valid id numbers
machine = tokens[1]
cwd = tokens[-1]
name = get_machine_id(machine, cwd)
if name:
vms.append(name)
return vms |
def list_active_vms(cwd=None):
'''
    Return a list of machine names for active virtual machines on the host,
which are defined in the Vagrantfile at the indicated path.
CLI Example:
.. code-block:: bash
salt '*' vagrant.list_active_vms cwd=/projects/project_1
'''
vms = []
cmd = 'vagrant status'
reply = __salt__['cmd.shell'](cmd, cwd=cwd)
log.info('--->\n%s', reply)
for line in reply.split('\n'): # build a list of the text reply
tokens = line.strip().split()
if len(tokens) > 1:
if tokens[1] == 'running':
vms.append(tokens[0])
return vms |
def vm_state(name='', cwd=None):
'''
Return list of information for all the vms indicating their state.
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs defined by
the Vagrantfile in the `cwd` directory.
CLI Example:
.. code-block:: bash
salt '*' vagrant.vm_state <name> cwd=/projects/project_1
returns a list of dictionaries with machine name, state, provider,
and salt_id name.
.. code-block:: python
datum = {'machine': _, # Vagrant machine name,
'state': _, # string indicating machine state, like 'running'
'provider': _, # the Vagrant VM provider
'name': _} # salt_id name
Known bug: if there are multiple machines in your Vagrantfile, and you request
the status of the ``primary`` machine, which you defined by leaving the ``machine``
parameter blank, then you may receive the status of all of them.
Please specify the actual machine name for each VM if there are more than one.
'''
if name:
vm_ = get_vm_info(name)
machine = vm_['machine']
cwd = vm_['cwd'] or cwd # usually ignore passed-in cwd
else:
if not cwd:
raise SaltInvocationError(
                'Path to Vagrantfile must be defined, but cwd={}'.format(cwd))
machine = ''
info = []
cmd = 'vagrant status {}'.format(machine)
reply = __salt__['cmd.shell'](cmd, cwd)
log.info('--->\n%s', reply)
for line in reply.split('\n'): # build a list of the text reply
tokens = line.strip().split()
if len(tokens) > 1 and tokens[-1].endswith(')'):
try:
datum = {'machine': tokens[0],
'state': ' '.join(tokens[1:-1]),
'provider': tokens[-1].lstrip('(').rstrip(')'),
'name': get_machine_id(tokens[0], cwd)
}
info.append(datum)
except IndexError:
pass
return info |
def init(name, # Salt_id for created VM
cwd=None, # path to find Vagrantfile
machine='', # name of machine in Vagrantfile
runas=None, # username who owns Vagrant box
start=False, # start the machine when initialized
vagrant_provider='', # vagrant provider (default=virtualbox)
vm=None, # a dictionary of VM configuration settings
):
'''
Initialize a new Vagrant VM.
This inputs all the information needed to start a Vagrant VM. These settings are stored in
a Salt sdb database on the Vagrant host minion and used to start, control, and query the
guest VMs. The salt_id assigned here is the key field for that database and must be unique.
:param name: The salt_id name you will use to control this VM
:param cwd: The path to the directory where the Vagrantfile is located
:param machine: The machine name in the Vagrantfile. If blank, the primary machine will be used.
:param runas: The username on the host who owns the Vagrant work files.
:param start: (default: False) Start the virtual machine now.
:param vagrant_provider: The name of a Vagrant VM provider (if not the default).
:param vm: Optionally, all the above information may be supplied in this dictionary.
:return: A string indicating success, or False.
CLI Example:
.. code-block:: bash
salt <host> vagrant.init <salt_id> /path/to/Vagrantfile
salt my_laptop vagrant.init x1 /projects/bevy_master machine=quail1
'''
vm_ = {} if vm is None else vm.copy() # passed configuration data
vm_['name'] = name
# passed-in keyword arguments overwrite vm dictionary values
vm_['cwd'] = cwd or vm_.get('cwd')
if not vm_['cwd']:
raise SaltInvocationError('Path to Vagrantfile must be defined by "cwd" argument')
vm_['machine'] = machine or vm_.get('machine', machine)
vm_['runas'] = runas or vm_.get('runas', runas)
vm_['vagrant_provider'] = vagrant_provider or vm_.get('vagrant_provider', '')
_update_vm_info(name, vm_)
if start:
log.debug('Starting VM %s', name)
ret = _start(name, vm_)
else:
ret = 'Name {} defined using VM {}'.format(name, vm_['machine'] or '(default)')
return ret |
def stop(name):
'''
Hard shutdown the virtual machine. (vagrant halt)
CLI Example:
.. code-block:: bash
salt <host> vagrant.stop <salt_id>
'''
vm_ = get_vm_info(name)
machine = vm_['machine']
cmd = 'vagrant halt {}'.format(machine)
ret = __salt__['cmd.retcode'](cmd,
runas=vm_.get('runas'),
cwd=vm_.get('cwd'))
return ret == 0 |
def reboot(name, provision=False):
'''
Reboot a VM. (vagrant reload)
CLI Example:
.. code-block:: bash
salt <host> vagrant.reboot <salt_id> provision=True
:param name: The salt_id name you will use to control this VM
:param provision: (False) also re-run the Vagrant provisioning scripts.
'''
vm_ = get_vm_info(name)
machine = vm_['machine']
prov = '--provision' if provision else ''
cmd = 'vagrant reload {} {}'.format(machine, prov)
ret = __salt__['cmd.retcode'](cmd,
runas=vm_.get('runas'),
cwd=vm_.get('cwd'))
return ret == 0 |
def destroy(name):
'''
Destroy and delete a virtual machine. (vagrant destroy -f)
This also removes the salt_id name defined by vagrant.init.
CLI Example:
.. code-block:: bash
salt <host> vagrant.destroy <salt_id>
'''
vm_ = get_vm_info(name)
machine = vm_['machine']
cmd = 'vagrant destroy -f {}'.format(machine)
ret = __salt__['cmd.run_all'](cmd,
runas=vm_.get('runas'),
cwd=vm_.get('cwd'),
output_loglevel='info')
if ret['retcode'] == 0:
_erase_vm_info(name)
return 'Destroyed VM {0}'.format(name)
return False |
def get_ssh_config(name, network_mask='', get_private_key=False):
r'''
Retrieve hints of how you might connect to a Vagrant VM.
:param name: the salt_id of the machine
:param network_mask: a CIDR mask to search for the VM's address
:param get_private_key: (default: False) return the key used for ssh login
:return: a dict of ssh login information for the VM
CLI Example:
.. code-block:: bash
salt <host> vagrant.get_ssh_config <salt_id>
salt my_laptop vagrant.get_ssh_config quail1 network_mask=10.0.0.0/8 get_private_key=True
The returned dictionary contains:
- key_filename: the name of the private key file on the VM host computer
- ssh_username: the username to be used to log in to the VM
- ssh_host: the IP address used to log in to the VM. (This will usually be `127.0.0.1`)
- ssh_port: the TCP port used to log in to the VM. (This will often be `2222`)
- \[ip_address:\] (if `network_mask` is defined. see below)
- \[private_key:\] (if `get_private_key` is True) the private key for ssh_username
About `network_mask`:
Vagrant usually uses a redirected TCP port on its host computer to log in to a VM using ssh.
This redirected port and its IP address are "ssh_port" and "ssh_host". The ssh_host is
usually the localhost (127.0.0.1).
This makes it impossible for a third machine (such as a salt-cloud master) to contact the VM
unless the VM has another network interface defined. You will usually want a bridged network
defined by having a `config.vm.network "public_network"` statement in your `Vagrantfile`.
The IP address of the bridged adapter will typically be assigned by DHCP and unknown to you,
but you should be able to determine what IP network the address will be chosen from.
If you enter a CIDR network mask, Salt will attempt to find the VM's address for you.
The host machine will send an "ifconfig" command to the VM (using ssh to `ssh_host`:`ssh_port`)
and return the IP address of the first interface it can find which matches your mask.
'''
vm_ = get_vm_info(name)
ssh_config = _vagrant_ssh_config(vm_)
try:
ans = {'key_filename': ssh_config['IdentityFile'],
'ssh_username': ssh_config['User'],
'ssh_host': ssh_config['HostName'],
'ssh_port': ssh_config['Port'],
}
except KeyError:
raise CommandExecutionError(
'Insufficient SSH information to contact VM {}. '
'Is it running?'.format(vm_.get('machine', '(default)')))
if network_mask:
# ask the new VM to report its network address
command = 'ssh -i {IdentityFile} -p {Port} ' \
'-oStrictHostKeyChecking={StrictHostKeyChecking} ' \
'-oUserKnownHostsFile={UserKnownHostsFile} ' \
'-oControlPath=none ' \
'{User}@{HostName} ifconfig'.format(**ssh_config)
log.info(
'Trying ssh -p %s %s@%s ifconfig',
ssh_config['Port'], ssh_config['User'], ssh_config['HostName']
)
reply = __salt__['cmd.shell'](command)
log.info('--->\n%s', reply)
target_network_range = ipaddress.ip_network(network_mask, strict=False)
for line in reply.split('\n'):
try: # try to find a bridged network address
# the lines we are looking for appear like:
# "inet addr:10.124.31.185 Bcast:10.124.31.255 Mask:255.255.248.0"
# or "inet6 addr: fe80::a00:27ff:fe04:7aac/64 Scope:Link"
tokens = line.replace('addr:', '', 1).split() # remove "addr:" if it exists, then split
found_address = None
if "inet" in tokens:
nxt = tokens.index("inet") + 1
found_address = ipaddress.ip_address(tokens[nxt])
elif "inet6" in tokens:
nxt = tokens.index("inet6") + 1
found_address = ipaddress.ip_address(tokens[nxt].split('/')[0])
if found_address in target_network_range:
ans['ip_address'] = six.text_type(found_address)
break # we have located a good matching address
except (IndexError, AttributeError, TypeError):
pass # all syntax and type errors loop here
        # falling out of the loop leaves us remembering the last candidate
log.info('Network IP address in %s detected as: %s',
target_network_range, ans.get('ip_address', '(not found)'))
if get_private_key:
# retrieve the Vagrant private key from the host
try:
with salt.utils.files.fopen(ssh_config['IdentityFile']) as pks:
ans['private_key'] = salt.utils.stringutils.to_unicode(pks.read())
except (OSError, IOError) as e:
raise CommandExecutionError("Error processing Vagrant private key file: {}".format(e))
return ans |
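# A short sketch of the address search above, isolated: pull candidate
# addresses out of an ``ifconfig``-style reply and keep the first one that
# falls inside the requested CIDR block.
import ipaddress

def _first_address_in(reply, network_mask):
    target = ipaddress.ip_network(network_mask, strict=False)
    for line in reply.split('\n'):
        tokens = line.replace('addr:', '', 1).split()
        for marker in ('inet', 'inet6'):
            if marker in tokens:
                try:
                    candidate = tokens[tokens.index(marker) + 1].split('/')[0]
                    address = ipaddress.ip_address(candidate)
                except (IndexError, ValueError):
                    continue
                if address in target:
                    return str(address)
    return None

# _first_address_in('inet addr:10.124.31.185 Bcast:10.124.31.255', '10.0.0.0/8')
# -> '10.124.31.185'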
def vm_configured(name, vm_name, cpu, memory, image, version, interfaces,
disks, scsi_devices, serial_ports, datacenter, datastore,
placement, cd_dvd_drives=None, sata_controllers=None,
advanced_configs=None, template=None, tools=True,
power_on=False, deploy=False):
'''
    Selects the correct operation to be executed on a virtual machine:
    non-existing machines will be created, and existing ones will be updated
    if the config differs.
'''
result = {'name': name,
'result': None,
'changes': {},
'comment': ''}
log.trace('Validating virtual machine configuration')
schema = ESXVirtualMachineConfigSchema.serialize()
log.trace('schema = %s', schema)
try:
jsonschema.validate({'vm_name': vm_name,
'cpu': cpu,
'memory': memory,
'image': image,
'version': version,
'interfaces': interfaces,
'disks': disks,
'scsi_devices': scsi_devices,
'serial_ports': serial_ports,
'cd_dvd_drives': cd_dvd_drives,
'sata_controllers': sata_controllers,
'datacenter': datacenter,
'datastore': datastore,
'placement': placement,
'template': template,
'tools': tools,
'power_on': power_on,
'deploy': deploy}, schema)
except jsonschema.exceptions.ValidationError as exc:
raise salt.exceptions.InvalidConfigError(exc)
service_instance = __salt__['vsphere.get_service_instance_via_proxy']()
try:
__salt__['vsphere.get_vm'](vm_name, vm_properties=['name'],
service_instance=service_instance)
except salt.exceptions.VMwareObjectRetrievalError:
vm_file = __salt__['vsphere.get_vm_config_file'](
vm_name, datacenter,
placement, datastore,
service_instance=service_instance)
if vm_file:
if __opts__['test']:
result.update({'comment': 'The virtual machine {0}'
' will be registered.'.format(vm_name)})
__salt__['vsphere.disconnect'](service_instance)
return result
result = vm_registered(vm_name, datacenter, placement,
vm_file, power_on=power_on)
return result
else:
if __opts__['test']:
result.update({'comment': 'The virtual machine {0}'
' will be created.'.format(vm_name)})
__salt__['vsphere.disconnect'](service_instance)
return result
if template:
result = vm_cloned(name)
else:
result = vm_created(name, vm_name, cpu, memory, image, version,
interfaces, disks, scsi_devices,
serial_ports, datacenter, datastore,
placement, cd_dvd_drives=cd_dvd_drives,
advanced_configs=advanced_configs,
power_on=power_on)
return result
result = vm_updated(name, vm_name, cpu, memory, image, version,
interfaces, disks, scsi_devices,
serial_ports, datacenter, datastore,
cd_dvd_drives=cd_dvd_drives,
sata_controllers=sata_controllers,
advanced_configs=advanced_configs,
power_on=power_on)
__salt__['vsphere.disconnect'](service_instance)
log.trace(result)
return result |
def vm_updated(name, vm_name, cpu, memory, image, version, interfaces,
disks, scsi_devices, serial_ports, datacenter, datastore,
cd_dvd_drives=None, sata_controllers=None,
advanced_configs=None, power_on=False):
'''
Updates a virtual machine configuration if there is a difference between
the given and deployed configuration.
'''
result = {'name': name,
'result': None,
'changes': {},
'comment': ''}
service_instance = __salt__['vsphere.get_service_instance_via_proxy']()
current_config = __salt__['vsphere.get_vm_config'](
vm_name,
datacenter=datacenter,
objects=False,
service_instance=service_instance)
diffs = __salt__['vsphere.compare_vm_configs'](
{'name': vm_name,
'cpu': cpu,
'memory': memory,
'image': image,
'version': version,
'interfaces': interfaces,
'disks': disks,
'scsi_devices': scsi_devices,
'serial_ports': serial_ports,
'datacenter': datacenter,
'datastore': datastore,
'cd_drives': cd_dvd_drives,
'sata_controllers': sata_controllers,
'advanced_configs': advanced_configs},
current_config)
if not diffs:
result.update({
'result': True,
'changes': {},
'comment': 'Virtual machine {0} is already up to date'.format(vm_name)})
return result
if __opts__['test']:
comment = 'State vm_updated will update virtual machine \'{0}\' ' \
'in datacenter \'{1}\':\n{2}'.format(vm_name,
datacenter,
'\n'.join([':\n'.join([key, difference.changes_str])
for key, difference in six.iteritems(diffs)]))
result.update({'result': None,
'comment': comment})
__salt__['vsphere.disconnect'](service_instance)
return result
try:
changes = __salt__['vsphere.update_vm'](vm_name, cpu, memory, image,
version, interfaces, disks,
scsi_devices, serial_ports,
datacenter, datastore,
cd_dvd_drives=cd_dvd_drives,
sata_controllers=sata_controllers,
advanced_configs=advanced_configs,
service_instance=service_instance)
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({
'result': False,
'comment': six.text_type(exc)})
return result
if power_on:
try:
__salt__['vsphere.power_on_vm'](vm_name, datacenter)
except salt.exceptions.VMwarePowerOnError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({
'result': False,
'comment': six.text_type(exc)})
return result
changes.update({'power_on': True})
__salt__['vsphere.disconnect'](service_instance)
result = {'name': name,
'result': True,
'changes': changes,
'comment': 'Virtual machine '
'{0} was updated successfully'.format(vm_name)}
return result |
def vm_created(name, vm_name, cpu, memory, image, version, interfaces,
disks, scsi_devices, serial_ports, datacenter, datastore,
placement, ide_controllers=None, sata_controllers=None,
cd_dvd_drives=None, advanced_configs=None, power_on=False):
'''
Creates a virtual machine with the given properties if it doesn't exist.
'''
result = {'name': name,
'result': None,
'changes': {},
'comment': ''}
if __opts__['test']:
result['comment'] = 'Virtual machine {0} will be created'.format(
vm_name)
return result
service_instance = __salt__['vsphere.get_service_instance_via_proxy']()
try:
info = __salt__['vsphere.create_vm'](vm_name, cpu, memory, image,
version, datacenter, datastore,
placement, interfaces, disks,
scsi_devices,
serial_ports=serial_ports,
ide_controllers=ide_controllers,
sata_controllers=sata_controllers,
cd_drives=cd_dvd_drives,
advanced_configs=advanced_configs,
service_instance=service_instance)
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({
'result': False,
'comment': six.text_type(exc)})
return result
if power_on:
try:
__salt__['vsphere.power_on_vm'](vm_name, datacenter,
service_instance=service_instance)
except salt.exceptions.VMwarePowerOnError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({
'result': False,
'comment': six.text_type(exc)})
return result
info['power_on'] = power_on
changes = {'name': vm_name, 'info': info}
__salt__['vsphere.disconnect'](service_instance)
result = {'name': name,
'result': True,
'changes': changes,
'comment': 'Virtual machine '
'{0} created successfully'.format(vm_name)}
return result |
def vm_registered(vm_name, datacenter, placement, vm_file, power_on=False):
'''
Registers a virtual machine if the machine files are available on
the main datastore.
'''
result = {'name': vm_name,
'result': None,
'changes': {},
'comment': ''}
vmx_path = '{0}{1}'.format(vm_file.folderPath, vm_file.file[0].path)
log.trace('Registering virtual machine with vmx file: %s', vmx_path)
service_instance = __salt__['vsphere.get_service_instance_via_proxy']()
try:
__salt__['vsphere.register_vm'](vm_name, datacenter,
placement, vmx_path,
service_instance=service_instance)
except salt.exceptions.VMwareMultipleObjectsError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': False,
'comment': six.text_type(exc)})
return result
except salt.exceptions.VMwareVmRegisterError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': False,
'comment': six.text_type(exc)})
return result
if power_on:
try:
__salt__['vsphere.power_on_vm'](vm_name, datacenter,
service_instance=service_instance)
except salt.exceptions.VMwarePowerOnError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({
'result': False,
'comment': six.text_type(exc)})
return result
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': True,
'changes': {'name': vm_name, 'power_on': power_on},
'comment': 'Virtual machine '
'{0} registered successfully'.format(vm_name)})
return result |
def installed(name,
pkgs=None,
dir=None,
user=None,
force_reinstall=False,
registry=None,
env=None):
'''
Verify that the given package is installed and is at the correct version
(if specified).
.. code-block:: yaml
coffee-script:
npm.installed:
- user: someuser
coffee-script@1.0.1:
npm.installed: []
name
The package to install
.. versionchanged:: 2014.7.2
This parameter is no longer lowercased by salt so that
case-sensitive NPM package names will work.
pkgs
A list of packages to install with a single npm invocation; specifying
this argument will ignore the ``name`` argument
.. versionadded:: 2014.7.0
dir
The target directory in which to install the package, or None for
global installation
user
The user to run NPM with
.. versionadded:: 0.17.0
registry
The NPM registry from which to install the package
.. versionadded:: 2014.7.0
env
A list of environment variables to be set prior to execution. The
        format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`
state function.
.. versionadded:: 2014.7.0
force_reinstall
Install the package even if it is already installed
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
pkg_list = pkgs if pkgs else [name]
try:
installed_pkgs = __salt__['npm.list'](dir=dir, runas=user, env=env, depth=0)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
ret['comment'] = 'Error looking up \'{0}\': {1}'.format(name, err)
return ret
else:
installed_pkgs = dict((p, info)
for p, info in six.iteritems(installed_pkgs))
pkgs_satisfied = []
pkgs_to_install = []
def _pkg_is_installed(pkg, installed_pkgs):
'''
Helper function to determine if a package is installed
This performs more complex comparison than just checking
keys, such as examining source repos to see if the package
was installed by a different name from the same repo
:pkg str: The package to compare
:installed_pkgs: A dictionary produced by npm list --json
'''
if (pkg_name in installed_pkgs and
'version' in installed_pkgs[pkg_name]):
return True
# Check to see if we are trying to install from a URI
elif '://' in pkg_name: # TODO Better way?
for pkg_details in installed_pkgs.values():
try:
pkg_from = pkg_details.get('from', '').split('://')[1]
# Catch condition where we may have specified package as
# git://github.com/foo/bar but packager describes it as
# git://github.com/foo/bar.git in the package
if not pkg_from.endswith('.git') and pkg_name.startswith('git://'):
pkg_from += '.git'
if pkg_name.split('://')[1] == pkg_from:
return True
except IndexError:
pass
return False
for pkg in pkg_list:
# Valid:
#
# @google-cloud/bigquery@^0.9.6
# @foobar
# buffer-equal-constant-time@1.0.1
# coffee-script
matches = re.search(r'^(@?[^@\s]+)(?:@(\S+))?', pkg)
pkg_name, pkg_ver = matches.group(1), matches.group(2) or None
if force_reinstall is True:
pkgs_to_install.append(pkg)
continue
if not _pkg_is_installed(pkg, installed_pkgs):
pkgs_to_install.append(pkg)
continue
installed_name_ver = '{0}@{1}'.format(pkg_name,
installed_pkgs[pkg_name]['version'])
# If given an explicit version check the installed version matches.
if pkg_ver:
if installed_pkgs[pkg_name].get('version') != pkg_ver:
pkgs_to_install.append(pkg)
else:
pkgs_satisfied.append(installed_name_ver)
continue
else:
pkgs_satisfied.append(installed_name_ver)
continue
if __opts__['test']:
ret['result'] = None
comment_msg = []
if pkgs_to_install:
comment_msg.append('NPM package(s) \'{0}\' are set to be installed'
.format(', '.join(pkgs_to_install)))
ret['changes'] = {'old': [], 'new': pkgs_to_install}
        if pkgs_satisfied:
            comment_msg.append('Package(s) \'{0}\' satisfied by {1}'
                               .format(', '.join(pkg_list), ', '.join(pkgs_satisfied)))
            # Only report success when nothing is left to install; otherwise
            # keep the None result set above so test mode reports pending changes.
            if not pkgs_to_install:
                ret['result'] = True
ret['comment'] = '. '.join(comment_msg)
return ret
if not pkgs_to_install:
ret['result'] = True
ret['comment'] = ('Package(s) \'{0}\' satisfied by {1}'
.format(', '.join(pkg_list), ', '.join(pkgs_satisfied)))
return ret
try:
cmd_args = {
'dir': dir,
'runas': user,
'registry': registry,
'env': env,
'pkgs': pkg_list,
}
call = __salt__['npm.install'](**cmd_args)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
ret['comment'] = 'Error installing \'{0}\': {1}'.format(
', '.join(pkg_list), err)
return ret
    if call and isinstance(call, (list, dict)):
ret['result'] = True
ret['changes'] = {'old': [], 'new': pkgs_to_install}
ret['comment'] = 'Package(s) \'{0}\' successfully installed'.format(
', '.join(pkgs_to_install))
else:
ret['result'] = False
ret['comment'] = 'Could not install package(s) \'{0}\''.format(
', '.join(pkg_list))
return ret |
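# A minimal, standalone sketch of how the ``installed`` state above splits a
# requested package spec into name and version using the same regular
# expression.  It is illustrative only and calls no Salt or npm code; the
# sample specs are hypothetical.
import re


def parse_npm_spec(pkg):
    '''Return (name, version) for specs like ``coffee-script@1.0.1`` or
    ``@google-cloud/bigquery@^0.9.6``; version is None when omitted.'''
    matches = re.search(r'^(@?[^@\s]+)(?:@(\S+))?', pkg)
    return matches.group(1), matches.group(2) or None


if __name__ == '__main__':
    for spec in ('coffee-script',
                 'coffee-script@1.0.1',
                 '@google-cloud/bigquery@^0.9.6'):
        print(spec, '->', parse_npm_spec(spec)) |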
def removed(name, dir=None, user=None):
    '''
    Verify that the given package is not installed.

    dir
        The target directory from which to remove the package, or None for
        a globally installed package

    user
        The user to run NPM with

        .. versionadded:: 0.17.0
    '''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
try:
installed_pkgs = __salt__['npm.list'](dir=dir, depth=0)
except (CommandExecutionError, CommandNotFoundError) as err:
ret['result'] = False
ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
return ret
if name not in installed_pkgs:
ret['result'] = True
ret['comment'] = 'Package \'{0}\' is not installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Package \'{0}\' is set to be removed'.format(name)
return ret
if __salt__['npm.uninstall'](pkg=name, dir=dir, runas=user):
ret['result'] = True
ret['changes'][name] = 'Removed'
ret['comment'] = 'Package \'{0}\' was successfully removed'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Error removing package \'{0}\''.format(name)
return ret |
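# A simplified, self-contained model of the decision flow in ``removed`` above:
# already absent, test-mode dry run, or actual removal.  A plain dict stands in
# for the ``npm.list`` output and no npm command is run; the sample data is
# hypothetical.
def removed_sketch(name, installed_pkgs, test=False):
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package {0} is not installed'.format(name)
    elif test:
        ret['comment'] = 'Package {0} is set to be removed'.format(name)
    else:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package {0} was successfully removed'.format(name)
    return ret


if __name__ == '__main__':
    print(removed_sketch('left-pad', {'left-pad': {'version': '1.3.0'}}, test=True))
    print(removed_sketch('left-pad', {})) |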
def bootstrap(name, user=None, silent=True):
    '''
    Bootstraps a node.js application.

    Will execute 'npm install --json' in the specified directory.

    user
        The user to run NPM with

        .. versionadded:: 0.17.0
    '''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if __opts__['test']:
try:
call = __salt__['npm.install'](dir=name, runas=user, pkg=None, silent=silent, dry_run=True)
if call:
ret['result'] = None
ret['changes'] = {'old': [], 'new': call}
ret['comment'] = '{0} is set to be bootstrapped'.format(name)
else:
ret['result'] = True
ret['comment'] = '{0} is already bootstrapped'.format(name)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
ret['comment'] = 'Error Bootstrapping \'{0}\': {1}'.format(name, err)
return ret
try:
call = __salt__['npm.install'](dir=name, runas=user, pkg=None, silent=silent)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
ret['comment'] = 'Error Bootstrapping \'{0}\': {1}'.format(name, err)
return ret
if not call:
ret['result'] = True
ret['comment'] = 'Directory is already bootstrapped'
return ret
# npm.install will return a string if it can't parse a JSON result
if isinstance(call, six.string_types):
ret['result'] = False
ret['changes'] = call
ret['comment'] = 'Could not bootstrap directory'
else:
ret['result'] = True
ret['changes'] = {name: 'Bootstrapped'}
ret['comment'] = 'Directory was successfully bootstrapped'
return ret |
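# A small, standalone sketch of how the ``bootstrap`` state above interprets
# the value returned by ``npm.install``: a falsy value means the directory is
# already bootstrapped, a bare string means npm's JSON output could not be
# parsed, and anything else is treated as a successful change.  The sample
# results are hypothetical; the real state uses six.string_types for the
# string check.
def interpret_bootstrap(call):
    if not call:
        return True, 'Directory is already bootstrapped', {}
    if isinstance(call, str):
        return False, 'Could not bootstrap directory', call
    return True, 'Directory was successfully bootstrapped', {'dir': 'Bootstrapped'}


if __name__ == '__main__':
    for sample in ([], 'npm WARN unparseable output', [{'name': 'lodash'}]):
        print(interpret_bootstrap(sample)) |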
def cache_cleaned(name=None,
user=None,
force=False):
    '''
    Ensure that the given package is not cached.

    If no package is specified, this ensures the entire cache is cleared.

    name
        The name of the package to remove from the cache, or None for all
        packages

    user
        The user to run NPM with

    force
        Force cleaning of cache. Required for npm@5 and greater

        .. versionadded:: 2016.11.6
    '''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
specific_pkg = None
try:
cached_pkgs = __salt__['npm.cache_list'](path=name, runas=user)
except (CommandExecutionError, CommandNotFoundError) as err:
ret['result'] = False
ret['comment'] = 'Error looking up cached {0}: {1}'.format(
name or 'packages', err)
return ret
if name:
all_cached_pkgs = __salt__['npm.cache_list'](path=None, runas=user)
# The first package is always the cache path
cache_root_path = all_cached_pkgs[0]
specific_pkg = '{0}/{1}/'.format(cache_root_path, name)
if specific_pkg not in cached_pkgs:
ret['result'] = True
ret['comment'] = 'Package {0} is not in the cache'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Cached {0} set to be removed'.format(name or 'packages')
return ret
if __salt__['npm.cache_clean'](path=name, runas=user):
ret['result'] = True
ret['changes'][name or 'cache'] = 'Removed'
ret['comment'] = 'Cached {0} successfully removed'.format(
name or 'packages'
)
else:
ret['result'] = False
ret['comment'] = 'Error cleaning cached {0}'.format(name or 'packages')
return ret |
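# A self-contained illustration of the path handling in ``cache_cleaned``
# above: the first entry of the ``npm cache ls`` listing is the cache root, and
# a specific package is matched as ``<cache_root>/<name>/``.  The sample
# listing below is hypothetical and no npm command is run.
def pkg_in_cache(cached_pkgs, name):
    cache_root_path = cached_pkgs[0]  # the first entry is always the cache root
    specific_pkg = '{0}/{1}/'.format(cache_root_path, name)
    return specific_pkg in cached_pkgs


if __name__ == '__main__':
    listing = ['/home/user/.npm',
               '/home/user/.npm/lodash/',
               '/home/user/.npm/lodash/4.17.11/']
    print(pkg_in_cache(listing, 'lodash'))    # True
    print(pkg_in_cache(listing, 'left-pad'))  # False |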
def present(name, profile="splunk", **kwargs):
    '''
    Ensure a search is present

    .. code-block:: yaml

        API Error Search:
          splunk_search.present:
            - search: index=main sourcetype=blah
            - template: alert_5min

    The following parameters are required:

    name
        This is the name of the search in splunk
    '''
ret = {
'name': name,
'changes': {},
'result': None,
'comment': ''
}
target = __salt__['splunk_search.get'](name, profile=profile)
if target:
if __opts__['test']:
ret['comment'] = "Would update {0}".format(name)
return ret
# found a search... updating
result = __salt__['splunk_search.update'](
name, profile=profile, **kwargs
)
if not result:
# no update
ret['result'] = True
ret['comment'] = "No changes"
else:
(newvalues, diffs) = result
old_content = dict(target.content)
old_changes = {}
for x in newvalues:
old_changes[x] = old_content.get(x, None)
ret['result'] = True
ret['changes']['diff'] = diffs
ret['changes']['old'] = old_changes
ret['changes']['new'] = newvalues
else:
if __opts__['test']:
ret['comment'] = "Would create {0}".format(name)
return ret
# creating a new search
result = __salt__['splunk_search.create'](
name, profile=profile, **kwargs
)
if result:
ret['result'] = True
ret['changes']['old'] = False
ret['changes']['new'] = kwargs
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0}'.format(name)
return ret |
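# A standalone sketch of how ``present`` above records changes after a
# successful update: the updated values are reported alongside the old values
# looked up from the existing search's content.  The sample data is
# hypothetical and no Splunk API is called.
def build_changes(old_content, newvalues, diffs):
    old_changes = {}
    for key in newvalues:
        old_changes[key] = old_content.get(key, None)
    return {'diff': diffs, 'old': old_changes, 'new': newvalues}


if __name__ == '__main__':
    old = {'search': 'index=main sourcetype=old', 'cron_schedule': '*/5 * * * *'}
    new = {'search': 'index=main sourcetype=blah'}
    print(build_changes(old, new, diffs='search changed')) |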
def absent(name, profile="splunk"):
    '''
    Ensure a search is absent

    .. code-block:: yaml

        API Error Search:
          splunk_search.absent

    The following parameters are required:

    name
        This is the name of the search in splunk
    '''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': '{0} is absent.'.format(name)
}
target = __salt__['splunk_search.get'](name, profile=profile)
    if target:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = "Would delete {0}".format(name)
            return ret
result = __salt__['splunk_search.delete'](name, profile=profile)
if result:
ret['comment'] = '{0} was deleted'.format(name)
else:
ret['comment'] = 'Failed to delete {0}'.format(name)
ret['result'] = False
return ret |
def _bulk_state(saltfunc, lbn, workers, profile):
'''
Generic function for bulk worker operation
'''
ret = {'name': lbn,
'result': True,
'changes': {},
'comment': ''}
if not isinstance(workers, list):
ret['result'] = False
        ret['comment'] = 'workers should be a list, not a {0}'.format(
            type(workers).__name__
        )
return ret
if __opts__['test']:
ret['result'] = None
return ret
    log.info('executing %s on modjk workers %s', saltfunc, workers)
try:
cmdret = __salt__[saltfunc](workers, lbn, profile=profile)
except KeyError:
ret['result'] = False
ret['comment'] = 'unsupported function {0}'.format(
saltfunc
)
return ret
errors = []
for worker, ok in six.iteritems(cmdret):
if not ok:
errors.append(worker)
ret['changes'] = {'status': cmdret}
if errors:
ret['result'] = False
ret['comment'] = '{0} failed on some workers'.format(saltfunc)
return ret |
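# A minimal model of how ``_bulk_state`` above turns the per-worker result
# mapping returned by the modjk execution module into an overall pass/fail.
# The result mapping below is made up for illustration; no modjk call is made.
def summarize_workers(cmdret):
    errors = [worker for worker, ok in cmdret.items() if not ok]
    return {'changes': {'status': cmdret},
            'result': not errors,
            'failed': errors}


if __name__ == '__main__':
    print(summarize_workers({'app1': True, 'app2': False})) |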
def worker_stopped(name, workers=None, profile='default'):
    '''
    Stop all the workers in the modjk load balancer

    Example:

    .. code-block:: yaml

        loadbalancer:
          modjk.worker_stopped:
            - workers:
              - app1
              - app2
    '''
if workers is None:
workers = []
return _bulk_state(
'modjk.bulk_stop', name, workers, profile
) |
def worker_activated(name, workers=None, profile='default'):
    '''
    Activate all the workers in the modjk load balancer

    Example:

    .. code-block:: yaml

        loadbalancer:
          modjk.worker_activated:
            - workers:
              - app1
              - app2
    '''
if workers is None:
workers = []
return _bulk_state(
'modjk.bulk_activate', name, workers, profile
) |
def worker_disabled(name, workers=None, profile='default'):
    '''
    Disable all the workers in the modjk load balancer

    Example:

    .. code-block:: yaml

        loadbalancer:
          modjk.worker_disabled:
            - workers:
              - app1
              - app2
    '''
if workers is None:
workers = []
return _bulk_state(
'modjk.bulk_disable', name, workers, profile
) |
def worker_recover(name, workers=None, profile='default'):
    '''
    Recover all the workers in the modjk load balancer

    Example:

    .. code-block:: yaml

        loadbalancer:
          modjk.worker_recover:
            - workers:
              - app1
              - app2
    '''
if workers is None:
workers = []
return _bulk_state(
'modjk.bulk_recover', name, workers, profile
) |
def _parse_forward(mapping):
'''
Parses a port forwarding statement in the form used by this state:
from_port:to_port:protocol[:destination]
and returns a ForwardingMapping object
'''
if len(mapping.split(':')) > 3:
(srcport, destport, protocol, destaddr) = mapping.split(':')
else:
(srcport, destport, protocol) = mapping.split(':')
destaddr = ''
return ForwardingMapping(srcport, destport, protocol, destaddr) |
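# A self-contained sketch of the forwarding syntax parsed by ``_parse_forward``
# above (``from_port:to_port:protocol[:destination]``), using a namedtuple as a
# stand-in for the state's ForwardingMapping class.  The sample mappings are
# hypothetical.
from collections import namedtuple

Forward = namedtuple('Forward', 'srcport destport protocol destaddr')


def parse_forward(mapping):
    parts = mapping.split(':')
    if len(parts) > 3:
        srcport, destport, protocol, destaddr = parts
    else:
        (srcport, destport, protocol), destaddr = parts, ''
    return Forward(srcport, destport, protocol, destaddr)


if __name__ == '__main__':
    print(parse_forward('80:8080:tcp'))
    print(parse_forward('443:8443:tcp:10.0.0.5')) |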
def present(name,
block_icmp=None,
prune_block_icmp=False,
default=None,
masquerade=False,
ports=None,
prune_ports=False,
port_fwd=None,
prune_port_fwd=False,
services=None,
prune_services=False,
interfaces=None,
prune_interfaces=False,
sources=None,
prune_sources=False,
rich_rules=None,
prune_rich_rules=False):
    '''
    Ensure a zone has specific attributes.

    name
        The zone to modify.

    default : None
        Set this zone as the default zone if ``True``.

    masquerade : False
        Enable or disable masquerade for a zone.

    block_icmp : None
        List of ICMP types to block in the zone.

    prune_block_icmp : False
        If ``True``, remove all but the specified block_icmp from the zone.

    ports : None
        List of ports to add to the zone.

    prune_ports : False
        If ``True``, remove all but the specified ports from the zone.

    port_fwd : None
        List of port forwards to add to the zone.

    prune_port_fwd : False
        If ``True``, remove all but the specified port_fwd from the zone.

    services : None
        List of services to add to the zone.

    prune_services : False
        If ``True``, remove all but the specified services from the zone.

        .. note::
            Currently defaults to ``True`` for compatibility, but will be
            changed to ``False`` in a future release.

    interfaces : None
        List of interfaces to add to the zone.

    prune_interfaces : False
        If ``True``, remove all but the specified interfaces from the zone.

    sources : None
        List of sources to add to the zone.

    prune_sources : False
        If ``True``, remove all but the specified sources from the zone.

    rich_rules : None
        List of rich rules to add to the zone.

    prune_rich_rules : False
        If ``True``, remove all but the specified rich rules from the zone.
    '''
ret = _present(name, block_icmp, prune_block_icmp, default, masquerade, ports, prune_ports,
port_fwd, prune_port_fwd, services, prune_services, interfaces, prune_interfaces,
sources, prune_sources, rich_rules, prune_rich_rules)
# Reload firewalld service on changes
if ret['changes'] != {}:
__salt__['firewalld.reload_rules']()
return ret |
def service(name,
ports=None,
protocols=None):
    '''
    Ensure the service exists and encompasses the specified ports and
    protocols.

    .. versionadded:: 2016.11.0
    '''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
    if name not in __salt__['firewalld.get_services']():
        # Respect test mode and only create the service on a real run
        if not __opts__['test']:
            __salt__['firewalld.new_service'](name, restart=False)
ports = ports or []
try:
_current_ports = __salt__['firewalld.get_service_ports'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
new_ports = set(ports) - set(_current_ports)
old_ports = set(_current_ports) - set(ports)
for port in new_ports:
if not __opts__['test']:
try:
__salt__['firewalld.add_service_port'](name, port)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
for port in old_ports:
if not __opts__['test']:
try:
__salt__['firewalld.remove_service_port'](name, port)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_ports or old_ports:
ret['changes'].update({'ports':
{'old': _current_ports,
'new': ports}})
protocols = protocols or []
try:
_current_protocols = __salt__['firewalld.get_service_protocols'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
new_protocols = set(protocols) - set(_current_protocols)
old_protocols = set(_current_protocols) - set(protocols)
for protocol in new_protocols:
if not __opts__['test']:
try:
__salt__['firewalld.add_service_protocol'](name, protocol)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
for protocol in old_protocols:
if not __opts__['test']:
try:
__salt__['firewalld.remove_service_protocol'](name, protocol)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_protocols or old_protocols:
ret['changes'].update({'protocols':
{'old': _current_protocols,
'new': protocols}})
    if ret['changes'] != {} and not __opts__['test']:
        # Do not reload firewalld rules during a dry run
        __salt__['firewalld.reload_rules']()
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = '\'{0}\' is already in the desired state.'.format(
name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Configuration for \'{0}\' will change.'.format(name)
return ret
ret['comment'] = '\'{0}\' was configured.'.format(name)
return ret |
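# The ``service`` state above reconciles desired vs. current ports and
# protocols with plain set differences.  This standalone sketch shows the same
# pattern with hypothetical data; no firewalld call is made.
def diff_items(desired, current):
    to_add = set(desired) - set(current)
    to_remove = set(current) - set(desired)
    return to_add, to_remove


if __name__ == '__main__':
    desired_ports = ['8080/tcp', '8443/tcp']
    current_ports = ['8080/tcp', '9090/tcp']
    add, remove = diff_items(desired_ports, current_ports)
    print('add:', sorted(add))        # ['8443/tcp']
    print('remove:', sorted(remove))  # ['9090/tcp'] |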
def _present(name,
block_icmp=None,
prune_block_icmp=False,
default=None,
masquerade=False,
ports=None,
prune_ports=False,
port_fwd=None,
prune_port_fwd=False,
services=None,
# TODO: prune_services=False in future release
# prune_services=False,
prune_services=None,
interfaces=None,
prune_interfaces=False,
sources=None,
prune_sources=False,
rich_rules=None,
prune_rich_rules=False):
'''
Ensure a zone has specific attributes.
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
try:
zones = __salt__['firewalld.get_zones'](permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if name not in zones:
if not __opts__['test']:
try:
__salt__['firewalld.new_zone'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({name:
{'old': zones,
'new': name}})
if block_icmp or prune_block_icmp:
block_icmp = block_icmp or []
new_icmp_types = []
old_icmp_types = []
try:
_current_icmp_blocks = __salt__['firewalld.list_icmp_block'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if block_icmp:
try:
_valid_icmp_types = __salt__['firewalld.get_icmp_types'](
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
# log errors for invalid ICMP types in block_icmp input
for icmp_type in set(block_icmp) - set(_valid_icmp_types):
log.error('%s is an invalid ICMP type', icmp_type)
block_icmp.remove(icmp_type)
new_icmp_types = set(block_icmp) - set(_current_icmp_blocks)
for icmp_type in new_icmp_types:
if not __opts__['test']:
try:
__salt__['firewalld.block_icmp'](name, icmp_type,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if prune_block_icmp:
old_icmp_types = set(_current_icmp_blocks) - set(block_icmp)
for icmp_type in old_icmp_types:
# no need to check against _valid_icmp_types here, because all
# elements in old_icmp_types are guaranteed to be in
# _current_icmp_blocks, whose elements are inherently valid
if not __opts__['test']:
try:
__salt__['firewalld.allow_icmp'](name, icmp_type,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_icmp_types or old_icmp_types:
# If we're not pruning, include current items in new output so it's clear
# that they're still present
if not prune_block_icmp:
block_icmp = list(new_icmp_types | set(_current_icmp_blocks))
ret['changes'].update({'icmp_types':
{'old': _current_icmp_blocks,
'new': block_icmp}})
    # The default zone is the only setting that is not split into permanent
    # and runtime configuration; changing it applies to both at once.
if default:
try:
default_zone = __salt__['firewalld.default_zone']()
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if name != default_zone:
if not __opts__['test']:
try:
__salt__['firewalld.set_default_zone'](name)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'default':
{'old': default_zone,
'new': name}})
try:
masquerade_ret = __salt__['firewalld.get_masquerade'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if masquerade and not masquerade_ret:
if not __opts__['test']:
try:
__salt__['firewalld.add_masquerade'](name, permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'masquerade':
{'old': '',
'new': 'Masquerading successfully set.'}})
elif not masquerade and masquerade_ret:
if not __opts__['test']:
try:
__salt__['firewalld.remove_masquerade'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'masquerade':
{'old': '',
'new': 'Masquerading successfully '
'disabled.'}})
if ports or prune_ports:
ports = ports or []
try:
_current_ports = __salt__['firewalld.list_ports'](name, permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
new_ports = set(ports) - set(_current_ports)
old_ports = []
for port in new_ports:
if not __opts__['test']:
try:
# TODO: force_masquerade to be removed in future release
__salt__['firewalld.add_port'](name, port, permanent=True, force_masquerade=False)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if prune_ports:
old_ports = set(_current_ports) - set(ports)
for port in old_ports:
if not __opts__['test']:
try:
__salt__['firewalld.remove_port'](name, port, permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_ports or old_ports:
# If we're not pruning, include current items in new output so it's clear
# that they're still present
if not prune_ports:
ports = list(new_ports | set(_current_ports))
ret['changes'].update({'ports':
{'old': _current_ports,
'new': ports}})
if port_fwd or prune_port_fwd:
port_fwd = port_fwd or []
try:
_current_port_fwd = __salt__['firewalld.list_port_fwd'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
port_fwd = [_parse_forward(fwd) for fwd in port_fwd]
_current_port_fwd = [
ForwardingMapping(
srcport=fwd['Source port'],
destport=fwd['Destination port'],
protocol=fwd['Protocol'],
destaddr=fwd['Destination address']
) for fwd in _current_port_fwd]
new_port_fwd = set(port_fwd) - set(_current_port_fwd)
old_port_fwd = []
for fwd in new_port_fwd:
if not __opts__['test']:
try:
# TODO: force_masquerade to be removed in future release
__salt__['firewalld.add_port_fwd'](name, fwd.srcport,
fwd.destport, fwd.protocol, fwd.destaddr, permanent=True,
force_masquerade=False)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if prune_port_fwd:
old_port_fwd = set(_current_port_fwd) - set(port_fwd)
for fwd in old_port_fwd:
if not __opts__['test']:
try:
__salt__['firewalld.remove_port_fwd'](name, fwd.srcport,
fwd.destport, fwd.protocol, fwd.destaddr, permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_port_fwd or old_port_fwd:
# If we're not pruning, include current items in new output so it's clear
# that they're still present
if not prune_port_fwd:
port_fwd = list(new_port_fwd | set(_current_port_fwd))
ret['changes'].update({'port_fwd':
{'old': [fwd.todict() for fwd in
_current_port_fwd],
'new': [fwd.todict() for fwd in port_fwd]}})
if services or prune_services:
services = services or []
try:
_current_services = __salt__['firewalld.list_services'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
new_services = set(services) - set(_current_services)
old_services = []
for new_service in new_services:
if not __opts__['test']:
try:
__salt__['firewalld.add_service'](new_service, name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if prune_services:
old_services = set(_current_services) - set(services)
for old_service in old_services:
if not __opts__['test']:
try:
__salt__['firewalld.remove_service'](old_service, name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_services or old_services:
# If we're not pruning, include current items in new output so it's clear
# that they're still present
if not prune_services:
services = list(new_services | set(_current_services))
ret['changes'].update({'services':
{'old': _current_services,
'new': services}})
if interfaces or prune_interfaces:
interfaces = interfaces or []
try:
_current_interfaces = __salt__['firewalld.get_interfaces'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
new_interfaces = set(interfaces) - set(_current_interfaces)
old_interfaces = []
for interface in new_interfaces:
if not __opts__['test']:
try:
__salt__['firewalld.add_interface'](name, interface,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if prune_interfaces:
old_interfaces = set(_current_interfaces) - set(interfaces)
for interface in old_interfaces:
if not __opts__['test']:
try:
__salt__['firewalld.remove_interface'](name, interface,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_interfaces or old_interfaces:
# If we're not pruning, include current items in new output so it's clear
# that they're still present
if not prune_interfaces:
interfaces = list(new_interfaces | set(_current_interfaces))
ret['changes'].update({'interfaces':
{'old': _current_interfaces,
'new': interfaces}})
if sources or prune_sources:
sources = sources or []
try:
_current_sources = __salt__['firewalld.get_sources'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
new_sources = set(sources) - set(_current_sources)
old_sources = []
for source in new_sources:
if not __opts__['test']:
try:
__salt__['firewalld.add_source'](name, source, permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if prune_sources:
old_sources = set(_current_sources) - set(sources)
for source in old_sources:
if not __opts__['test']:
try:
__salt__['firewalld.remove_source'](name, source,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_sources or old_sources:
# If we're not pruning, include current items in new output so it's clear
# that they're still present
if not prune_sources:
sources = list(new_sources | set(_current_sources))
ret['changes'].update({'sources':
{'old': _current_sources,
'new': sources}})
if rich_rules or prune_rich_rules:
rich_rules = rich_rules or []
try:
_current_rich_rules = __salt__['firewalld.get_rich_rules'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
new_rich_rules = set(rich_rules) - set(_current_rich_rules)
old_rich_rules = []
for rich_rule in new_rich_rules:
if not __opts__['test']:
try:
__salt__['firewalld.add_rich_rule'](name, rich_rule,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if prune_rich_rules:
old_rich_rules = set(_current_rich_rules) - set(rich_rules)
for rich_rule in old_rich_rules:
if not __opts__['test']:
try:
__salt__['firewalld.remove_rich_rule'](name, rich_rule,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if new_rich_rules or old_rich_rules:
# If we're not pruning, include current items in new output so it's clear
# that they're still present
if not prune_rich_rules:
rich_rules = list(new_rich_rules | set(_current_rich_rules))
ret['changes'].update({'rich_rules':
{'old': _current_rich_rules,
'new': rich_rules}})
# No changes
if ret['changes'] == {}:
ret['result'] = True
ret['comment'] = '\'{0}\' is already in the desired state.'.format(name)
return ret
# test=True and changes predicted
if __opts__['test']:
ret['result'] = None
# build comment string
nested.__opts__ = __opts__
comment = []
comment.append('Configuration for \'{0}\' will change:'.format(name))
comment.append(nested.output(ret['changes']).rstrip())
ret['comment'] = '\n'.join(comment)
ret['changes'] = {}
return ret
# Changes were made successfully
ret['result'] = True
ret['comment'] = '\'{0}\' was configured.'.format(name)
return ret |
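# A small illustration of the change-reporting convention used throughout
# ``_present`` above: when pruning is disabled, the 'new' side of a change
# entry is the union of the newly added items and what was already present, so
# unchanged items remain visible.  The data below is hypothetical.
def report_changes(desired, current, prune):
    new_items = set(desired) - set(current)
    old_items = set(current) - set(desired) if prune else set()
    if not new_items and not old_items:
        return {}  # nothing to report
    new_view = desired if prune else new_items | set(current)
    return {'old': sorted(current), 'new': sorted(new_view)}


if __name__ == '__main__':
    print(report_changes(['ssh', 'https'], ['ssh', 'dhcpv6-client'], prune=False))
    print(report_changes(['ssh', 'https'], ['ssh', 'dhcpv6-client'], prune=True)) |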
def todict(self):
'''
Returns a pretty dictionary meant for command line output.
'''
return {
'Source port': self.srcport,
'Destination port': self.destport,
'Protocol': self.protocol,
'Destination address': self.destaddr} |
def _connect():
'''
Return server object used to interact with Jenkins.
:return: server object used to interact with Jenkins
'''
jenkins_url = __salt__['config.get']('jenkins.url') or \
__salt__['config.get']('jenkins:url') or \
__salt__['pillar.get']('jenkins.url')
jenkins_user = __salt__['config.get']('jenkins.user') or \
__salt__['config.get']('jenkins:user') or \
__salt__['pillar.get']('jenkins.user')
jenkins_password = __salt__['config.get']('jenkins.password') or \
__salt__['config.get']('jenkins:password') or \
__salt__['pillar.get']('jenkins.password')
if not jenkins_url:
raise SaltInvocationError('No Jenkins URL found.')
return jenkins.Jenkins(jenkins_url,
username=jenkins_user,
password=jenkins_password) |
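# ``_connect`` above resolves each Jenkins setting by falling through minion
# config ('jenkins.url'), nested config ('jenkins:url'), and pillar.  This
# sketch models that fallback with plain dicts standing in for ``config.get``
# and ``pillar.get``; the sample values are hypothetical.
def lookup(key, config, pillar):
    return config.get('jenkins.%s' % key) or \
        config.get('jenkins:%s' % key) or \
        pillar.get('jenkins.%s' % key)


if __name__ == '__main__':
    config = {'jenkins:url': 'http://jenkins.example.com:8080'}
    pillar = {'jenkins.user': 'admin'}
    print(lookup('url', config, pillar))       # found in nested config
    print(lookup('user', config, pillar))      # found in pillar
    print(lookup('password', config, pillar))  # None -> _connect would get no password |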
def _retrieve_config_xml(config_xml, saltenv):
'''
Helper to cache the config XML and raise a CommandExecutionError if we fail
to do so. If we successfully cache the file, return the cached path.
'''
ret = __salt__['cp.cache_file'](config_xml, saltenv)
if not ret:
raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml))
return ret |
def job_exists(name=None):
    '''
    Check whether the job exists in configured Jenkins jobs.

    :param name: The name of the job to check.
    :return: True if the job exists, False if it does not.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.job_exists jobname
    '''
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
    return bool(server.job_exists(name))
def get_job_info(name=None):
    '''
    Return information about the Jenkins job.

    :param name: The name of the job for which to return information.
    :return: Information about the Jenkins job.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.get_job_info jobname
    '''
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if not job_exists(name):
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
job_info = server.get_job_info(name)
if job_info:
return job_info
return False |
def build_job(name=None, parameters=None):
    '''
    Initiate a build for the provided job.

    :param name: The name of the job to build.
    :param parameters: Parameters to send to the job.
    :return: True if successful, otherwise an exception is raised.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.build_job jobname
    '''
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if not job_exists(name):
raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name))
try:
server.build_job(name, parameters)
except jenkins.JenkinsException as err:
raise CommandExecutionError(
'Encountered error building job \'{0}\': {1}'.format(name, err)
)
return True |
def create_job(name=None,
config_xml=None,
saltenv='base'):
    '''
    Create a new Jenkins job and return the configuration XML used to create
    it.

    :param name: The name of the job to create.
    :param config_xml: The configuration file to use to create the job.
    :param saltenv: The environment to look for the file in.
    :return: The configuration file used for the job.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.create_job jobname
        salt '*' jenkins.create_job jobname config_xml='salt://jenkins/config.xml'
    '''
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
if job_exists(name):
raise CommandExecutionError('Job \'{0}\' already exists'.format(name))
if not config_xml:
config_xml = jenkins.EMPTY_CONFIG_XML
else:
config_xml_file = _retrieve_config_xml(config_xml, saltenv)
with salt.utils.files.fopen(config_xml_file) as _fp:
config_xml = salt.utils.stringutils.to_unicode(_fp.read())
server = _connect()
try:
server.create_job(name, config_xml)
except jenkins.JenkinsException as err:
raise CommandExecutionError(
'Encountered error creating job \'{0}\': {1}'.format(name, err)
)
return config_xml |
def delete_job(name=None):
    '''
    Delete the specified Jenkins job.

    :param name: The name of the job to delete.
    :return: True if the job was deleted successfully.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.delete_job jobname
    '''
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if not job_exists(name):
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
try:
server.delete_job(name)
except jenkins.JenkinsException as err:
raise CommandExecutionError(
'Encountered error deleting job \'{0}\': {1}'.format(name, err)
)
return True |