input (stringlengths 49-3.53k) | target (stringclasses, 19 values)
---|---|
def get_callable_handler_function(src, handler):
"""Translate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split(".")
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name) | def get_prep_value(self, value):
return ','.join(value) |
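A minimal usage sketch for `get_callable_handler_function` above; the project path and handler string are illustrative assumptions. Note that the function chdirs into `src` and then joins `src` onto the handler filename, so passing an absolute path avoids surprises.
# Hypothetical project: /tmp/myproj/service.py defines handler(event, context).
fn = get_callable_handler_function("/tmp/myproj", "service.handler")
print(fn({"hello": "world"}, None))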
def get_callable_handler_function(src, handler):
"""Translate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split(".")
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name) | def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency) |
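A quick sanity check of `get_concurrency`'s clamping behaviour; the config dicts are illustrative.
assert get_concurrency({"concurrency": "10"}) == 10  # int() coerces string values
assert get_concurrency({"concurrency": -5}) == 0     # negatives are clamped to 0
assert get_concurrency({}) == 0                      # missing key defaults to 0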
def get_callable_handler_function(src, handler):
"""Translate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split(".")
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name) | def test_iterators_are_a_type(self):
it = iter(range(1,6))
total = 0
for num in it:
total += num
self.assertEqual(15, total) |
def get_callable_handler_function(src, handler):
"""Translate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split(".")
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name) | def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out') |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure)) |
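A hedged sketch of `get_role_name`'s output, assuming ARN_PREFIXES maps partitioned regions to their ARN prefix and every other region falls back to "aws".
ARN_PREFIXES = {"cn-north-1": "aws-cn"}  # assumed mapping; the real table may differ
print(get_role_name("us-east-1", "123456789012", "lambda_basic_execution"))
# -> arn:aws:iam::123456789012:role/lambda_basic_execution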
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def load_source(module_name, module_path):
"""Loads a python module from the path of the corresponding file."""
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(module_name, module_path)
module = loader.load_module()
return module |
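`load_source` above reimplements the removed `imp.load_source` on top of importlib; note that both branches assume Python 3, so on Python 2 the final `return module` would raise NameError. A usage sketch with an illustrative path:
module = load_source("service", "/tmp/myproj/service.py")  # hypothetical file
handler = getattr(module, "handler")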
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def __init__(self, field):
self.field = field |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ? |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value)) |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def __delete__(self, instance):
del instance.__dict__[self.field.name]
setattr(instance, self.field.attname, json.dumps(None)) |
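The `__init__`/`__get__`/`__set__`/`__delete__` targets in the rows above read like pieces of a Django JSON-field descriptor; a minimal sketch assembling two of them under an assumed class name:
import json

class JSONDescriptor(object):  # hypothetical assembly of the snippets above
    def __init__(self, field):
        self.field = field

    def __set__(self, instance, value):
        # Keep the Python object on the instance and mirror the serialized
        # form onto the field's storage attribute (e.g. "<name>_json").
        instance.__dict__[self.field.name] = value
        setattr(instance, self.field.attname, json.dumps(value))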
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def get_attname(self):
return "%s_json" % self.name |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls) |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
If True, copy only the minimal template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_templates",
)
for filename in os.listdir(templates_path):
if (minimal and filename == "event.json") or filename.endswith(".pyc"):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src) |
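Usage sketch for `init`; the destination directory is an illustrative assumption:
init("/tmp/new-lambda-project", minimal=True)  # copies templates, skipping event.json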
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def cleanup_old_versions(
src, keep_last_versions, config_file="config.yaml", profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
Won't delete $LATEST or any aliased versions.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param int keep_last_versions:
The number of recent versions to keep; anything older is deleted.
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
response = client.list_versions_by_function(
FunctionName=cfg.get("function_name"),
)
versions = response.get("Versions")
if len(response.get("Versions")) < keep_last_versions:
print("Nothing to delete. (Too few versions published)")
else:
version_numbers = [
elem.get("Version") for elem in versions[1:-keep_last_versions]
]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get("function_name"),
Qualifier=version_number,
)
except botocore.exceptions.ClientError as e:
print(f"Skipping Version {version_number}: {e}") |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def deploy(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function to AWS Lambda.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc
)
else:
create_function(cfg, path_to_zip_file) |
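Usage sketch for `deploy`; the paths are illustrative assumptions:
deploy("/tmp/myproj", requirements="requirements.txt", preserve_vpc=True)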
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def deploy_s3(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function via AWS S3.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg,
path_to_zip_file,
existing_config,
use_s3=use_s3,
s3_file=s3_file,
preserve_vpc=preserve_vpc,
)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file) |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def upload(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Uploads a new function to AWS S3.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file) |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def invoke(
src,
event_file="event.json",
config_file="config.yaml",
profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str event_file:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ["AWS_PROFILE"] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get("environment_variables")
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get("handler")
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get("timeout")
if timeout:
context = LambdaContext(cfg.get("function_name"), timeout)
else:
context = LambdaContext(cfg.get("function_name"))
start = time.time()
results = fn(event, context)
end = time.time()
print("{0}".format(results))
if verbose:
print(
"\nexecution time: {:.8f}s\nfunction execution "
"timeout: {:2}s".format(end - start, cfg.get("timeout", 15))
) |
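Usage sketch for `invoke`; the paths are illustrative assumptions:
invoke("/tmp/myproj", event_file="event.json", verbose=True)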
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def to_python(self, value):
if not value:
return [] |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def get_prep_value(self, value):
return ','.join(value) |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency) |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def test_iterators_are_a_type(self):
it = iter(range(1,6))
total = 0
for num in it:
total += num
self.assertEqual(15, total) |
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out') |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure)) |
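Usage sketch for `get_account_id`; the profile name is an illustrative assumption, and the key arguments may be None when the default credential chain applies:
account_id = get_account_id("default", None, None, region="us-east-1")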
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def load_source(module_name, module_path):
"""Loads a python module from the path of the corresponding file."""
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(module_name, module_path)
module = loader.load_module()
return module |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def __init__(self, field):
self.field = field |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ? |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value)) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def __delete__(self, instance):
del instance.__dict__[self.field.name]
setattr(instance, self.field.attname, json.dumps(None)) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def get_attname(self):
return "%s_json" % self.name |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
If True, copy only the minimal template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_templates",
)
for filename in os.listdir(templates_path):
if (minimal and filename == "event.json") or filename.endswith(".pyc"):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def cleanup_old_versions(
src, keep_last_versions, config_file="config.yaml", profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
Won't delete $LATEST or any aliased versions.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param int keep_last_versions:
The number of recent versions to keep; anything older is deleted.
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
response = client.list_versions_by_function(
FunctionName=cfg.get("function_name"),
)
versions = response.get("Versions")
if len(response.get("Versions")) < keep_last_versions:
print("Nothing to delete. (Too few versions published)")
else:
version_numbers = [
elem.get("Version") for elem in versions[1:-keep_last_versions]
]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get("function_name"),
Qualifier=version_number,
)
except botocore.exceptions.ClientError as e:
print(f"Skipping Version {version_number}: {e}") |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def deploy(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function to AWS Lambda.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc
)
else:
create_function(cfg, path_to_zip_file) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def deploy_s3(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function via AWS S3.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg,
path_to_zip_file,
existing_config,
use_s3=use_s3,
s3_file=s3_file,
preserve_vpc=preserve_vpc,
)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def upload(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Uploads a new function to AWS S3.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def invoke(
src,
event_file="event.json",
config_file="config.yaml",
profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str event_file:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ["AWS_PROFILE"] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get("environment_variables")
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get("handler")
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get("timeout")
if timeout:
context = LambdaContext(cfg.get("function_name"), timeout)
else:
context = LambdaContext(cfg.get("function_name"))
start = time.time()
results = fn(event, context)
end = time.time()
print("{0}".format(results))
if verbose:
print(
"\nexecution time: {:.8f}s\nfunction execution "
"timeout: {:2}s".format(end - start, cfg.get("timeout", 15))
) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def to_python(self, value):
if not value:
return [] |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def get_prep_value(self, value):
return ','.join(value) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def test_iterators_are_a_type(self):
it = iter(range(1,6))
total = 0
for num in it:
total += num
self.assertEqual(15, total) |
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account") | def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out') |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure)) |
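Usage sketch for `get_client`; the profile name is an illustrative assumption. Note that `boto3.setup_default_session` mutates global boto3 state, so the credentials and region passed here affect any later default-session clients:
lambda_client = get_client("lambda", "default", None, None, region="us-east-1")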
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def load_source(module_name, module_path):
"""Loads a python module from the path of the corresponding file."""
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(module_name, module_path)
module = loader.load_module()
return module |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def __init__(self, field):
self.field = field |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ? |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value)) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def __delete__(self, instance):
del instance.__dict__[self.field.name]
setattr(instance, self.field.attname, json.dumps(None)) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def get_attname(self):
return "%s_json" % self.name |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
If True, copy only the minimal template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_templates",
)
for filename in os.listdir(templates_path):
if (minimal and filename == "event.json") or filename.endswith(".pyc"):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def cleanup_old_versions(
src, keep_last_versions, config_file="config.yaml", profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
Won't delete $LATEST or any aliased versions.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param int keep_last_versions:
The number of recent versions to keep; anything older is deleted.
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
response = client.list_versions_by_function(
FunctionName=cfg.get("function_name"),
)
versions = response.get("Versions")
if len(response.get("Versions")) < keep_last_versions:
print("Nothing to delete. (Too few versions published)")
else:
version_numbers = [
elem.get("Version") for elem in versions[1:-keep_last_versions]
]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get("function_name"),
Qualifier=version_number,
)
except botocore.exceptions.ClientError as e:
print(f"Skipping Version {version_number}: {e}") |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def deploy(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function to AWS Lambda.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc
)
else:
create_function(cfg, path_to_zip_file) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def deploy_s3(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function via AWS S3.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg,
path_to_zip_file,
existing_config,
use_s3=use_s3,
s3_file=s3_file,
preserve_vpc=preserve_vpc,
)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def upload(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Uploads a new function to AWS S3.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def invoke(
src,
event_file="event.json",
config_file="config.yaml",
profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str event_file:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ["AWS_PROFILE"] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get("environment_variables")
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get("handler")
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get("timeout")
if timeout:
context = LambdaContext(cfg.get("function_name"), timeout)
else:
context = LambdaContext(cfg.get("function_name"))
start = time.time()
results = fn(event, context)
end = time.time()
print("{0}".format(results))
if verbose:
print(
"\nexecution time: {:.8f}s\nfunction execution "
"timeout: {:2}s".format(end - start, cfg.get("timeout", 15))
) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def to_python(self, value):
if not value:
return [] |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def get_prep_value(self, value):
return ','.join(value) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def test_iterators_are_a_type(self):
it = iter(range(1,6))
total = 0
for num in it:
total += num
self.assertEqual(15, total) |
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client) | def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out') |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Prefer environment variables over config values when present.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure)) |
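For reference, a minimal sketch of the config keys `create_function` above reads; every value here is an illustrative assumption rather than a canonical config:
cfg = {
    "function_name": "my-function",    # may be overridden by LAMBDA_FUNCTION_NAME
    "handler": "service.handler",
    "runtime": "python2.7",            # default used by the snippet above
    "role": "lambda_basic_execution",  # default role name
    "region": "us-east-1",
    "timeout": 15,
    "memory_size": 512,
    "bucket_name": "my-bucket",        # used when use_s3=True (or S3_BUCKET_NAME)
}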
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Prefer environment variables over config values when present.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def load_source(module_name, module_path):
"""Loads a python module from the path of the corresponding file."""
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(module_name, module_path)
module = loader.load_module()
return module |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Prefer environment variables over config values when present.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def __init__(self, field):
self.field = field |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Prefer environment variables over config values when present.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ? |
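For context, a minimal sketch of how create_function above might be driven. The config keys mirror those the function reads via cfg.get; every concrete value below (names, paths, region) is a made-up placeholder, not part of the dataset:

# Hypothetical example values only -- create_function pulls everything it
# needs from this mapping and uploads the zip built beforehand.
example_cfg = {
    "function_name": "my-service",
    "handler": "service.handler",
    "runtime": "python3.9",
    "region": "us-east-1",
    "role": "lambda_basic_execution",
    "timeout": 15,
    "memory_size": 512,
}
create_function(example_cfg, "dist/my-service.zip")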
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value)) |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def __delete__(self, instance):
del(instance.__dict__[self.field.name])
setattr(instance, self.field.attname, json.dumps(None)) |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def get_attname(self):
return "%s_json" % self.name |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls) |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
        The directory into which the template Lambda project files are copied.
:param bool minimal:
Minimal possible template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_templates",
)
for filename in os.listdir(templates_path):
if (minimal and filename == "event.json") or filename.endswith(".pyc"):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src) |
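A short usage sketch for init, assuming the package ships its project_templates directory next to this module; the destination path is hypothetical:

# Copies the bundled template files (config.yaml, service.py, ...) into a new
# project directory; minimal=True skips event.json.
init("./my-lambda-project", minimal=True)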
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def cleanup_old_versions(
src, keep_last_versions, config_file="config.yaml", profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
    Won't delete $LATEST or any aliased versions.
    :param str src:
        The path to your Lambda-ready project (the folder must contain a
        valid config.yaml and a handler module, e.g. service.py).
    :param int keep_last_versions:
        The number of recent versions to keep and not delete.
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
response = client.list_versions_by_function(
FunctionName=cfg.get("function_name"),
)
versions = response.get("Versions")
if len(response.get("Versions")) < keep_last_versions:
print("Nothing to delete. (Too few versions published)")
else:
version_numbers = [
elem.get("Version") for elem in versions[1:-keep_last_versions]
]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get("function_name"),
Qualifier=version_number,
)
except botocore.exceptions.ClientError as e:
print(f"Skipping Version {version_number}: {e}") |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def deploy(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function to AWS Lambda.
    :param str src:
        The path to your Lambda-ready project (the folder must contain a
        valid config.yaml and a handler module, e.g. service.py).
    :param str local_package:
        The path to a local package that should also be included in the
        deploy (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc
)
else:
create_function(cfg, path_to_zip_file) |
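A hypothetical end-to-end invocation of deploy (deploy_s3 below follows the same shape but stages the zip in S3 first); the project path, requirements file, and profile name are placeholders:

# Builds ./my-lambda into a zip, then creates the function or updates it in
# place depending on whether get_function_config finds an existing one.
deploy("./my-lambda", requirements="requirements.txt", profile_name="dev")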
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def deploy_s3(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function via AWS S3.
    :param str src:
        The path to your Lambda-ready project (the folder must contain a
        valid config.yaml and a handler module, e.g. service.py).
    :param str local_package:
        The path to a local package that should also be included in the
        deploy (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg,
path_to_zip_file,
existing_config,
use_s3=use_s3,
s3_file=s3_file,
preserve_vpc=preserve_vpc,
)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file) |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def upload(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Uploads a new function to AWS S3.
    :param str src:
        The path to your Lambda-ready project (the folder must contain a
        valid config.yaml and a handler module, e.g. service.py).
    :param str local_package:
        The path to a local package that should also be included in the
        deploy (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file) |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def invoke(
src,
event_file="event.json",
config_file="config.yaml",
profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
    :param str src:
        The path to your Lambda-ready project (the folder must contain a
        valid config.yaml and a handler module, e.g. service.py).
    :param str event_file:
        An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ["AWS_PROFILE"] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get("environment_variables")
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
    # Put src on sys.path so the handler can import local modules.
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get("handler")
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get("timeout")
if timeout:
context = LambdaContext(cfg.get("function_name"), timeout)
else:
context = LambdaContext(cfg.get("function_name"))
start = time.time()
results = fn(event, context)
end = time.time()
print("{0}".format(results))
if verbose:
print(
"\nexecution time: {:.8f}s\nfunction execution "
"timeout: {:2}s".format(end - start, cfg.get("timeout", 15))
) |
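A sketch of exercising invoke locally, assuming the project directory holds config.yaml, event.json, and the handler module named in the config; the path is illustrative:

# Runs the handler in-process against the JSON event with a stub
# LambdaContext, printing the result (plus timing when verbose=True).
invoke("./my-lambda", event_file="event.json", verbose=True)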
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def to_python(self, value):
if not value:
return [] |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def get_prep_value(self, value):
return ','.join(value) |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency) |
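For illustration, how get_concurrency maps config values onto the reserved-concurrency call (sample values only):

assert get_concurrency({"concurrency": 10}) == 10  # reserve 10 executions
assert get_concurrency({}) == 0                    # unset: no reservation made
assert get_concurrency({"concurrency": -5}) == 0   # negatives clamp to zero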
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def test_iterators_are_a_type(self):
it = iter(range(1,6))
total = 0
for num in it:
total += num
    self.assertEqual(15, total) |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out') |
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure)) |
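A minimal sketch of the tag-reconciliation step at the end of update_function: when the desired tags differ from the deployed ones, the old keys are cleared with untag_resource before tag_resource applies the new set. The dictionaries below are made-up stand-ins for the boto3 responses:

desired = {"team": "platform", "env": "prod"}
existing = {"team": "platform", "env": "staging", "owner": "alice"}
if desired != existing:
    stale_keys = list(existing.keys())  # passed as TagKeys to untag_resource
    print(stale_keys)                   # ['team', 'env', 'owner']
    # tag_resource(Resource=..., Tags=desired) then re-applies the wanted set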
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def load_source(module_name, module_path):
"""Loads a python module from the path of the corresponding file."""
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(module_name, module_path)
module = loader.load_module()
return module |
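A usage sketch for load_source; the module name and file path are hypothetical:

# Imports ./service.py under the name "service" and resolves its handler.
module = load_source("service", "./service.py")
handler = getattr(module, "handler")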
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def __init__(self, field):
self.field = field |
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ? |
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Environment variables (if set) take precedence over the config file.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value)) |
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def __delete__(self, instance):
del(instance.__dict__[self.field.name])
setattr(instance, self.field.attname, json.dumps(None)) |
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def get_attname(self):
return "%s_json" % self.name |
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls) |
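
The JSONDescriptor pieces shown in the targets above (__get__, __set__, __delete__, get_attname, contribute_to_class) only make sense together. Below is a minimal standalone sketch, not the real Django machinery: FieldStub is a hypothetical stand-in, and since the __get__ fragment above is truncated, the value it returns here is an assumption.

import json

class FieldStub:
    # Hypothetical stand-in exposing only what the descriptor reads.
    name = "data"
    attname = "data_json"  # mirrors get_attname's "<name>_json"

class JSONDescriptor:
    def __init__(self, field):
        self.field = field

    def __get__(self, instance, owner):
        if instance is None:
            raise AttributeError  # accessed on the class, not an instance
        return instance.__dict__.get(self.field.name)  # assumed return value

    def __set__(self, instance, value):
        instance.__dict__[self.field.name] = value
        setattr(instance, self.field.attname, json.dumps(value))

    def __delete__(self, instance):
        del instance.__dict__[self.field.name]
        setattr(instance, self.field.attname, json.dumps(None))

class Record:
    data = JSONDescriptor(FieldStub())

r = Record()
r.data = {"a": 1}
print(r.data)       # {'a': 1}
print(r.data_json)  # '{"a": 1}'
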
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
Minimal possible template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_templates",
)
for filename in os.listdir(templates_path):
if (minimal and filename == "event.json") or filename.endswith(".pyc"):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src) |
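
A usage sketch for init (the destination path is hypothetical); it assumes the project_templates directory ships next to the module, as the code above expects.

# Copy every template file except event.json into a new project folder.
init("my_lambda_project", minimal=True)
# minimal=False also copies event.json for local invocation.
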
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def cleanup_old_versions(
src, keep_last_versions, config_file="config.yaml", profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
    Won't delete $LATEST or any aliased version.
    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g. service.py).
:param int keep_last_versions:
The number of recent versions to keep and not delete
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
response = client.list_versions_by_function(
FunctionName=cfg.get("function_name"),
)
versions = response.get("Versions")
        if len(versions) < keep_last_versions:
print("Nothing to delete. (Too few versions published)")
else:
version_numbers = [
elem.get("Version") for elem in versions[1:-keep_last_versions]
]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get("function_name"),
Qualifier=version_number,
)
except botocore.exceptions.ClientError as e:
print(f"Skipping Version {version_number}: {e}") |
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def deploy(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function to AWS Lambda.
:param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g. service.py).
    :param str local_package:
        The path to a local package which should be included in the deploy
        as well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc
)
else:
create_function(cfg, path_to_zip_file) |
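
A minimal deploy invocation, assuming the project layout the docstring describes; the path and profile name are hypothetical:

deploy(
    "path/to/project",
    requirements="requirements.txt",
    profile_name="dev",   # hypothetical AWS profile
    preserve_vpc=True,    # reuse the function's existing VpcConfig on update
)
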
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def deploy_s3(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function via AWS S3.
:param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g. service.py).
    :param str local_package:
        The path to a local package which should be included in the deploy
        as well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg,
path_to_zip_file,
existing_config,
use_s3=use_s3,
s3_file=s3_file,
preserve_vpc=preserve_vpc,
)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file) |
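
deploy_s3 follows the same flow but stages the zip in S3 first; note from update_function above that S3_BUCKET_NAME, when set, overrides bucket_name from config.yaml. A sketch with hypothetical bucket and path:

import os
os.environ["S3_BUCKET_NAME"] = "my-staging-bucket"  # hypothetical override
deploy_s3("path/to/project", config_file="config.yaml")
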
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def upload(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Uploads a new function to AWS S3.
:param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g. service.py).
    :param str local_package:
        The path to a local package which should be included in the deploy
        as well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file) |
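
upload stops after staging the artifact: it builds the zip and puts it in the configured bucket without creating or updating any Lambda function (path is hypothetical):

upload("path/to/project", requirements="requirements.txt")
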
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def invoke(
src,
event_file="event.json",
config_file="config.yaml",
profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g. service.py).
    :param str event_file:
        An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ["AWS_PROFILE"] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get("environment_variables")
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get("handler")
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get("timeout")
if timeout:
context = LambdaContext(cfg.get("function_name"), timeout)
else:
context = LambdaContext(cfg.get("function_name"))
start = time.time()
results = fn(event, context)
end = time.time()
print("{0}".format(results))
if verbose:
print(
"\nexecution time: {:.8f}s\nfunction execution "
"timeout: {:2}s".format(end - start, cfg.get("timeout", 15))
) |
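
A local simulation sketch: invoke loads config.yaml, exports any environment_variables, reads event.json, and times the handler (path is hypothetical):

invoke("path/to/project", event_file="event.json", verbose=True)
# verbose=True prints execution time against the configured timeout.
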
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def to_python(self, value):
if not value:
return [] |
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def get_prep_value(self, value):
return ','.join(value) |
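
Taken together with the truncated to_python fragment earlier, get_prep_value implies comma-separated storage. A hedged round-trip sketch outside Django; the split is an assumed inverse, not code from the source:

def get_prep_value(value):
    return ','.join(value)

def to_python(value):
    if not value:
        return []
    return value.split(',')  # assumed inverse of get_prep_value

assert to_python(get_prep_value(['a', 'b'])) == ['a', 'b']
assert to_python('') == []
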
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency) |
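
The clamping behaviour is easy to pin down with a few cases, assuming the get_concurrency above is in scope (YAML may hand the value over as a string):

assert get_concurrency({"concurrency": 5}) == 5
assert get_concurrency({"concurrency": "3"}) == 3  # int() coerces strings
assert get_concurrency({"concurrency": -2}) == 0   # negatives clamp to zero
assert get_concurrency({}) == 0                    # absent key defaults to 0
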
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def test_iterators_are_a_type(self):
        it = iter(range(1, 6))
        total = 0
        for num in it:
            total += num
        self.assertEqual(15, total) |
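
The explicit loop above is the manual form of sum over an iterator; a short sketch of the same accumulation, including the exhaustion it implies:

it = iter(range(1, 6))
assert sum(it) == 15  # same total as the loop
assert sum(it) == 0   # the iterator is now exhausted
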
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
    # Prefer the S3_BUCKET_NAME environment variable over the config value.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) | def test_iterating_with_next(self):
        stages = iter(['alpha', 'beta', 'gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out') |
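
Passing a default to next avoids the try/except entirely; a small sketch of the same exhaustion check:

stages = iter(['alpha', 'beta', 'gamma'])
for _ in range(3):
    next(stages)
assert next(stages, 'done') == 'done'  # default instead of StopIteration
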
def upload_s3(cfg, path_to_zip_file, *use_s3):
"""Upload a function to AWS S3."""
print("Uploading your new Lambda function")
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"s3",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
byte_stream = b""
with open(path_to_zip_file, mode="rb") as fh:
byte_stream = fh.read()
s3_key_prefix = cfg.get("s3_key_prefix", "/dist")
checksum = hashlib.new("md5", byte_stream).hexdigest()
timestamp = str(time.time())
filename = "{prefix}{checksum}-{ts}.zip".format(
prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
)
    # Prefer the environment variables over the config values when set.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
kwargs = {
"Bucket": "{}".format(buck_name),
"Key": "{}".format(filename),
"Body": byte_stream,
}
client.put_object(**kwargs)
print("Finished uploading {} to S3 bucket {}".format(func_name, buck_name))
if use_s3:
return filename | def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure)) |
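
The object key upload_s3 generates concatenates the prefix, an MD5 checksum, and a timestamp, so repeated uploads of identical bytes still get distinct keys; the same naming logic in isolation:

import hashlib
import time

byte_stream = b"example zip contents"  # stand-in for the real archive
checksum = hashlib.new("md5", byte_stream).hexdigest()
filename = "{prefix}{checksum}-{ts}.zip".format(
    prefix="/dist", checksum=checksum, ts=str(time.time()),
)
print(filename)  # e.g. /dist<32-hex-chars>-<epoch>.zip
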