diff --git a/.gitignore b/.gitignore
index a65824a..fedee1b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,3 +60,6 @@ target/
# Jetbrains/PyCharm project files
.idea/
+
+# virtualenv
+.virtual
diff --git a/Makefile b/Makefile
index ed7320a..7361bd2 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ clean-build:
rm -fr dist/
rm -fr .eggs/
find . -name '*.egg-info' -exec rm -fr {} +
- find . -name '*.egg' -exec rm -f {} +
+ find . -name '*.egg' -exec rm -fr {} +
clean-pyc:
find . -name '*.pyc' -exec rm -f {} +
diff --git a/README.rst b/README.rst
index 39f260e..b50055e 100644
--- a/README.rst
+++ b/README.rst
@@ -112,6 +112,20 @@ When you're ready to deploy your code to Lambda simply run:
(pylambda) $ lambda deploy
+There is also an option to upload your lambda code to S3 and deploy it to Lambda; run:
+
+.. code:: bash
+
+ (pylambda) $ lambda deploy_s3
+
+This command will zip and upload your code to S3 storage based on your configuration (bucket/s3_key_prefix) and then create (or update) the lambda with your zipped code.
+
+If you want to use different configuration files (e.g. dev and prod), simply create one and add the --config-file-path option when deploying the lambda:
+
+.. code:: bash
+
+ (pylambda) $ lambda deploy_s3 --config-file-path config-dev.yaml
+
The deploy script will evaluate your virtualenv and identify your project dependencies. It will package these up along with your handler function to a zip file that it then uploads to AWS Lambda.
You can now log into the `AWS Lambda management console `_ to verify the code deployed successfully.
@@ -176,6 +190,74 @@ s3_key_prefix: 'path/to/file/'
```
Your role must have `s3:PutObject` permission on the bucket/key that you specify for the upload to work properly. Once you have that set, you can execute `lambda upload` to initiate the transfer.
+Trigger
+=======
+It is possible to configure your lambda to use a trigger. Currently supported events are:
+ * event (scheduled event/cron)
+ * sns topic
+ * S3 bucket
+Before using a trigger you need to provide configuration appropriate to the trigger type (use only the one that matches your needs).
+Event
+```
+type: event
+name: trigger_name
+frequency: rate(1 hour)
+```
+SNS topic - if the given topic doesn't exist, it will be created
+```
+type: sns
+name: sns_topic_name
+buckets:
+ bucket1:
+ bucket_name: 'bucket_name'
+ events:
+ - 's3:ReducedRedundancyLostObject'
+ - 's3:ObjectCreated:*'
+ - 's3:ObjectCreated:Put'
+ - 's3:ObjectCreated:Post'
+ - 's3:ObjectCreated:Copy'
+ - 's3:ObjectCreated:CompleteMultipartUpload'
+ - 's3:ObjectRemoved:*'
+ - 's3:ObjectRemoved:Delete'
+ - 's3:ObjectRemoved:DeleteMarkerCreated'
+ prefix: ''
+ suffix: ''
+ bucket2:
+ bucket_name: 'bucket_name'
+ events:
+ - 's3:ReducedRedundancyLostObject'
+ - 's3:ObjectCreated:*'
+ - 's3:ObjectCreated:Put'
+ - 's3:ObjectCreated:Post'
+ - 's3:ObjectCreated:Copy'
+ - 's3:ObjectCreated:CompleteMultipartUpload'
+ - 's3:ObjectRemoved:*'
+ - 's3:ObjectRemoved:Delete'
+ - 's3:ObjectRemoved:DeleteMarkerCreated'
+ prefix: ''
+ suffix: ''
+```
+S3 Bucket
+```
+type: bucket
+bucket_name: 'bucket_name'
+ events:
+ - 's3:ReducedRedundancyLostObject'
+ - 's3:ObjectCreated:*'
+ - 's3:ObjectCreated:Put'
+ - 's3:ObjectCreated:Post'
+ - 's3:ObjectCreated:Copy'
+ - 's3:ObjectCreated:CompleteMultipartUpload'
+ - 's3:ObjectRemoved:*'
+ - 's3:ObjectRemoved:Delete'
+ - 's3:ObjectRemoved:DeleteMarkerCreated'
+```
+
+Ignore file
+===========
+If you don't want to include some files in your zipped lambda package, simply add file patterns to
+```.lambdaignore``` file.
+
Development
===========
diff --git a/aws_lambda/__init__.py b/aws_lambda/__init__.py
index 231c19e..ac244b6 100755
--- a/aws_lambda/__init__.py
+++ b/aws_lambda/__init__.py
@@ -2,7 +2,7 @@
# flake8: noqa
__author__ = 'Nick Ficano'
__email__ = 'nficano@gmail.com'
-__version__ = '2.1.1'
+__version__ = '2.2.1'
from .aws_lambda import deploy, invoke, init, build, upload, cleanup_old_versions
diff --git a/aws_lambda/aws_lambda.py b/aws_lambda/aws_lambda.py
index 836b633..df759e9 100755
--- a/aws_lambda/aws_lambda.py
+++ b/aws_lambda/aws_lambda.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
-import hashlib
+import glob
import json
import logging
import os
@@ -18,13 +18,12 @@
import botocore
import pip
import yaml
+from pip._vendor.distlib._backport import shutil
from .helpers import archive
from .helpers import get_environment_variable_value
from .helpers import mkdir
from .helpers import read
-from .helpers import timestamp
-
ARN_PREFIXES = {
'us-gov-west-1': 'aws-us-gov',
@@ -33,11 +32,12 @@
log = logging.getLogger(__name__)
-def cleanup_old_versions(src, keep_last_versions):
+def cleanup_old_versions(src, keep_last_versions, config_file_path=None, aws_profile=None):
"""Deletes old deployed versions of the function in AWS Lambda.
Won't delete $Latest and any aliased version
+ :param config_file_path: path to custom config.yaml file
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
@@ -47,16 +47,9 @@ def cleanup_old_versions(src, keep_last_versions):
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
- path_to_config_file = os.path.join(src, 'config.yaml')
- cfg = read(path_to_config_file, loader=yaml.load)
-
- aws_access_key_id = cfg.get('aws_access_key_id')
- aws_secret_access_key = cfg.get('aws_secret_access_key')
+ cfg = read_config_file(config_file_path, src)
- client = get_client(
- 'lambda', aws_access_key_id, aws_secret_access_key,
- cfg.get('region'),
- )
+ client = get_client('lambda', cfg, aws_profile=aws_profile)
response = client.list_versions_by_function(
FunctionName=cfg.get('function_name'),
@@ -78,9 +71,13 @@ def cleanup_old_versions(src, keep_last_versions):
.format(version_number, e.message))
-def deploy(src, requirements=False, local_package=None):
+def deploy(src, requirements=False, local_package=None, upload_to_s3=False, config_file_path=None, aws_profile=None):
"""Deploys a new function to AWS Lambda.
+ :param aws_profile: aws profile name stored in ~/.aws/credentials
+ :param config_file_path: path to custom config file
+ :param upload_to_s3: upload function code to S3
+ :param requirements:
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
@@ -89,24 +86,29 @@ def deploy(src, requirements=False, local_package=None):
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
- path_to_config_file = os.path.join(src, 'config.yaml')
- cfg = read(path_to_config_file, loader=yaml.load)
+ cfg = read_config_file(config_file_path, src)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
- path_to_zip_file = build(src, requirements, local_package)
+ path_to_zip_file = build(src, requirements, local_package, config_file_path=config_file_path)
+ filename = upload_s3(cfg, path_to_zip_file, aws_profile=aws_profile) if upload_to_s3 else None
- if function_exists(cfg, cfg.get('function_name')):
- update_function(cfg, path_to_zip_file)
+ if function_exists(cfg, cfg.get('function_name'), aws_profile=aws_profile):
+ update_function(cfg, path_to_zip_file, upload_to_s3, filename=filename, aws_profile=aws_profile)
else:
- create_function(cfg, path_to_zip_file)
+ create_function(cfg, path_to_zip_file, upload_to_s3, filename=filename, aws_profile=aws_profile)
+ if cfg.get('trigger'):
+ create_trigger(cfg, aws_profile=aws_profile)
-def upload(src, requirements=False, local_package=None):
+def upload(src, requirements=False, local_package=None, config_file_path=None, aws_profile=None):
"""Uploads a new function to AWS S3.
+ :param aws_profile: aws profile name stored in ~/.aws/credentials
+ :param config_file_path: path to custom config.yaml file
+ :param requirements:
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
@@ -115,8 +117,7 @@ def upload(src, requirements=False, local_package=None):
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
- path_to_config_file = os.path.join(src, 'config.yaml')
- cfg = read(path_to_config_file, loader=yaml.load)
+ cfg = read_config_file(config_file_path, src)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
@@ -124,12 +125,13 @@ def upload(src, requirements=False, local_package=None):
# directory.
path_to_zip_file = build(src, requirements, local_package)
- upload_s3(cfg, path_to_zip_file)
+ upload_s3(cfg, path_to_zip_file, aws_profile=aws_profile)
-def invoke(src, alt_event=None, verbose=False):
+def invoke(src, alt_event=None, verbose=False, config_file_path=None):
"""Simulates a call to your function.
+ :param config_file_path: path to custom config.yaml file
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
@@ -139,13 +141,14 @@ def invoke(src, alt_event=None, verbose=False):
Whether to print out verbose details.
"""
# Load and parse the config file.
- path_to_config_file = os.path.join(src, 'config.yaml')
- cfg = read(path_to_config_file, loader=yaml.load)
+ cfg = read_config_file(config_file_path, src)
# Load environment variables from the config file into the actual
# environment.
- for key, value in cfg.get('environment_variables').items():
- os.environ[key] = value
+ env_vars = cfg.get('environment_variables')
+ if env_vars:
+ for key, value in env_vars.items():
+ os.environ[key] = value
# Load and parse event file.
if alt_event:
@@ -199,9 +202,11 @@ def init(src, minimal=False):
copy(dest_path, src)
-def build(src, requirements=False, local_package=None):
+def build(src, requirements=False, local_package=None, config_file_path=None):
"""Builds the file bundle.
+    :param config_file_path: path to custom config.yaml file
+ :param requirements:
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
@@ -210,8 +215,7 @@ def build(src, requirements=False, local_package=None):
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
- path_to_config_file = os.path.join(src, 'config.yaml')
- cfg = read(path_to_config_file, loader=yaml.load)
+ cfg = read_config_file(config_file_path, src)
# Get the absolute path to the output directory and create it if it doesn't
# already exist.
@@ -222,13 +226,14 @@ def build(src, requirements=False, local_package=None):
# Combine the name of the Lambda function with the current timestamp to use
# for the output filename.
function_name = cfg.get('function_name')
- output_filename = '{0}-{1}.zip'.format(timestamp(), function_name)
-
+ output_filename = '{}.zip'.format(function_name)
+ build_config = defaultdict(**cfg.get('build', {}))
path_to_temp = mkdtemp(prefix='aws-lambda')
pip_install_to_target(
path_to_temp,
requirements=requirements,
local_package=local_package,
+ **cfg['build']
)
# Hack for Zope.
@@ -250,7 +255,6 @@ def build(src, requirements=False, local_package=None):
# Allow definition of source code directories we want to build into our
# zipped package.
- build_config = defaultdict(**cfg.get('build', {}))
build_source_directories = build_config.get('source_directories', '')
build_source_directories = (
build_source_directories
@@ -261,12 +265,25 @@ def build(src, requirements=False, local_package=None):
d.strip() for d in build_source_directories.split(',')
]
+ def filter_ignored_files(file_name):
+ ignore_file_path = os.path.join(src, ".lambdaignore")
+ if os.path.exists(ignore_file_path):
+ with open(ignore_file_path) as ignored:
+ ignored_patterns = map(str.strip, ignored.readlines())
+ return all(file_name not in glob.glob(entry) for entry in ignored_patterns)
+ else:
+ return True
+
files = []
- for filename in os.listdir(src):
+ listdir = os.listdir(src)
+ filtered_files = filter(filter_ignored_files, listdir)
+ for filename in filtered_files:
if os.path.isfile(filename):
if filename == '.DS_Store':
continue
- if filename == 'config.yaml':
+ if 'yaml' in filename:
+ continue
+ if filename == '.lambdaignore':
continue
print('Bundling: %r' % filename)
files.append(os.path.join(src, filename))
@@ -289,9 +306,20 @@ def build(src, requirements=False, local_package=None):
# Zip them together into a single file.
# TODO: Delete temp directory created once the archive has been compiled.
path_to_zip_file = archive('./', path_to_dist, output_filename)
+ shutil.rmtree(path_to_temp)
+ os.chdir(src)
return path_to_zip_file
+def read_config_file(config_file_path, src):
+ if config_file_path:
+ path_to_config_file = os.path.join(src, config_file_path)
+ else:
+ path_to_config_file = os.path.join(src, 'config.yaml')
+ cfg = read(path_to_config_file, loader=yaml.load)
+ return cfg
+
+
def get_callable_handler_function(src, handler):
"""Tranlate a string of the form "module.function" into a callable
function.
@@ -334,9 +362,12 @@ def _install_packages(path, packages):
:param list packages:
A list of packages to be installed via pip.
"""
+
def _filter_blacklist(package):
- blacklist = ['-i', '#', 'Python==', 'python-lambda==']
+ blacklist = ['-i', '#', 'Python==', 'python-lambda==', 'hbi-python-lambda==', 'boto3==', 'tox==', 'pip==',
+ 'setuptools', 'virtualenv==', 'click==', 'argparse==', 'botocore==']
return all(package.startswith(entry) is False for entry in blacklist)
+
filtered_packages = filter(_filter_blacklist, packages)
for package in filtered_packages:
if package.startswith('-e '):
@@ -346,7 +377,7 @@ def _filter_blacklist(package):
pip.main(['install', package, '-t', path, '--ignore-installed'])
-def pip_install_to_target(path, requirements=False, local_package=None):
+def pip_install_to_target(path, requirements=False, local_package=None, **kwargs):
"""For a given active virtualenv, gather all installed pip packages then
copy (re-install) them to the path provided.
@@ -371,6 +402,8 @@ def pip_install_to_target(path, requirements=False, local_package=None):
print('Gathering requirement packages')
data = read('requirements.txt')
packages.extend(data.splitlines())
+ if 'remote_packages' in kwargs.keys():
+ packages.extend(kwargs['remote_packages'].split(','))
if not packages:
print('No dependency packages installed!')
@@ -389,60 +422,57 @@ def get_role_name(region, account_id, role):
return 'arn:{0}:iam::{1}:role/{2}'.format(prefix, account_id, role)
-def get_account_id(aws_access_key_id, aws_secret_access_key):
- """Query STS for a users' account_id"""
- client = get_client('sts', aws_access_key_id, aws_secret_access_key)
- return client.get_caller_identity().get('Account')
+client_cache = {}
-def get_client(client, aws_access_key_id, aws_secret_access_key, region=None):
+def get_client(client, cfg, aws_profile=None):
"""Shortcut for getting an initialized instance of the boto3 client."""
-
- return boto3.client(
- client,
- aws_access_key_id=aws_access_key_id,
- aws_secret_access_key=aws_secret_access_key,
- region_name=region,
- )
+ if client not in client_cache:
+ if aws_profile:
+ log.info('Using aws profile name: {}'.format(aws_profile))
+ session = boto3.Session(profile_name=aws_profile)
+ client_cache[client] = session.client(client, region_name=cfg.get('region'))
+ else:
+ client_cache[client] = boto3.client(
+ client,
+ aws_access_key_id=cfg.get('aws_access_key_id'),
+ aws_secret_access_key=cfg.get('aws_secret_access_key'),
+ region_name=cfg.get('region'),
+ )
+ return client_cache[client]
-def create_function(cfg, path_to_zip_file):
+def create_function(cfg, path_to_zip_file, upload_to_s3=False, filename=None, aws_profile=None):
"""Register and upload a function to AWS Lambda."""
print('Creating your new Lambda function')
byte_stream = read(path_to_zip_file, binary_file=True)
- aws_access_key_id = cfg.get('aws_access_key_id')
- aws_secret_access_key = cfg.get('aws_secret_access_key')
+ role = create_role_for_function(cfg, aws_profile=aws_profile)
- account_id = get_account_id(aws_access_key_id, aws_secret_access_key)
- role = get_role_name(
- cfg.get('region'), account_id,
- cfg.get('role', 'lambda_basic_execution'),
- )
-
- client = get_client(
- 'lambda', aws_access_key_id, aws_secret_access_key,
- cfg.get('region'),
- )
+ client = get_client('lambda', cfg, aws_profile=aws_profile)
# Do we prefer development variable over config?
func_name = (
os.environ.get('LAMBDA_FUNCTION_NAME') or cfg.get('function_name')
)
print('Creating lambda function with name: {}'.format(func_name))
+ code_params_dict = {}
+ code_params_dict.update([('ZipFile', byte_stream)]) if not upload_to_s3 else code_params_dict.update(
+ [('S3Bucket', cfg.get('bucket_name')), ('S3Key', filename)]
+ )
kwargs = {
'FunctionName': func_name,
'Runtime': cfg.get('runtime', 'python2.7'),
'Role': role,
'Handler': cfg.get('handler'),
- 'Code': {'ZipFile': byte_stream},
+ 'Code': code_params_dict,
'Description': cfg.get('description'),
'Timeout': cfg.get('timeout', 15),
'MemorySize': cfg.get('memory_size', 512),
'Publish': True,
}
- if 'environment_variables' in cfg:
+ if 'environment_variables' in cfg and cfg.get('environment_variables'):
kwargs.update(
Environment={
'Variables': {
@@ -452,35 +482,41 @@ def create_function(cfg, path_to_zip_file):
},
},
)
-
- client.create_function(**kwargs)
+ for i in range(5):
+ try:
+ if function_exists(cfg, func_name, aws_profile=aws_profile):
+ continue
+ else:
+ client.create_function(**kwargs)
+ print('Successfully created function {}'.format(func_name))
+ except Exception as e:
+ print("Error while updating function, backing off.")
+ time.sleep(5) # aws tells that deploys everything almost immediately. Almost...
+ if i > 3:
+ raise e
-def update_function(cfg, path_to_zip_file):
+def update_function(cfg, path_to_zip_file, upload_to_s3=False, filename=None, aws_profile=None):
"""Updates the code of an existing Lambda function"""
print('Updating your Lambda function')
byte_stream = read(path_to_zip_file, binary_file=True)
- aws_access_key_id = cfg.get('aws_access_key_id')
- aws_secret_access_key = cfg.get('aws_secret_access_key')
-
- account_id = get_account_id(aws_access_key_id, aws_secret_access_key)
- role = get_role_name(
- cfg.get('region'), account_id,
- cfg.get('role', 'lambda_basic_execution'),
- )
-
- client = get_client(
- 'lambda', aws_access_key_id, aws_secret_access_key,
- cfg.get('region'),
- )
-
- client.update_function_code(
- FunctionName=cfg.get('function_name'),
- ZipFile=byte_stream,
- Publish=False,
- )
+ role = create_role_for_function(cfg, aws_profile=aws_profile)
+ client = get_client('lambda', cfg, aws_profile=aws_profile)
+ if not upload_to_s3:
+ client.update_function_code(
+ FunctionName=cfg.get('function_name'),
+ ZipFile=byte_stream,
+ Publish=False,
+ )
+ else:
+ client.update_function_code(
+ FunctionName=cfg.get('function_name'),
+ S3Bucket=cfg.get('bucket_name'),
+ S3Key=filename,
+ Publish=False,
+ )
kwargs = {
'FunctionName': cfg.get('function_name'),
'Role': role,
@@ -504,8 +540,16 @@ def update_function(cfg, path_to_zip_file):
},
},
)
-
- client.update_function_configuration(**kwargs)
+ for i in range(5):
+ try:
+ if client.update_function_configuration(**kwargs):
+ print("Successfully updated function {}".format(cfg.get('function_name')))
+ break
+ except Exception as e:
+ print("Error while updating function, backing off.")
+ time.sleep(5) # aws tells that deploys everything almost immediately. Almost...
+ if i > 3:
+ raise e
# Publish last, so versions pick up eventually updated description...
client.publish_version(
@@ -513,25 +557,33 @@ def update_function(cfg, path_to_zip_file):
)
-def upload_s3(cfg, path_to_zip_file):
+def create_role_for_function(cfg, aws_profile=None):
+ role_cfg = cfg.get('role')
+ if role_cfg is not None:
+ if not get_role_arn(role_cfg['name'], cfg, aws_profile=aws_profile):
+ log.info("Creating new role: {}".format(role_cfg['name']))
+ role = create_role(role_cfg['name'], cfg, aws_profile=aws_profile)
+ else:
+ log.info("Found an existing role, updating policies")
+ role = get_role_arn(role_cfg['name'], cfg, aws_profile=aws_profile)
+ put_role_policy(role_cfg['name'], cfg, aws_profile=aws_profile)
+ else:
+ log.info("""No roles found. Will use role with name: lambda_basic_execution.\n
+ You can create one by updating your configuration and calling $lambda deploy.""")
+ role = get_role_arn("lambda_basic_execution", cfg=cfg, aws_profile=aws_profile)
+ return role
+
+
+def upload_s3(cfg, path_to_zip_file, aws_profile=None):
"""Upload a function to AWS S3."""
print('Uploading your new Lambda function')
- aws_access_key_id = cfg.get('aws_access_key_id')
- aws_secret_access_key = cfg.get('aws_secret_access_key')
- client = get_client(
- 's3', aws_access_key_id, aws_secret_access_key,
- cfg.get('region'),
- )
+ client = get_client('s3', cfg, aws_profile=aws_profile)
byte_stream = b''
with open(path_to_zip_file, mode='rb') as fh:
byte_stream = fh.read()
s3_key_prefix = cfg.get('s3_key_prefix', '/dist')
- checksum = hashlib.new('md5', byte_stream).hexdigest()
- timestamp = str(time.time())
- filename = '{prefix}{checksum}-{ts}.zip'.format(
- prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
- )
+ filename = '{prefix}.zip'.format(prefix=s3_key_prefix)
# Do we prefer development variable over config?
buck_name = (
@@ -548,17 +600,12 @@ def upload_s3(cfg, path_to_zip_file):
client.put_object(**kwargs)
print('Finished uploading {} to S3 bucket {}'.format(func_name, buck_name))
+ return filename
-def function_exists(cfg, function_name):
+def function_exists(cfg, function_name, aws_profile=None):
"""Check whether a function exists or not"""
-
- aws_access_key_id = cfg.get('aws_access_key_id')
- aws_secret_access_key = cfg.get('aws_secret_access_key')
- client = get_client(
- 'lambda', aws_access_key_id, aws_secret_access_key,
- cfg.get('region'),
- )
+ client = get_client('lambda', cfg, aws_profile=aws_profile)
# Need to loop through until we get all of the lambda functions returned.
# It appears to be only returning 50 functions at a time.
@@ -567,7 +614,7 @@ def function_exists(cfg, function_name):
functions.extend([
f['FunctionName'] for f in functions_resp.get('Functions', [])
])
- while('NextMarker' in functions_resp):
+ while 'NextMarker' in functions_resp:
functions_resp = client.list_functions(
Marker=functions_resp.get('NextMarker'),
)
@@ -575,3 +622,228 @@ def function_exists(cfg, function_name):
f['FunctionName'] for f in functions_resp.get('Functions', [])
])
return function_name in functions
+
+
+def create_trigger(cfg, aws_profile=None):
+    """Creates a trigger and associates it with the lambda function (S3 or CloudWatch)"""
+ trigger_type = cfg.get('trigger')['type']
+ log.info("Creating trigger: {}".format(trigger_type))
+ return {
+ "bucket": create_trigger_s3,
+ "event": create_trigger_cloud_watch,
+ "sns": create_sns_trigger
+ }[trigger_type](cfg, aws_profile)
+
+
+def create_trigger_s3(cfg, aws_profile=None):
+ s3_client = get_client('s3', cfg, aws_profile=aws_profile)
+ bucket_notification = s3_client.BucketNotification(cfg.get('trigger')['bucket_name'])
+ bucket_notification.put(
+ NotificationConfiguration={
+ 'LambdaFunctionConfigurations': [
+ {
+ 'LambdaFunctionArn': get_function_arn_name(cfg, aws_profile=aws_profile),
+ 'Events': cfg.get('trigger')['events']
+ }
+ ]
+ }
+ )
+
+
+def create_trigger_cloud_watch(cfg, aws_profile=None):
+ """Creates or updates cron trigger and associates it with lambda function"""
+ lambda_client = get_client('lambda', cfg, aws_profile=aws_profile)
+ events_client = get_client('events', cfg, aws_profile=aws_profile)
+ function_arn = get_function_arn_name(cfg, aws_profile=aws_profile)
+ frequency = cfg.get('trigger')['frequency']
+ trigger_name = "{}".format(cfg.get('trigger')['name'])
+
+ rule_response = events_client.put_rule(
+ Name=trigger_name,
+ ScheduleExpression=frequency,
+ State='DISABLED'
+ )
+
+ statement_id = "{}-Event".format(trigger_name)
+ try:
+ lambda_client.remove_permission(
+ FunctionName=function_arn,
+ StatementId=statement_id,
+ )
+ except Exception: # sanity check if resource is not found. boto uses its own factory to instantiate exceptions
+ pass # that's why exception clause is so broad
+
+ lambda_client.add_permission(
+ FunctionName=function_arn,
+ StatementId=statement_id,
+ Action="lambda:InvokeFunction",
+ Principal="events.amazonaws.com",
+ SourceArn=rule_response['RuleArn']
+ )
+
+ events_client.put_targets(
+ Rule=trigger_name,
+ Targets=[
+ {
+ 'Id': "1",
+ 'Arn': function_arn
+ }
+ ]
+ )
+
+
+def create_sns_trigger(cfg, aws_profile=None):
+ sns_client = get_client('sns', cfg, aws_profile)
+ lambda_client = get_client('lambda', cfg, aws_profile)
+ s3_client = get_client('s3', cfg, aws_profile)
+
+ function_arn = get_function_arn_name(cfg, aws_profile)
+ trigger_name = cfg.get('trigger')['name']
+ topic_arn = sns_client.create_topic(
+ Name=trigger_name
+ )['TopicArn']
+
+ topic_policy_document = """
+ {{
+ "Version": "2008-10-17",
+ "Id": "__default_policy_ID",
+ "Statement": [
+ {{
+ "Sid": "_s3",
+ "Effect": "Allow",
+ "Principal": {{
+ "Service": "s3.amazonaws.com"
+ }},
+ "Action": [
+ "SNS:Publish"
+ ],
+ "Resource": "{topic_arn}",
+ "Condition": {{
+ "StringLike": {{
+ "aws:SourceArn": "arn:aws:s3:::*"
+ }}
+ }}
+ }}
+ ]
+}}"""
+ sns_client.set_topic_attributes(
+ TopicArn=topic_arn,
+ AttributeName='Policy',
+ AttributeValue=topic_policy_document.format(topic_arn=topic_arn)
+ )
+
+ sns_client.subscribe(
+ TopicArn=topic_arn,
+ Protocol='lambda',
+ Endpoint=function_arn
+ )
+
+ statement_id = "{}-Topic".format(trigger_name)
+ try:
+ lambda_client.remove_permission(
+ FunctionName=function_arn,
+ StatementId=statement_id,
+ )
+ except Exception: # sanity check if resource is not found. boto uses its own factory to instantiate exceptions
+ pass # that's why exception clause is so broad
+ lambda_client.add_permission(
+ FunctionName=function_arn,
+ StatementId=statement_id,
+ Action="lambda:InvokeFunction",
+ Principal="sns.amazonaws.com",
+ SourceArn=topic_arn
+ )
+ for bucket in cfg.get('trigger')['buckets']:
+ bucket_values = bucket.values()[0]
+ s3_client.put_bucket_notification_configuration(
+ Bucket=bucket_values['bucket_name'],
+ NotificationConfiguration={
+ 'TopicConfigurations': [
+ {
+ 'TopicArn': topic_arn,
+ 'Events': bucket_values['events'],
+ 'Filter': {
+ 'Key': {
+ 'FilterRules': [
+ {
+ 'Name': 'prefix',
+ 'Value': bucket_values['prefix']
+ },
+ {
+ 'Name': 'suffix',
+ 'Value': bucket_values['suffix']
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ )
+
+
+def get_function_arn_name(cfg, aws_profile):
+ """Retrieves arn name of an existing function"""
+ client = get_client('lambda', cfg, aws_profile=aws_profile)
+ return client.get_function(FunctionName=cfg.get('function_name'))['Configuration']['FunctionArn']
+
+
+def get_role_arn(role_name, cfg, aws_profile=None):
+ client = get_client("iam", cfg, aws_profile=aws_profile)
+ response = None
+ try:
+ response = client.get_role(
+ RoleName=role_name
+ )['Role']['Arn']
+ except Exception as e:
+ pass
+ return response
+
+
+def create_role(role_name, cfg, aws_profile=None):
+ client = get_client('iam', cfg, aws_profile=aws_profile)
+ response = client.create_role(
+ RoleName=role_name,
+ AssumeRolePolicyDocument="""{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ },
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "apigateway.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}"""
+ )
+ role_arn = response['Role']['Arn']
+ put_role_policy(role_name, cfg, aws_profile)
+ print("Checking if policy is available.")
+ policy = client.get_role_policy(RoleName=role_name, PolicyName=cfg.get('role')['policy_name'])
+ assert policy['ResponseMetadata']['HTTPStatusCode'] == 200
+ return role_arn
+
+
+def put_role_policy(role_name, cfg, aws_profile=None):
+ client = get_client('iam', cfg, aws_profile=aws_profile)
+ role_cfg = cfg.get('role')
+ if os.path.exists(os.path.join(os.getcwd(), role_cfg['policy_document'])):
+ try:
+ with open(role_cfg['policy_document']) as policy:
+ client.put_role_policy(
+ RoleName=role_name,
+ PolicyName=role_cfg['policy_name'],
+ PolicyDocument=json.dumps(json.load(policy))
+ )
+ except Exception as e:
+ log.warn(e.message)
+ else:
+ log.debug("No policy file found")
diff --git a/aws_lambda/project_templates/.lambdaignore b/aws_lambda/project_templates/.lambdaignore
new file mode 100644
index 0000000..e69de29
diff --git a/aws_lambda/project_templates/config.yaml b/aws_lambda/project_templates/config.yaml
index 72bfdab..2424d5f 100644
--- a/aws_lambda/project_templates/config.yaml
+++ b/aws_lambda/project_templates/config.yaml
@@ -4,7 +4,10 @@ function_name: my_lambda_function
handler: service.handler
description: My first lambda function
runtime: python2.7
-# role: lambda_basic_execution
+# role:
+# name: lambda_basic_execution
+# policy_name: lambda_basic_policy
+# policy_document: policy.json
# S3 upload requires appropriate role with s3:PutObject permission
# (ex. basic_s3_upload), a destination bucket, and the key prefix
@@ -12,7 +15,7 @@ runtime: python2.7
# s3_key_prefix: 'path/to/file/'
# if access key and secret are left blank, boto will use the credentials
-# defined in the [default] section of ~/.aws/credentials.
+# defined in the [default] section of ~/.aws/credentials unless an aws_profile is defined
aws_access_key_id:
aws_secret_access_key:
@@ -29,3 +32,41 @@ environment_variables:
# Build options
build:
source_directories: lib # a comma delimited list of directories in your project root that contains source to package.
+trigger:
+ name: 'trigger_name'
+  type: bucket | event | sns # bucket if the lambda is supposed to be launched on an S3 event, event in case of a CloudWatch event, sns for an SNS topic
+ # Configuration template below, edit according to your configuration
+ # S3 configuration
+ bucket_name: 'bucket_name'
+ events:
+ - 's3:ReducedRedundancyLostObject'
+ - 's3:ObjectCreated:*'
+ - 's3:ObjectCreated:Put'
+ - 's3:ObjectCreated:Post'
+ - 's3:ObjectCreated:Copy'
+ - 's3:ObjectCreated:CompleteMultipartUpload'
+ - 's3:ObjectRemoved:*'
+ - 's3:ObjectRemoved:Delete'
+ - 's3:ObjectRemoved:DeleteMarkerCreated'
+ # S3 configuration end
+ # CloudWatch configuration (cron)
+ frequency: "rate(1 hour)" # cron(0 12 * * ? *) - daily at 12.00 UTC
+ # SNS configuration
+ name: 'sns_name'
+  # NOTE: For sns configuration you must provide a list of all your buckets in the following format
+ # Add more buckets with next number as suffix
+ buckets:
+ - bucket1:
+ bucket_name: 'bucket_name'
+ events:
+ - 's3:ReducedRedundancyLostObject'
+ - 's3:ObjectCreated:*'
+ - 's3:ObjectCreated:Put'
+ - 's3:ObjectCreated:Post'
+ - 's3:ObjectCreated:Copy'
+ - 's3:ObjectCreated:CompleteMultipartUpload'
+ - 's3:ObjectRemoved:*'
+ - 's3:ObjectRemoved:Delete'
+ - 's3:ObjectRemoved:DeleteMarkerCreated'
+ prefix: ''
+ suffix: ''
diff --git a/aws_lambda/project_templates/policy.json b/aws_lambda/project_templates/policy.json
new file mode 100644
index 0000000..e24d2f9
--- /dev/null
+++ b/aws_lambda/project_templates/policy.json
@@ -0,0 +1,35 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:ListBucket",
+ "s3:GetObject",
+ "s3:PutObject"
+ ],
+ "Resource": [
+ "arn:aws:s3:::*/*"
+ ]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:ListBucket",
+ "s3:GetObject"
+ ],
+ "Resource": [
+ "arn:aws:s3:::*/*"
+ ]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "logs:CreateLogGroup",
+ "logs:CreateLogStream",
+ "logs:PutLogEvents"
+ ],
+ "Resource": "arn:aws:logs:*:*:*"
+ }
+ ]
+}
diff --git a/scripts/lambda b/scripts/lambda
index e245ba1..8c2d027 100755
--- a/scripts/lambda
+++ b/scripts/lambda
@@ -42,15 +42,23 @@ def init(folder, minimal):
'--local-package', default=None, type=click.Path(),
help='Install local package as well.', multiple=True,
)
-def build(use_requirements, local_package):
- aws_lambda.build(CURRENT_DIR, use_requirements, local_package)
+@click.option(
+ '--config-file-path', default=None, type=click.Path(),
+ help='Path to custom config.yaml file', multiple=False
+)
+def build(use_requirements, local_package, config_file_path=None):
+ aws_lambda.build(CURRENT_DIR, use_requirements, local_package, config_file_path)
@click.command(help='Run a local test of your function.')
@click.option('--event-file', default=None, help='Alternate event file.')
@click.option('--verbose', '-v', is_flag=True)
-def invoke(event_file, verbose):
- aws_lambda.invoke(CURRENT_DIR, event_file, verbose)
+@click.option(
+ '--config-file-path', default=None, type=click.Path(),
+ help='Path to custom config.yaml file', multiple=False
+)
+def invoke(event_file, verbose, config_file_path):
+ aws_lambda.invoke(CURRENT_DIR, event_file, verbose, config_file_path)
@click.command(help='Register and deploy your code to lambda.')
@@ -62,8 +70,39 @@ def invoke(event_file, verbose):
'--local-package', default=None, type=click.Path(),
help='Install local package as well.', multiple=True,
)
-def deploy(use_requirements, local_package):
- aws_lambda.deploy(CURRENT_DIR, use_requirements, local_package)
+@click.option(
+ '--config-file-path', default=None, type=click.Path(),
+ help='Path to custom config.yaml file', multiple=False
+)
+@click.option(
+ '--aws-profile', default=None, type=click.STRING,
+ help='AWS profile name.', multiple=False
+)
+def deploy(use_requirements, local_package, config_file_path, aws_profile):
+ aws_lambda.deploy(CURRENT_DIR, use_requirements, local_package, config_file_path=config_file_path,
+ aws_profile=aws_profile)
+
+
+@click.command(help='Deploy your code to S3 and register to lambda.')
+@click.option(
+ '--use-requirements', default=False, is_flag=True,
+ help='Install all packages defined in requirements.txt',
+)
+@click.option(
+ '--local-package', default=None, type=click.Path(),
+ help='Install local package as well.', multiple=True,
+)
+@click.option(
+ '--config-file-path', default=None, type=click.Path(),
+ help='Path to custom config.yaml file', multiple=False
+)
+@click.option(
+ '--aws-profile', default=None, type=click.STRING,
+ help='AWS profile name.', multiple=False
+)
+def deploy_s3(use_requirements, local_package, config_file_path, aws_profile):
+ aws_lambda.deploy(CURRENT_DIR, use_requirements, local_package, upload_to_s3=True,
+ config_file_path=config_file_path, aws_profile=aws_profile)
@click.command(help='Upload your lambda to S3.')
@@ -75,8 +114,17 @@ def deploy(use_requirements, local_package):
'--local-package', default=None, type=click.Path(),
help='Install local package as well.', multiple=True,
)
-def upload(use_requirements, local_package):
- aws_lambda.upload(CURRENT_DIR, use_requirements, local_package)
+@click.option(
+ '--config-file-path', default=None, type=click.Path(),
+ help='Path to custom config.yaml file', multiple=False
+)
+@click.option(
+ '--aws-profile', default=None, type=click.STRING,
+ help='AWS profile name.', multiple=False
+)
+def upload(use_requirements, local_package, config_file_path, aws_profile):
+ aws_lambda.upload(CURRENT_DIR, use_requirements, local_package, config_file_path=config_file_path,
+ aws_profile=aws_profile)
@click.command(help='Delete old versions of your functions')
@@ -84,14 +132,19 @@ def upload(use_requirements, local_package):
'--keep-last', type=int,
prompt='Please enter the number of recent versions to keep',
)
-def cleanup(keep_last):
- aws_lambda.cleanup_old_versions(CURRENT_DIR, keep_last)
+@click.option(
+ '--config-file-path', default=None, type=click.Path(),
+ help='Path to custom config.yaml file', multiple=False
+)
+def cleanup(keep_last, config_file_path):
+ aws_lambda.cleanup_old_versions(CURRENT_DIR, keep_last, config_file_path=config_file_path)
if __name__ == '__main__':
cli.add_command(init)
cli.add_command(invoke)
cli.add_command(deploy)
+ cli.add_command(deploy_s3)
cli.add_command(upload)
cli.add_command(build)
cli.add_command(cleanup)
diff --git a/setup.cfg b/setup.cfg
index 3061c58..01e3ac3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,7 +1,7 @@
[bumpversion]
commit = True
tag = True
-current_version = 2.1.1
+current_version = 2.2.1
parse = (?P&lt;major&gt;\d+)\.(?P&lt;minor&gt;\d+)\.(?P&lt;patch&gt;\d+)(\-(?P&lt;release&gt;[a-z]+))?
serialize =
{major}.{minor}.{patch}
diff --git a/setup.py b/setup.py
index 4381d6c..099b94c 100755
--- a/setup.py
+++ b/setup.py
@@ -17,8 +17,8 @@
]
setup(
- name='python-lambda',
- version='2.1.1',
+ name='hbi-python-lambda',
+ version='2.2.1',
description='The bare minimum for a Python app running on Amazon Lambda.',
long_description=readme,
author='Nick Ficano',
@@ -51,4 +51,9 @@
],
test_suite='tests',
tests_require=test_requirements,
+ options={
+ 'build_scripts': {
+ 'executable': "/usr/bin/env python"
+ }
+ }
)