Initial checkin

Stefan Reimer 2018-11-22 18:31:59 +00:00
commit ea58e60aa9
10 changed files with 1333 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,60 @@
# Vim
*.swp

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
.pytest*
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# dotenv
.env

# virtualenv
venv/
ENV/

cloudbender/__init__.py Normal file

@@ -0,0 +1,15 @@
import logging

__author__ = 'Stefan Reimer'
__email__ = 'stefan@zero-downtimet.net'
__version__ = '1.1.0'


# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):  # pragma: no cover
    def emit(self, record):
        pass


logging.getLogger('cloudbender').addHandler(NullHandler())

cloudbender/cli.py Normal file

@@ -0,0 +1,171 @@
import os
import logging
import functools
from concurrent.futures import ThreadPoolExecutor, wait

import click

from . import __version__
from .core import CloudBender
from .utils import setup_logging

logger = logging.getLogger(__name__)
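
# Example invocations, assuming the package is wired up as a console script
# named ``cloudbender`` (no entry point is defined in this commit, so the
# command name is an assumption):
#
#   cloudbender render mystack           # render a single stack template
#   cloudbender provision ALL            # create/update every stack, in dependency order
#   cloudbender delete mygroup --multi   # delete all stacks below a group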
@click.group()
@click.version_option(version=__version__, prog_name="CloudBender")
@click.option("--debug", is_flag=True, help="Turn on debug logging.")
@click.option("--dir", "directory", help="Specify cloudbender project directory.")
@click.pass_context
def cli(ctx, debug, directory):
    logger = setup_logging(debug)

    # Read global config
    cb = CloudBender(directory if directory else os.getcwd())
    cb.read_config()
    cb.dump_config()

    ctx.obj['cb'] = cb
@click.command()
@click.argument("stack_name")
@click.option("--multi", is_flag=True, help="Allow more than one stack to match")
@click.pass_context
def render(ctx, stack_name, multi):
    """ Renders template and its parameters """
    stacks = _find_stacks(ctx, stack_name, multi)

    for s in stacks:
        s.render()
        s.write_template_file()


@click.command()
@click.argument("stack_name")
@click.option("--multi", is_flag=True, help="Allow more than one stack to match")
@click.pass_context
def validate(ctx, stack_name, multi):
    """ Validates the rendered templates via cfn-lint """
    stacks = _find_stacks(ctx, stack_name, multi)

    for s in stacks:
        s.validate()


@click.command()
@click.argument("stack_name")
@click.option("--multi", is_flag=True, help="Allow more than one stack to match")
@click.pass_context
def provision(ctx, stack_name, multi):
    """ Creates or updates stacks or stack groups """
    stacks = _find_stacks(ctx, stack_name, multi)

    for step in sort_stacks(ctx, stacks):
        if step:
            with ThreadPoolExecutor(max_workers=len(step)) as group:
                futures = []
                for stack in step:
                    status = stack.get_status()
                    if not status:
                        futures.append(group.submit(stack.create))
                    else:
                        futures.append(group.submit(stack.update))

                wait(futures)


@click.command()
@click.argument("stack_name")
@click.option("--multi", is_flag=True, help="Allow more than one stack to match")
@click.pass_context
def delete(ctx, stack_name, multi):
    """ Deletes stacks or stack groups """
    stacks = _find_stacks(ctx, stack_name, multi)

    # Delete in the reverse order of provisioning
    steps = [s for s in sort_stacks(ctx, stacks)]
    delete_steps = steps[::-1]
    for step in delete_steps:
        if step:
            with ThreadPoolExecutor(max_workers=len(step)) as group:
                futures = []
                for stack in step:
                    if stack.multi_delete:
                        futures.append(group.submit(stack.delete))

                wait(futures)


@click.command()
@click.pass_context
def clean(ctx):
    """ Deletes all previously rendered files locally """
    cb = ctx.obj['cb']
    cb.clean()
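

# sort_stacks() below is a generator form of Kahn's topological sort: each
# yielded list contains the stacks whose dependencies are already satisfied,
# so every list can be provisioned in parallel before moving on. For example
# (hypothetical ids), given the edges A -> B, A -> C and C -> D it yields
# [A], then [B, C], then [D]. If entries remain when no further progress is
# possible, the dependency graph is cyclic and the final assert fires.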
def sort_stacks(ctx, stacks):
    """ Sort stacks by dependencies """
    cb = ctx.obj['cb']

    data = {}
    for s in stacks:
        # Resolve dependencies
        deps = []
        for d in s.dependencies:
            # For now we assume deps are artifacts so we prepend them with our
            # local profile and region to match stack.id
            for dep_stack in cb.filter_stacks({'region': s.region, 'profile': s.profile, 'provides': d}):
                deps.append(dep_stack.id)

        data[s.id] = set(deps)

    # Ignore self dependencies
    for k, v in data.items():
        v.discard(k)

    extra_items_in_deps = functools.reduce(set.union, data.values()) - set(data.keys())
    data.update({item: set() for item in extra_items_in_deps})

    while True:
        ordered = set(item for item, dep in data.items() if not dep)
        if not ordered:
            break

        # Return lists of stack objects rather than just their ids
        result = []
        for o in ordered:
            for s in stacks:
                if s.id == o:
                    result.append(s)

        yield result

        data = {item: (dep - ordered) for item, dep in data.items()
                if item not in ordered}

    assert not data, "A cyclic dependency exists amongst %r" % data
def _find_stacks(ctx, stack_name, multi):
    cb = ctx.obj['cb']

    # "ALL" acts as stack_name "config" (the root group) with multi=True
    if stack_name == "ALL":
        multi = True
        stack_name = "config"

    stacks = cb.resolve_stacks(stack_name)
    if not stacks:
        logger.error('Cannot find stack matching: {}'.format(stack_name))
        raise click.Abort()

    if not multi and len(stacks) > 1:
        logger.error('Found more than one ({}) stack matching name {}: {}. Abort.'.format(
            len(stacks), stack_name, [s.stackname for s in stacks]))
        raise click.Abort()

    return stacks


cli.add_command(render)
cli.add_command(validate)
cli.add_command(provision)
cli.add_command(delete)
cli.add_command(clean)

if __name__ == '__main__':
    cli(obj={})

cloudbender/connection.py Normal file

@@ -0,0 +1,55 @@
import os
import logging

import boto3
import botocore.session
from botocore import credentials

logger = logging.getLogger(__name__)


class BotoConnection():
    _sessions = {}
    _clients = {}

    def __init__(self, profile=None, region=None):
        self.region = region
        self.profile = profile
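
    # Note: _sessions/_clients above are class attributes, so all
    # BotoConnection instances in one process share a single cache keyed by
    # (profile, region[, service]).
    #
    # botocore's session_vars mapping used below takes, per variable, a
    # 4-tuple of (config file key, environment variable, default value,
    # converter); to my understanding only the third slot, the default, is
    # overridden here, e.g.:
    #
    #   {'profile': (None, None, 'prod', None)}   # force profile "prod"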
    def _get_session(self, profile=None, region=None):
        if self._sessions.get((profile, region)):
            return self._sessions[(profile, region)]

        # Construct botocore session with cache
        # Setup boto to cache STS tokens for MFA
        # Change the cache path from the default of ~/.aws/boto/cache to the one used by awscli
        session_vars = {}
        if profile:
            session_vars['profile'] = (None, None, profile, None)
        if region and region != 'global':
            session_vars['region'] = (None, None, region, None)

        session = botocore.session.Session(session_vars=session_vars)
        cli_cache = os.path.join(os.path.expanduser('~'), '.aws/cli/cache')
        session.get_component('credential_provider').get_provider('assume-role').cache = credentials.JSONFileCache(cli_cache)

        self._sessions[(profile, region)] = session
        return session
    def _get_client(self, service, profile=None, region=None):
        if self._clients.get((profile, region, service)):
            return self._clients[(profile, region, service)]

        session = self._get_session(profile, region)
        client = boto3.Session(botocore_session=session).client(service)

        self._clients[(profile, region, service)] = client
        return client

    def call(self, service, command, kwargs={}, profile=None, region=None):
        client = self._get_client(service, profile, region)
        return getattr(client, command)(**kwargs)

cloudbender/core.py Normal file

@@ -0,0 +1,120 @@
import os
import logging

from .utils import read_yaml_file, ensure_dir
from .stackgroup import StackGroup

logger = logging.getLogger(__name__)


class CloudBender(object):
    """ Config Class to handle recursive conf/* config tree """
    def __init__(self, root_path):
        self.root = root_path
        self.sg = None
        self.all_stacks = []
        self.ctx = {
            "config_path": os.path.join(self.root, "config"),
            "template_path": os.path.join(self.root, "cloudformation"),
            "parameter_path": os.path.join(self.root, "parameters"),
            "artifact_paths": [os.path.join(self.root, "artifacts")]
        }

        if not os.path.isdir(self.root):
            # Raising a plain string is invalid in Python 3, raise an exception instead
            raise ValueError("Check '{0}' exists and is a valid project folder.".format(root_path))
    def read_config(self):
        """ Load the <path>/config.yaml, <path>/*.yaml as stacks, sub-folders are child groups """

        # Read top level config.yaml and extract CloudBender CTX
        _config = read_yaml_file(os.path.join(self.ctx['config_path'], 'config.yaml'))
        if _config:
            self.ctx.update(_config.get('CloudBender'))

        # Make sure all paths are absolute
        for k, v in self.ctx.items():
            if k in ['config_path', 'template_path', 'parameter_path', 'artifact_paths']:
                if isinstance(v, list):
                    new_list = []
                    for path in v:
                        if not os.path.isabs(path):
                            new_list.append(os.path.normpath(os.path.join(self.root, path)))
                        else:
                            new_list.append(path)
                    self.ctx[k] = new_list

                elif isinstance(v, str):
                    if not os.path.isabs(v):
                        self.ctx[k] = os.path.normpath(os.path.join(self.root, v))

            if k in ['template_path', 'parameter_path']:
                ensure_dir(self.ctx[k])

        self.sg = StackGroup(self.ctx['config_path'], self.ctx)
        self.sg.read_config()

        self.all_stacks = self.sg.get_stacks()

        # If cfn vars config is completely empty set some default for tests to work
        # if "vars" not in _config:
        #     _config = { "vars": { 'Azs': {'TestAZ': 'Next'}, 'Segments': {'Testnet': 'internet'}, "Mode": "Piped" } }
        # self.vars.update(_config.get('vars'))

    def dump_config(self):
        logger.debug("<CloudBender: {}>".format(vars(self)))
        self.sg.dump_config()

    def clean(self):
        for s in self.all_stacks:
            s.delete_template_file()
            s.delete_parameter_file()
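
    # resolve_stacks() accepts the same tokens the CLI passes through, e.g.
    # (hypothetical project layout):
    #   "config/prod/app.yaml" or "prod/app.yaml" -> single stack, matched by path
    #   "prod" or "prod/"                         -> a stack group, returns all stacks below it
    #   "config"                                  -> the root group, i.e. every stack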
    def resolve_stacks(self, token):
        stacks = None

        # Remove optional leading "config/" to allow bash path expansion
        if token.startswith("config/"):
            token = token[7:]

        # If the token ends with yaml we look for stacks
        if token.endswith('.yaml'):
            stacks = self.sg.get_stacks(token, match_by='path')

        # otherwise assume we look for a group, if we find a group return all stacks below
        else:
            # Strip potential trailing slash
            token = token.rstrip('/')

            sg = self.sg.get_stackgroup(token, match_by='path')
            if sg:
                stacks = sg.get_stacks()

        return stacks

    def filter_stacks(self, filter_by, stacks=None):
        # filter_by is a dict of { property: value }
        # if no group of stacks is provided, look in all available
        if not stacks:
            stacks = self.all_stacks

        matching_stacks = []
        for s in stacks:
            match = True

            for p, v in filter_by.items():
                if not (hasattr(s, p) and getattr(s, p) == v):
                    match = False
                    break

            if match:
                matching_stacks.append(s)

        return matching_stacks

cloudbender/jinja.py Normal file

@@ -0,0 +1,179 @@
import os
import io
import gzip
import re
import base64
import types
import logging

import jinja2

import pyminifier.token_utils
import pyminifier.minification
import pyminifier.compression
import pyminifier.obfuscate

logger = logging.getLogger(__name__)
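
# get_custom_att() renders differently depending on _config['cfn']['Mode'],
# roughly (illustrative output for att="Vpc.Id"):
#   FortyTwo:  { "Fn::GetAtt": ["FortyTwo", "Vpc.Id"] }
#   AWSImport: { "Fn::ImportValue": { "Fn::Sub": "${Conglomerate}:Vpc:Id" } }
#   otherwise: { Ref: VpcDoTId }   ('.' is not allowed in parameter names)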
@jinja2.contextfunction
def get_custom_att(context, att=None, ResourceName="FortyTwo", attributes={}, flush=False, dump=False, dependencies=False):
    """ Returns the rendered required fragment and also collects all foreign
        attributes for the specified CustomResource to include them later in
        the actual CustomResource include property """

    # If flush is set all we do is empty our state dict
    if flush:
        attributes.clear()
        return

    # Return all registered attributes
    if dump:
        return attributes

    # If dependencies, return all Artifacts this stack depends on, which are the attr of FortyTwo
    if dependencies:
        deps = set()
        if ResourceName in attributes:
            for att in attributes[ResourceName]:
                deps.add(att.split('.')[0])

        return list(deps)

    # If called with an attribute, return the fragment and register it
    if att:
        if ResourceName not in attributes:
            attributes[ResourceName] = set()
        attributes[ResourceName].add(att)

        config = context.get_all()['_config']
        if config['cfn']['Mode'] == "FortyTwo":
            return('{{ "Fn::GetAtt": ["{0}", "{1}"] }}'.format(ResourceName, att))
        elif config['cfn']['Mode'] == "AWSImport" and ResourceName == "FortyTwo":
            # AWS only allows - and :, so replace '.' with ':'
            return('{{ "Fn::ImportValue": {{ "Fn::Sub": "${{Conglomerate}}:{0}" }} }}'.format(att.replace('.', ':')))
        else:
            # We need to replace '.' with something PureAlphaNumeric, thx AWS ...
            return('{{ Ref: {0} }}'.format(att.replace('.', 'DoT')))
@jinja2.contextfunction
def include_raw_gz(context, files=None, gz=True):
    jenv = context.environment

    output = ''
    for name in files:
        output = output + jinja2.Markup(jenv.loader.get_source(jenv, name)[0])
    # logger.debug(output)

    if not gz:
        return(output)

    buf = io.BytesIO()
    f = gzip.GzipFile(mode='w', fileobj=buf, mtime=0)
    f.write(output.encode())
    f.close()

    return base64.b64encode(buf.getvalue()).decode('utf-8')


@jinja2.contextfunction
def render_once(context, name=None, resources=set(), reset=False):
    """ Utility function that returns True only once per name """

    if reset:
        resources.clear()
        return

    if name and name not in resources:
        resources.add(name)
        return True

    return False


@jinja2.contextfunction
def raise_helper(context, msg):
    raise Exception(msg)
# Custom tests
def regex(value='', pattern='', ignorecase=False, match_type='search'):
    ''' Expose `re` as a boolean filter using the `search` method by default.
        This is likely only useful for `search` and `match` which already
        have their own filters.
    '''
    if ignorecase:
        flags = re.I
    else:
        flags = 0
    _re = re.compile(pattern, flags=flags)
    if getattr(_re, match_type, 'search')(value) is not None:
        return True
    return False


def match(value, pattern='', ignorecase=False):
    ''' Perform a `re.match` returning a boolean '''
    return regex(value, pattern, ignorecase, 'match')


def search(value, pattern='', ignorecase=False):
    ''' Perform a `re.search` returning a boolean '''
    return regex(value, pattern, ignorecase, 'search')
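

# In a template these show up as Jinja tests and filters, e.g.
# (hypothetical snippet):
#   {% if image_id is match("^ami-") %} ... {% endif %}
#   {{ name | regex_replace("[^a-zA-Z0-9]", "") }}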
# Custom filters
def regex_replace(value='', pattern='', replace='', ignorecase=False):
    if ignorecase:
        flags = re.I
    else:
        flags = 0
    return re.sub(pattern, replace, value, flags=flags)


def pyminify(source, obfuscate=False, minify=True):
    # pyminifier options
    options = types.SimpleNamespace(tabs=False, replacement_length=1, use_nonlatin=0,
                                    obfuscate=0, obf_variables=1, obf_classes=0,
                                    obf_functions=0, obf_import_methods=0, obf_builtins=0)

    tokens = pyminifier.token_utils.listified_tokenizer(source)

    if minify:
        source = pyminifier.minification.minify(tokens, options)
        tokens = pyminifier.token_utils.listified_tokenizer(source)

    if obfuscate:
        name_generator = pyminifier.obfuscate.obfuscation_machine(use_unicode=False)
        pyminifier.obfuscate.obfuscate("__main__", tokens, options, name_generator=name_generator)
        # source = pyminifier.obfuscate.apply_obfuscation(source)
        source = pyminifier.token_utils.untokenize(tokens)
        # logger.debug(source)

    minified_source = pyminifier.compression.gz_pack(source)
    logger.info("Compressed python code to {} bytes".format(len(minified_source)))
    return minified_source
def JinjaEnv(template_locations=[]):
    jenv = jinja2.Environment(trim_blocks=True, lstrip_blocks=True, undefined=jinja2.Undefined)

    jinja_loaders = []
    for _dir in template_locations:
        jinja_loaders.append(jinja2.FileSystemLoader(_dir))
    jenv.loader = jinja2.ChoiceLoader(jinja_loaders)

    jenv.globals['include_raw'] = include_raw_gz
    jenv.globals['get_custom_att'] = get_custom_att
    jenv.globals['render_once'] = render_once
    jenv.globals['raise'] = raise_helper

    jenv.filters['regex_replace'] = regex_replace
    jenv.filters['pyminify'] = pyminify

    jenv.tests['match'] = match
    jenv.tests['regex'] = regex
    jenv.tests['search'] = search

    return jenv

cloudbender/stack.py Normal file

@@ -0,0 +1,486 @@
import os
import re
import json
import time
import hashlib
import subprocess
from datetime import datetime, timedelta

import semver
import oyaml as yaml
from dateutil.tz import tzutc

import botocore
from botocore.exceptions import ClientError
import cfnlint.core

from .utils import read_yaml_file, dict_merge
from .connection import BotoConnection
from .jinja import JinjaEnv
from . import __version__

import logging
logger = logging.getLogger(__name__)


class StackStatus(object):
    """ StackStatus stores simplified stack statuses. """
    COMPLETE = "complete"
    FAILED = "failed"
    IN_PROGRESS = "in progress"
    PENDING = "pending"
class Stack(object):
    def __init__(self, name, path, rel_path, tags=None, parameters=None, template_vars=None,
                 region='global', profile=None, template=None, ctx={}):
        self.id = (profile, region, name)
        self.stackname = name
        self.path = path
        self.rel_path = rel_path
        self.tags = tags
        self.parameters = parameters
        self.template_vars = template_vars
        self.region = region
        self.profile = profile
        self.template = template
        self.provides = template
        self.cfn_template = None
        self.cfn_parameters = []
        self.connection_manager = BotoConnection(self.profile, self.region)
        self.ctx = ctx
        self.status = None
        self.dependencies = set()
        self.default_lock = None
        self.multi_delete = True

    def dump_config(self):
        logger.debug("<Stack {}: {}>".format(self.id, vars(self)))

    def read_config(self):
        _config = read_yaml_file(self.path)

        for p in ["stackname", "template", "dependencies", "default_lock", "multi_delete", "provides"]:
            if p in _config:
                setattr(self, p, _config[p])

        for p in ["parameters", "tags"]:
            if p in _config:
                setattr(self, p, dict_merge(getattr(self, p), _config[p]))

        # Inject Artifact tag, for now hard coded
        self.tags['Artifact'] = self.provides

        if 'vars' in _config:
            self.template_vars = dict_merge(self.template_vars, _config['vars'])

        logger.info("Stack {} added.".format(self.id))
    def check_fortytwo(self, template):
        # Fail early if 42 is enabled but not available.
        # Note: the original referenced an undefined self.cfn and an undefined
        # abort() helper; template_vars and RuntimeError are used here instead.
        if self.template_vars.get('Mode') == "FortyTwo" and self.template != 'FortyTwo':
            try:
                response = self.connection_manager.call(
                    'lambda', 'get_function', {'FunctionName': 'FortyTwo'},
                    profile=self.profile, region=self.region)

                # Also verify version in case specified in the template's metadata
                try:
                    req_ver = template['Metadata']['FortyTwo']['RequiredVersion']
                    if 'Release' not in response['Tags']:
                        raise RuntimeError("Lambda FortyTwo has no Release Tag! Required: {}".format(req_ver))
                    elif semver.compare(req_ver, re.sub("-.*$", '', response['Tags']['Release'])) > 0:
                        raise RuntimeError("Lambda FortyTwo version is not recent enough! Required: {} vs. Found: {}".format(
                            req_ver, response['Tags']['Release']))
                except KeyError:
                    pass

            except botocore.exceptions.ClientError:
                raise RuntimeError("No Lambda FortyTwo found in your account")
    def render(self):
        """ Renders the cfn jinja template for this stack """

        jenv = JinjaEnv(self.ctx['artifact_paths'])

        template = jenv.get_template('{0}{1}'.format(self.template, '.yaml.jinja'))

        template_metadata = {
            'Template.Name': self.template,
            'Template.Hash': 'unknown',
            'Template.GitComment': 'unknown',
            'CloudBender.Version': __version__
        }

        jenv.globals['_config'] = {'cfn': self.template_vars, 'Metadata': template_metadata}

        # First render pass to calculate an md5 checksum
        template_metadata['Template.Hash'] = hashlib.md5(
            template.render({'cfn': self.template_vars, 'Metadata': template_metadata}).encode('utf-8')).hexdigest()

        # Reset state and set Metadata for the final render pass
        jenv.globals['get_custom_att'](context={'_config': self.template_vars}, flush=True)
        jenv.globals['render_once'](context={'_config': self.template_vars}, reset=True)

        # Try to get local git info. Note: PROJECT_NAME, ROOT_DIR, input_file,
        # TEMPLATE_EXT and PROJECT_DIR are not defined anywhere in this commit;
        # the excepts below swallow the resulting NameErrors, so the git
        # metadata silently stays at 'unknown'.
        try:
            self.template_vars['Metadata']['{}.Version'.format(PROJECT_NAME)] = subprocess.check_output(
                'git describe --tags'.split(' '), universal_newlines=True)[:-1]
        except Exception:
            pass

        # Add latest tag/commit
        try:
            os.chdir(ROOT_DIR)
            _version = subprocess.check_output('git describe --tags'.split(' '), universal_newlines=True)[:-1]
            if _version:
                self.template_vars['Metadata']['CloudBender.Version'] = _version

            os.chdir(os.path.dirname(template.filename))
            _comment = subprocess.check_output('git log -1 --pretty=%B {0}{1}'
                                               .format(input_file, TEMPLATE_EXT).split(' ')).decode('utf-8').strip() \
                                               .replace('"', '').replace('#', '').replace('\n', '').replace(':', ' ')
            if _comment:
                self.template_vars['Metadata']['Template.GitComment'] = _comment

            os.chdir(PROJECT_DIR)
        except Exception:
            pass

        logger.info('Rendering %s', template.filename)
        rendered = template.render({'cfn': self.template_vars, 'Metadata': template_metadata})

        try:
            data = yaml.load(rendered)
        except Exception:
            # In case we rendered invalid yaml this helps to debug
            logger.error(rendered)
            raise

        # Some sanity checks and final cosmetics
        # Check for empty top level Parameters, Outputs and Conditions and remove them
        for key in ['Parameters', 'Outputs', 'Conditions']:
            if key in data and data[key] is None:
                # Delete from the data structure, which also takes care of json
                del data[key]
                # but also remove from the rendered yaml file
                rendered = rendered.replace('\n' + key + ":", '')

        # Condense multiple empty lines to one
        self.cfn_template = re.sub(r'\n\s*\n', '\n\n', rendered)
    def write_template_file(self):
        if self.cfn_template:
            yaml_file = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml")
            self._ensure_dirs('template_path')
            with open(yaml_file, 'w') as yaml_contents:
                yaml_contents.write(self.cfn_template)
                logger.info('Wrote %s to %s', self.template, yaml_file)
        else:
            logger.error('No cfn template rendered yet for stack {}.'.format(self.stackname))

    def delete_template_file(self):
        yaml_file = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml")
        try:
            os.remove(yaml_file)
            logger.debug('Deleted cfn template %s.', yaml_file)
        except OSError:
            pass

    def read_template_file(self):
        yaml_file = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml")
        with open(yaml_file, 'r') as yaml_contents:
            self.cfn_template = yaml_contents.read()
            logger.debug('Read cfn template %s.', yaml_file)
    def validate(self):
        """ Validates the rendered template via cfn-lint """
        if not self.cfn_template:
            self.read_template_file()

        data = yaml.load(self.cfn_template)

        try:
            ignore_checks = data['Metadata']['cfnlint_ignore']
        except KeyError:
            ignore_checks = []

        # Ignore some more checks around injected parameters as we generate these
        if self.template_vars['Mode'] == "Piped":
            ignore_checks = ignore_checks + ['W2505', 'W2509', 'W2507']

        filename = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml")
        logger.info('Validating {0}'.format(filename))

        lint_args = ['--template', filename]
        if ignore_checks:
            lint_args.append('--ignore-checks')
            lint_args = lint_args + ignore_checks
            logger.info('Ignoring checks: {}'.format(','.join(ignore_checks)))

        (args, filenames, formatter) = cfnlint.core.get_args_filenames(lint_args)
        (template, rules, matches) = cfnlint.core.get_template_rules(filename, args)
        if not matches:
            matches.extend(cfnlint.core.run_cli(filename, template, rules, ['us-east-1'], None))
        if len(matches):
            for match in matches:
                logger.error(formatter._format(match))
        else:
            logger.info("Passed.")
    def resolve_parameters(self):
        """ Renders parameters for the stack based on the source template and the environment configuration """

        if not self.cfn_template:
            self.read_template_file()

        data = yaml.load(self.cfn_template)

        # Inspect all outputs of the running Conglomerate members
        # if we run in Piped Mode
        # if self.template_vars['Mode'] == "Piped":
        #     try:
        #         stack_outputs = inspect_stacks(config['tags']['Conglomerate'])
        #         logger.info(pprint.pformat(stack_outputs))
        #     except KeyError:
        #         pass

        if 'Parameters' in data:
            self.cfn_parameters = []
            for p in data['Parameters']:
                # In Piped mode we try to resolve all Parameters first via stack_outputs
                # if config['cfn']['Mode'] == "Piped":
                #     try:
                #         # first reverse the rename due to AWS alphanumeric restriction for parameter names
                #         _p = p.replace('DoT', '.')
                #         value = str(stack_outputs[_p])
                #         parameters.append({'ParameterKey': p, 'ParameterValue': value})
                #         logger.info('Got {} = {} from running stack'.format(p, value))
                #         continue
                #     except KeyError:
                #         pass

                # Key name in config tree is: stacks.<self.stackname>.parameters.<parameter>
                try:
                    value = str(self.parameters[p])
                    self.cfn_parameters.append({'ParameterKey': p, 'ParameterValue': value})
                    logger.info('Got {} = {}'.format(p, value))
                except KeyError:
                    # If we have a Default defined in the CFN template skip it, as AWS will use it
                    if 'Default' in data['Parameters'][p]:
                        continue
                    else:
                        logger.error('Cannot find value for parameter {0}'.format(p))
    def write_parameter_file(self):
        parameter_file = os.path.join(self.ctx['parameter_path'], self.rel_path, self.stackname + ".yaml")

        # Render parameters as json for AWS CFN
        self._ensure_dirs('parameter_path')
        with open(parameter_file, 'w') as parameter_contents:
            parameter_contents.write(json.dumps(self.cfn_parameters, indent=2, separators=(',', ': '), sort_keys=True))
            logger.info('Wrote json parameters for %s to %s', self.stackname, parameter_file)

        if not self.cfn_parameters:
            # Make sure there are no parameters left over from previous runs
            if os.path.isfile(parameter_file):
                os.remove(parameter_file)

    def delete_parameter_file(self):
        parameter_file = os.path.join(self.ctx['parameter_path'], self.rel_path, self.stackname + ".yaml")
        try:
            os.remove(parameter_file)
            logger.debug('Deleted parameter %s.', parameter_file)
        except OSError:
            pass
    def create(self):
        """ Creates a stack """

        # Prepare parameters
        self.resolve_parameters()
        self.write_parameter_file()
        if not self.cfn_template:
            self.read_template_file()

        logger.info('Creating {0}'.format(self.stackname))
        response = self.connection_manager.call(
            'cloudformation', 'create_stack',
            {'StackName': self.stackname,
             'TemplateBody': self.cfn_template,
             'Parameters': self.cfn_parameters,
             'Tags': [{"Key": str(k), "Value": str(v)} for k, v in self.tags.items()],
             'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']},
            profile=self.profile, region=self.region)

        return self._wait_for_completion()

    def update(self):
        """ Updates an existing stack """

        # Prepare parameters
        self.resolve_parameters()
        self.write_parameter_file()
        if not self.cfn_template:
            self.read_template_file()

        logger.info('Updating {0}'.format(self.stackname))
        try:
            response = self.connection_manager.call(
                'cloudformation', 'update_stack',
                {'StackName': self.stackname,
                 'TemplateBody': self.cfn_template,
                 'Parameters': self.cfn_parameters,
                 'Tags': [{"Key": str(k), "Value": str(v)} for k, v in self.tags.items()],
                 'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']},
                profile=self.profile, region=self.region)

        except ClientError as e:
            if 'No updates are to be performed' in e.response['Error']['Message']:
                logger.info('No updates for {0}'.format(self.stackname))
                return StackStatus.COMPLETE
            else:
                raise e

        return self._wait_for_completion()

    def delete(self):
        """ Deletes a stack """

        logger.info('Deleting {0}'.format(self.stackname))
        response = self.connection_manager.call(
            'cloudformation', 'delete_stack', {'StackName': self.stackname},
            profile=self.profile, region=self.region)

        return self._wait_for_completion()
    def describe(self):
        """
        Returns a description of the stack.

        :returns: A stack description.
        """
        return self.connection_manager.call(
            "cloudformation",
            "describe_stacks",
            {"StackName": self.stackname},
            profile=self.profile, region=self.region)

    def get_status(self):
        """
        Returns the stack's status.

        :returns: The stack's status.
        """
        try:
            status = self.describe()["Stacks"][0]["StackStatus"]
        except ClientError as e:
            if e.response["Error"]["Message"].endswith("does not exist"):
                return None
            else:
                raise e
        return status

    def describe_events(self):
        """
        Returns a dictionary containing the stack events.

        :returns: The CloudFormation events for a stack.
        """
        try:
            status = self.connection_manager.call(
                "cloudformation",
                "describe_stack_events",
                {"StackName": self.stackname},
                profile=self.profile, region=self.region)
        except ClientError as e:
            if e.response["Error"]["Message"].endswith("does not exist"):
                return None
            else:
                raise e

        return status
    def _wait_for_completion(self, timeout=0):
        """
        Waits for a stack operation to finish. Prints CloudFormation events while it waits.

        :param timeout: Timeout before returning
        :returns: The final stack status.
        """

        def timed_out(elapsed):
            return elapsed >= timeout if timeout else False

        status = StackStatus.IN_PROGRESS

        self.most_recent_event_datetime = (
            datetime.now(tzutc()) - timedelta(seconds=3)
        )
        elapsed = 0
        while status == StackStatus.IN_PROGRESS and not timed_out(elapsed):
            status = self._get_simplified_status(self.get_status())
            if not status:
                return None

            self._log_new_events()
            time.sleep(4)
            elapsed += 4

        return status

    @staticmethod
    def _get_simplified_status(status):
        """ Returns the simplified Stack Status. """
        if status:
            if status.endswith("ROLLBACK_COMPLETE"):
                return StackStatus.FAILED
            elif status.endswith("_COMPLETE"):
                return StackStatus.COMPLETE
            elif status.endswith("_IN_PROGRESS"):
                return StackStatus.IN_PROGRESS
            elif status.endswith("_FAILED"):
                return StackStatus.FAILED
            else:
                return 'Unknown'

    def _log_new_events(self):
        """ Log the latest stack events while the stack is being built. """

        events = self.describe_events()
        if events:
            events = events["StackEvents"]
            events.reverse()
            new_events = [
                event for event in events
                if event["Timestamp"] > self.most_recent_event_datetime
            ]
            for event in new_events:
                logger.info(" ".join([
                    self.stackname,
                    event["LogicalResourceId"],
                    event["ResourceType"],
                    event["ResourceStatus"],
                    event.get("ResourceStatusReason", "")
                ]))
                self.most_recent_event_datetime = event["Timestamp"]

    def _ensure_dirs(self, path):
        """ Ensure output dirs exist """
        if not os.path.exists(os.path.join(self.ctx[path], self.rel_path)):
            os.makedirs(os.path.join(self.ctx[path], self.rel_path))

cloudbender/stackgroup.py Normal file

@@ -0,0 +1,169 @@
import os
import glob
import logging

from .utils import read_yaml_file, dict_merge
from .stack import Stack

logger = logging.getLogger(__name__)


class StackGroup(object):
    def __init__(self, path, ctx):
        self.name = None
        self.ctx = ctx
        self.path = path
        self.rel_path = os.path.relpath(path, ctx['config_path'])
        self.config = {}
        self.sgs = []
        self.stacks = []

        if self.rel_path == '.':
            self.rel_path = ''

    def dump_config(self):
        for sg in self.sgs:
            sg.dump_config()

        logger.debug("<StackGroup {}: {}>".format(self.name, vars(self)))

        for s in self.stacks:
            s.dump_config()
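
    # read_config() walks the config tree recursively; each folder's
    # config.yaml is dict_merge()d into its parent's, so tags, parameters,
    # vars, region and profile inherit downwards. E.g. (hypothetical layout):
    #
    #   config/config.yaml        # global defaults
    #   config/prod/config.yaml   # overrides for the "prod" group
    #   config/prod/vpc.yaml      # stack "vpc", inherits the merged prod config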
    def read_config(self, parent_config={}):
        if not os.path.isdir(self.path):
            return None

        # First read config.yaml if present
        _config = read_yaml_file(os.path.join(self.path, 'config.yaml'))

        # The group name, unless set explicitly via config, is derived from the
        # sub-folder, or in case of the root object from the parent folder
        if "stackgroupname" in _config:
            self.name = _config["stackgroupname"]
        elif not self.name:
            self.name = os.path.split(self.path)[1]

        # Merge config with parent config
        _config = dict_merge(parent_config, _config)

        tags = _config.get('tags', {})
        parameters = _config.get('parameters', {})
        template_vars = _config.get('vars', {})
        region = _config.get('region', 'global')
        profile = _config.get('profile', '')
        stackname_prefix = _config.get('stacknameprefix', '')

        logger.info("StackGroup {} added.".format(self.name))

        # Add stacks
        stacks = [s for s in glob.glob(os.path.join(self.path, '*.yaml')) if not s.endswith("config.yaml")]
        for stack_path in stacks:
            stackname = os.path.basename(stack_path).split('.')[0]
            template = stackname
            if stackname_prefix:
                stackname = stackname_prefix + stackname

            new_stack = Stack(name=stackname, template=template,
                              path=stack_path, rel_path=str(self.rel_path),
                              tags=dict(tags), parameters=dict(parameters),
                              template_vars=dict(template_vars),
                              region=str(region), profile=str(profile),
                              ctx=self.ctx)
            new_stack.read_config()
            self.stacks.append(new_stack)

        # Create StackGroups recursively
        for sub_group in [f.path for f in os.scandir(self.path) if f.is_dir()]:
            sg = StackGroup(sub_group, self.ctx)
            sg.read_config(_config)

            self.sgs.append(sg)

        # Return the raw, merged config to the parent
        return _config
    def get_stacks(self, name=None, recursive=True, match_by='name'):
        """ Returns [stack] matching stack_name, or all stacks if name is None """
        stacks = []
        if name:
            logger.debug("Looking for stack {} in group {}".format(name, self.name))

        for s in self.stacks:
            if not name or (s.stackname == name and match_by == 'name') or (s.path.endswith(name) and match_by == 'path'):
                if self.rel_path:
                    logger.debug("Found stack {} in group {}".format(s.stackname, self.rel_path))
                else:
                    logger.debug("Found stack {}".format(s.stackname))
                stacks.append(s)

        if recursive:
            for sg in self.sgs:
                s = sg.get_stacks(name, recursive, match_by)
                if s:
                    stacks = stacks + s

        return stacks

    def get_stackgroup(self, name=None, recursive=True, match_by='name'):
        """ Returns the stack group matching stackgroup_name, or this group if name is None """
        if not name or (self.name == name and match_by == 'name') or (self.path.endswith(name) and match_by == 'path'):
            logger.debug("Found stack_group {}".format(self.name))
            return self

        if name and name != 'config':
            logger.debug("Looking for stack_group {} in group {}".format(name, self.name))

        if recursive:
            for sg in self.sgs:
                s = sg.get_stackgroup(name, recursive, match_by)
                if s:
                    return s

        return None
# TODO: Integrate properly into the StackGroup class, broken for now
# stack output inspection
def BROKEN_inspect_stacks(conglomerate):
    # Get all stacks of the conglomerate
    client = Connection.get_connection('cloudformation')
    running_stacks = client.describe_stacks()

    stacks = []
    for stack in running_stacks['Stacks']:
        for tag in stack['Tags']:
            if tag['Key'] == 'Conglomerate' and tag['Value'] == conglomerate:
                stacks.append(stack)
                break

    # Gather stack outputs, use Tag['Artifact'] as name space: Artifact.OutputName, same as FortyTwo
    stack_outputs = {}
    for stack in stacks:
        # If the stack has an Artifact Tag put resources into the namespace Artifact.Resource
        artifact = None
        for tag in stack['Tags']:
            if tag['Key'] == 'Artifact':
                artifact = tag['Value']

        if artifact:
            key_prefix = "{}.".format(artifact)
        else:
            key_prefix = ""

        try:
            for output in stack['Outputs']:
                # Gather all outputs of the stack into a one-dimensional key=value structure
                stack_outputs[key_prefix + output['OutputKey']] = output['OutputValue']
        except KeyError:
            pass

    # Add outputs from stacks into the data for jinja under StackOutput
    return stack_outputs

cloudbender/utils.py Normal file

@@ -0,0 +1,70 @@
import os
import copy
import logging

import yaml
import boto3

logger = logging.getLogger(__name__)


def read_yaml_file(path):
    data = {}
    if os.path.exists(path):
        with open(path, 'r') as config_file_contents:
            logger.debug("Reading config file: {}".format(path))
            try:
                _data = yaml.load(config_file_contents.read())
                if _data:
                    data.update(_data)
            except Exception as e:
                logger.warning("Error reading config file: {} ({})".format(path, e))

    return data
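

# dict_merge() merges nested dicts recursively instead of overwriting whole
# sub-trees, e.g.:
#   dict_merge({'tags': {'Env': 'dev'}}, {'tags': {'Team': 'ops'}})
#   == {'tags': {'Env': 'dev', 'Team': 'ops'}}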
def dict_merge(a, b):
    """ Deep merge to allow proper inheritance for config files """
    if not a:
        return b
    if not b:
        return a

    if not isinstance(a, dict) or not isinstance(b, dict):
        raise TypeError

    result = copy.deepcopy(a)
    for k, v in b.items():
        if k in result and isinstance(result[k], dict):
            result[k] = dict_merge(result[k], v)
        else:
            result[k] = copy.deepcopy(v)
    return result
def ensure_dir(path):
    """ Creates dir if it does not already exist. """
    if not os.path.exists(path):
        os.makedirs(path)
        logger.info('Created directory: %s', path)


def setup_logging(debug):
    if debug:
        our_level = logging.DEBUG
        # logging.getLogger("botocore").setLevel(logging.INFO)
        boto3.set_stream_logger('')
    else:
        our_level = logging.INFO
        logging.getLogger("botocore").setLevel(logging.CRITICAL)

    formatter = logging.Formatter(
        fmt="[%(asctime)s] - %(name)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S")

    log_handler = logging.StreamHandler()
    log_handler.setFormatter(formatter)
    logger = logging.getLogger("cloudbender")
    logger.addHandler(log_handler)
    logger.setLevel(our_level)
    return logger

requirements.txt Normal file

@@ -0,0 +1,8 @@
boto3
Jinja2
oyaml
click
pytest
semver
pyminifier
cfn-lint