diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..db21a62 --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +extend-ignore = E501 +exclude = .git,__pycache__,build,dist,report diff --git a/Makefile b/Makefile index d61e41d..9de69a6 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ dev_setup: pip install -r dev-requirements.txt --user pytest: - flake8 --ignore=E501 cloudbender tests + flake8 cloudbender tests TEST=True pytest --log-cli-level=DEBUG clean: diff --git a/cloudbender/__init__.py b/cloudbender/__init__.py index 5961f1d..304402c 100644 --- a/cloudbender/__init__.py +++ b/cloudbender/__init__.py @@ -17,4 +17,4 @@ class NullHandler(logging.Handler): # pragma: no cover pass -logging.getLogger('cloudbender').addHandler(NullHandler()) +logging.getLogger("cloudbender").addHandler(NullHandler()) diff --git a/cloudbender/cli.py b/cloudbender/cli.py index 5f26b2e..750b508 100644 --- a/cloudbender/cli.py +++ b/cloudbender/cli.py @@ -12,6 +12,7 @@ from .utils import setup_logging from .exceptions import InvalidProjectDir import logging + logger = logging.getLogger(__name__) @@ -27,8 +28,8 @@ def cli(ctx, debug, directory): if directory: if not os.path.isabs(directory): directory = os.path.normpath(os.path.join(os.getcwd(), directory)) - elif os.getenv('CLOUDBENDER_PROJECT_ROOT'): - directory = os.getenv('CLOUDBENDER_PROJECT_ROOT') + elif os.getenv("CLOUDBENDER_PROJECT_ROOT"): + directory = os.getenv("CLOUDBENDER_PROJECT_ROOT") else: directory = os.getcwd() @@ -50,7 +51,7 @@ def cli(ctx, debug, directory): @click.option("--multi", is_flag=True, help="Allow more than one stack to match") @click.pass_obj def render(cb, stack_names, multi): - """ Renders template and its parameters - CFN only""" + """Renders template and its parameters - CFN only""" stacks = _find_stacks(cb, stack_names, multi) _render(stacks) @@ -61,7 +62,7 @@ def render(cb, stack_names, multi): @click.option("--multi", is_flag=True, help="Allow more than one stack to match") @click.pass_obj def sync(cb, stack_names, multi): - """ Renders template and provisions it right away """ + """Renders template and provisions it right away""" stacks = _find_stacks(cb, stack_names, multi) @@ -74,7 +75,7 @@ def sync(cb, stack_names, multi): @click.option("--multi", is_flag=True, help="Allow more than one stack to match") @click.pass_obj def validate(cb, stack_names, multi): - """ Validates already rendered templates using cfn-lint - CFN only""" + """Validates already rendered templates using cfn-lint - CFN only""" stacks = _find_stacks(cb, stack_names, multi) for s in stacks: @@ -86,11 +87,17 @@ def validate(cb, stack_names, multi): @click.command() @click.argument("stack_names", nargs=-1) @click.option("--multi", is_flag=True, help="Allow more than one stack to match") -@click.option("--include", default='.*', help="regex matching wanted outputs, default '.*'") -@click.option("--values", is_flag=True, help="Only output values, most useful if only one outputs is returned") +@click.option( + "--include", default=".*", help="regex matching wanted outputs, default '.*'" +) +@click.option( + "--values", + is_flag=True, + help="Only output values, most useful if only one output is returned", +) @click.pass_obj def outputs(cb, stack_names, multi, include, values): - """ Prints all stack outputs """ + """Prints all stack outputs""" stacks = _find_stacks(cb, stack_names, multi) for s in stacks: @@ -110,7 +117,7 @@ def outputs(cb, stack_names, multi, include, values): @click.option("--graph", is_flag=True, help="Create
Dot Graph file") @click.pass_obj def create_docs(cb, stack_names, multi, graph): - """ Parses all documentation fragments out of rendered templates creating docs/*.md file """ + """Parses all documentation fragments out of rendered templates creating docs/*.md file""" stacks = _find_stacks(cb, stack_names, multi) for s in stacks: @@ -122,7 +129,7 @@ def create_docs(cb, stack_names, multi, graph): @click.argument("change_set_name") @click.pass_obj def create_change_set(cb, stack_name, change_set_name): - """ Creates a change set for an existing stack - CFN only""" + """Creates a change set for an existing stack - CFN only""" stacks = _find_stacks(cb, [stack_name]) for s in stacks: @@ -133,29 +140,33 @@ def create_change_set(cb, stack_name, change_set_name): @click.argument("stack_name") @click.pass_obj def refresh(cb, stack_name): - """ Refreshes Pulumi stack / Drift detection """ + """Refreshes Pulumi stack / Drift detection""" stacks = _find_stacks(cb, [stack_name]) for s in stacks: - if s.mode == 'pulumi': + if s.mode == "pulumi": s.refresh() else: - logger.info('{} uses Cloudformation, refresh skipped.'.format(s.stackname)) + logger.info("{} uses Cloudformation, refresh skipped.".format(s.stackname)) @click.command() @click.argument("stack_name") -@click.option("--reset", is_flag=True, help="All pending stack operations are removed and the stack will be re-imported") +@click.option( + "--reset", + is_flag=True, + help="All pending stack operations are removed and the stack will be re-imported", +) @click.pass_obj def export(cb, stack_name, reset=False): - """ Exports a Pulumi stack to repair state """ + """Exports a Pulumi stack to repair state""" stacks = _find_stacks(cb, [stack_name]) for s in stacks: - if s.mode == 'pulumi': + if s.mode == "pulumi": s.export(reset) else: - logger.info('{} uses Cloudformation, export skipped.'.format(s.stackname)) + logger.info("{} uses Cloudformation, export skipped.".format(s.stackname)) @click.command() @@ -165,7 +176,7 @@ def export(cb, stack_name, reset=False): @click.option("--secret", is_flag=True, help="Value is a secret") @click.pass_obj def set_config(cb, stack_name, key, value, secret=False): - """ Sets a config value, encrypts with stack key if secret """ + """Sets a config value, encrypts with stack key if secret""" stacks = _find_stacks(cb, [stack_name]) for s in stacks: @@ -177,7 +188,7 @@ def set_config(cb, stack_name, key, value, secret=False): @click.argument("key") @click.pass_obj def get_config(cb, stack_name, key): - """ Get a config value, decrypted if secret """ + """Get a config value, decrypted if secret""" stacks = _find_stacks(cb, [stack_name]) for s in stacks: @@ -188,14 +199,18 @@ def get_config(cb, stack_name, key): @click.argument("stack_name") @click.pass_obj def preview(cb, stack_name): - """ Preview of Pulumi stack up operation """ + """Preview of Pulumi stack up operation""" stacks = _find_stacks(cb, [stack_name]) for s in stacks: - if s.mode == 'pulumi': + if s.mode == "pulumi": s.preview() else: - logger.warning('{} uses Cloudformation, use create-change-set for previews.'.format(s.stackname)) + logger.warning( + "{} uses Cloudformation, use create-change-set for previews.".format( + s.stackname + ) + ) @click.command() @@ -203,7 +218,7 @@ def preview(cb, stack_name): @click.option("--multi", is_flag=True, help="Allow more than one stack to match") @click.pass_obj def provision(cb, stack_names, multi): - """ Creates or updates stacks or stack groups """ + """Creates or updates stacks or stack groups""" stacks = 
_find_stacks(cb, stack_names, multi) _provision(cb, stacks) @@ -214,7 +229,7 @@ def provision(cb, stack_names, multi): @click.option("--multi", is_flag=True, help="Allow more than one stack to match") @click.pass_obj def delete(cb, stack_names, multi): - """ Deletes stacks or stack groups """ + """Deletes stacks or stack groups""" stacks = _find_stacks(cb, stack_names, multi) # Reverse steps @@ -235,16 +250,16 @@ def delete(cb, stack_names, multi): @click.command() @click.pass_obj def clean(cb): - """ Deletes all previously rendered files locally """ + """Deletes all previously rendered files locally""" cb.clean() def sort_stacks(cb, stacks): - """ Sort stacks by dependencies """ + """Sort stacks by dependencies""" data = {} for s in stacks: - if s.mode == 'pulumi': + if s.mode == "pulumi": data[s.id] = set() continue @@ -253,10 +268,14 @@ def sort_stacks(cb, stacks): deps = [] for d in s.dependencies: # For now we assume deps are artifacts so we prepend them with our local profile and region to match stack.id - for dep_stack in cb.filter_stacks({'region': s.region, 'profile': s.profile, 'provides': d}): + for dep_stack in cb.filter_stacks( + {"region": s.region, "profile": s.profile, "provides": d} + ): deps.append(dep_stack.id) # also look for global services - for dep_stack in cb.filter_stacks({'region': 'global', 'profile': s.profile, 'provides': d}): + for dep_stack in cb.filter_stacks( + {"region": "global", "profile": s.profile, "provides": d} + ): deps.append(dep_stack.id) data[s.id] = set(deps) @@ -267,7 +286,9 @@ def sort_stacks(cb, stacks): v.discard(k) if data: - extra_items_in_deps = functools.reduce(set.union, data.values()) - set(data.keys()) + extra_items_in_deps = functools.reduce(set.union, data.values()) - set( + data.keys() + ) data.update({item: set() for item in extra_items_in_deps}) while True: @@ -283,41 +304,46 @@ def sort_stacks(cb, stacks): result.append(s) yield result - data = {item: (dep - ordered) for item, dep in data.items() - if item not in ordered} + data = { + item: (dep - ordered) for item, dep in data.items() if item not in ordered + } assert not data, "A cyclic dependency exists amongst %r" % data def _find_stacks(cb, stack_names, multi=False): - """ search stacks by name """ + """search stacks by name""" stacks = [] for s in stack_names: stacks = stacks + cb.resolve_stacks(s) if not multi and len(stacks) > 1: - logger.error('Found more than one stack matching name ({}). Please set --multi if that is what you want.'.format(', '.join(stack_names))) + logger.error( + "Found more than one stack matching name ({}). 
Please set --multi if that is what you want.".format( + ", ".join(stack_names) + ) + ) raise click.Abort() if not stacks: - logger.error('Cannot find stack matching: {}'.format(', '.join(stack_names))) + logger.error("Cannot find stack matching: {}".format(", ".join(stack_names))) raise click.Abort() return stacks def _render(stacks): - """ Utility function to reuse code between tasks """ + """Utility function to reuse code between tasks""" for s in stacks: - if s.mode != 'pulumi': + if s.mode != "pulumi": s.render() s.write_template_file() else: - logger.info('{} uses Pulumi, render skipped.'.format(s.stackname)) + logger.info("{} uses Pulumi, render skipped.".format(s.stackname)) def _provision(cb, stacks): - """ Utility function to reuse code between tasks """ + """Utility function to reuse code between tasks""" for step in sort_stacks(cb, stacks): if step: with ThreadPoolExecutor(max_workers=len(step)) as group: @@ -348,5 +374,5 @@ cli.add_command(set_config) cli.add_command(get_config) cli.add_command(export) -if __name__ == '__main__': +if __name__ == "__main__": cli(obj={}) diff --git a/cloudbender/connection.py b/cloudbender/connection.py index b7356b6..a14a4d7 100644 --- a/cloudbender/connection.py +++ b/cloudbender/connection.py @@ -10,7 +10,7 @@ import logging logger = logging.getLogger(__name__) -class BotoConnection(): +class BotoConnection: _sessions = {} _clients = {} @@ -27,13 +27,15 @@ class BotoConnection(): # Change the cache path from the default of ~/.aws/boto/cache to the one used by awscli session_vars = {} if profile: - session_vars['profile'] = (None, None, profile, None) - if region and region != 'global': - session_vars['region'] = (None, None, region, None) + session_vars["profile"] = (None, None, profile, None) + if region and region != "global": + session_vars["region"] = (None, None, region, None) session = botocore.session.Session(session_vars=session_vars) - cli_cache = os.path.join(os.path.expanduser('~'), '.aws/cli/cache') - session.get_component('credential_provider').get_provider('assume-role').cache = credentials.JSONFileCache(cli_cache) + cli_cache = os.path.join(os.path.expanduser("~"), ".aws/cli/cache") + session.get_component("credential_provider").get_provider( + "assume-role" + ).cache = credentials.JSONFileCache(cli_cache) self._sessions[(profile, region)] = session @@ -41,7 +43,9 @@ def _get_client(self, service, profile=None, region=None): if self._clients.get((profile, region, service)): - logger.debug("Reusing boto session for {} {} {}".format(profile, region, service)) + logger.debug( + "Reusing boto session for {} {} {}".format(profile, region, service) + ) return self._clients[(profile, region, service)] session = self._get_session(profile, region) @@ -59,8 +63,12 @@ return getattr(client, command)(**kwargs) except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] == 'Throttling': - logger.warning("Throttling exception occured during {} - retry after 3s".format(command)) + if e.response["Error"]["Code"] == "Throttling": + logger.warning( + "Throttling exception occurred during {} - retry after 3s".format( + command + ) + ) time.sleep(3) pass else: diff --git a/cloudbender/core.py b/cloudbender/core.py index cefead6..cb272d9 100644 --- a/cloudbender/core.py +++ b/cloudbender/core.py @@ -9,7 +9,8 @@ logger = logging.getLogger(__name__) class CloudBender(object): - """ Config Class to handle recursive conf/* config tree """ + """Config Class to handle recursive conf/* config
tree""" + def __init__(self, root_path): self.root = pathlib.Path(root_path) self.sg = None @@ -20,28 +21,39 @@ class CloudBender(object): "hooks_path": self.root.joinpath("hooks"), "docs_path": self.root.joinpath("docs"), "outputs_path": self.root.joinpath("outputs"), - "artifact_paths": [self.root.joinpath("artifacts")] + "artifact_paths": [self.root.joinpath("artifacts")], } - if not self.ctx['config_path'].is_dir(): - raise InvalidProjectDir("Check '{0}' exists and is a valid CloudBender project folder.".format(self.ctx['config_path'])) + if not self.ctx["config_path"].is_dir(): + raise InvalidProjectDir( + "Check '{0}' exists and is a valid CloudBender project folder.".format( + self.ctx["config_path"] + ) + ) def read_config(self): - """Load the /config.yaml, /*.yaml as stacks, sub-folders are sub-groups """ + """Load the /config.yaml, /*.yaml as stacks, sub-folders are sub-groups""" # Read top level config.yaml and extract CloudBender CTX - _config = read_config_file(self.ctx['config_path'].joinpath('config.yaml')) + _config = read_config_file(self.ctx["config_path"].joinpath("config.yaml")) # Legacy naming - if _config and _config.get('CloudBender'): - self.ctx.update(_config.get('CloudBender')) + if _config and _config.get("CloudBender"): + self.ctx.update(_config.get("CloudBender")) - if _config and _config.get('cloudbender'): - self.ctx.update(_config.get('cloudbender')) + if _config and _config.get("cloudbender"): + self.ctx.update(_config.get("cloudbender")) # Make sure all paths are abs for k, v in self.ctx.items(): - if k in ['config_path', 'template_path', 'hooks_path', 'docs_path', 'artifact_paths', 'outputs_path']: + if k in [ + "config_path", + "template_path", + "hooks_path", + "docs_path", + "artifact_paths", + "outputs_path", + ]: if isinstance(v, list): new_list = [] for p in v: @@ -56,7 +68,7 @@ class CloudBender(object): if not v.is_absolute(): self.ctx[k] = self.root.joinpath(v) - self.sg = StackGroup(self.ctx['config_path'], self.ctx) + self.sg = StackGroup(self.ctx["config_path"], self.ctx) self.sg.read_config() self.all_stacks = self.sg.get_stacks() @@ -77,15 +89,15 @@ class CloudBender(object): token = token[7:] # If path ends with yaml we look for stacks - if token.endswith('.yaml'): - stacks = self.sg.get_stacks(token, match_by='path') + if token.endswith(".yaml"): + stacks = self.sg.get_stacks(token, match_by="path") # otherwise assume we look for a group, if we find a group return all stacks below else: # Strip potential trailing slash - token = token.rstrip('/') + token = token.rstrip("/") - sg = self.sg.get_stackgroup(token, match_by='path') + sg = self.sg.get_stackgroup(token, match_by="path") if sg: stacks = sg.get_stacks() diff --git a/cloudbender/hooks.py b/cloudbender/hooks.py index 3969da7..f673801 100644 --- a/cloudbender/hooks.py +++ b/cloudbender/hooks.py @@ -8,6 +8,7 @@ from functools import wraps from .exceptions import InvalidHook import logging + logger = logging.getLogger(__name__) @@ -40,7 +41,9 @@ def pulumi_ws(func): @wraps(func) def decorated(self, *args, **kwargs): # setup temp workspace - self.work_dir = tempfile.mkdtemp(dir=tempfile.gettempdir(), prefix="cloudbender-") + self.work_dir = tempfile.mkdtemp( + dir=tempfile.gettempdir(), prefix="cloudbender-" + ) response = func(self, *args, **kwargs) @@ -63,4 +66,4 @@ def cmd(stack, arguments): hook = subprocess.run(arguments, stdout=subprocess.PIPE) logger.info(hook.stdout.decode("utf-8")) except TypeError: - raise InvalidHook('Invalid argument {}'.format(arguments)) + raise 
InvalidHook("Invalid argument {}".format(arguments)) diff --git a/cloudbender/jinja.py b/cloudbender/jinja.py index 497a297..34a621c 100644 --- a/cloudbender/jinja.py +++ b/cloudbender/jinja.py @@ -24,10 +24,10 @@ logger = logging.getLogger(__name__) @jinja2.contextfunction -def option(context, attribute, default_value=u'', source='options'): - """ Get attribute from options data structure, default_value otherwise """ +def option(context, attribute, default_value="", source="options"): + """Get attribute from options data structure, default_value otherwise""" environment = context.environment - options = environment.globals['_config'][source] + options = environment.globals["_config"][source] if not attribute: return default_value @@ -48,7 +48,7 @@ def option(context, attribute, default_value=u'', source='options'): @jinja2.contextfunction def include_raw_gz(context, files=None, gz=True, remove_comments=False): jenv = context.environment - output = '' + output = "" # For shell script we can even remove whitespaces so treat them individually # sed -e '2,$ {/^ *$/d ; /^ *#/d ; /^[ \t] *#/d ; /*^/d ; s/^[ \t]*// ; s/*[ \t]$// ; s/ $//}' @@ -57,33 +57,35 @@ def include_raw_gz(context, files=None, gz=True, remove_comments=False): if remove_comments: # Remove full line comments but not shebang - _re_comment = re.compile(r'^\s*#[^!]') - _re_blank = re.compile(r'^\s*$') - _re_keep = re.compile(r'^## template: jinja$') - stripped_output = '' + _re_comment = re.compile(r"^\s*#[^!]") + _re_blank = re.compile(r"^\s*$") + _re_keep = re.compile(r"^## template: jinja$") + stripped_output = "" for curline in output.splitlines(): if re.match(_re_blank, curline): continue elif re.match(_re_keep, curline): - stripped_output = stripped_output + curline + '\n' + stripped_output = stripped_output + curline + "\n" elif re.match(_re_comment, curline): logger.debug("Removed {}".format(curline)) else: - stripped_output = stripped_output + curline + '\n' + stripped_output = stripped_output + curline + "\n" output = stripped_output if not gz: - return(output) + return output buf = io.BytesIO() - f = gzip.GzipFile(mode='w', fileobj=buf, mtime=0) + f = gzip.GzipFile(mode="w", fileobj=buf, mtime=0) f.write(output.encode()) f.close() # MaxSize is 21847 - logger.info("Compressed user-data from {} to {}".format(len(output), len(buf.getvalue()))) - return base64.b64encode(buf.getvalue()).decode('utf-8') + logger.info( + "Compressed user-data from {} to {}".format(len(output), len(buf.getvalue())) + ) + return base64.b64encode(buf.getvalue()).decode("utf-8") @jinja2.contextfunction @@ -92,33 +94,33 @@ def raise_helper(context, msg): # Custom tests -def regex(value='', pattern='', ignorecase=False, match_type='search'): - ''' Expose `re` as a boolean filter using the `search` method by default. - This is likely only useful for `search` and `match` which already - have their own filters. - ''' +def regex(value="", pattern="", ignorecase=False, match_type="search"): + """Expose `re` as a boolean filter using the `search` method by default. + This is likely only useful for `search` and `match` which already + have their own filters. 
+ """ if ignorecase: flags = re.I else: flags = 0 _re = re.compile(pattern, flags=flags) - if getattr(_re, match_type, 'search')(value) is not None: + if getattr(_re, match_type, "search")(value) is not None: return True return False -def match(value, pattern='', ignorecase=False): - ''' Perform a `re.match` returning a boolean ''' - return regex(value, pattern, ignorecase, 'match') +def match(value, pattern="", ignorecase=False): + """Perform a `re.match` returning a boolean""" + return regex(value, pattern, ignorecase, "match") -def search(value, pattern='', ignorecase=False): - ''' Perform a `re.search` returning a boolean ''' - return regex(value, pattern, ignorecase, 'search') +def search(value, pattern="", ignorecase=False): + """Perform a `re.search` returning a boolean""" + return regex(value, pattern, ignorecase, "search") # Custom filters -def sub(value='', pattern='', replace='', ignorecase=False): +def sub(value="", pattern="", replace="", ignorecase=False): if ignorecase: flags = re.I else: @@ -129,9 +131,16 @@ def sub(value='', pattern='', replace='', ignorecase=False): def pyminify(source, obfuscate=False, minify=True): # pyminifier options options = types.SimpleNamespace( - tabs=False, replacement_length=1, use_nonlatin=0, - obfuscate=0, obf_variables=1, obf_classes=0, obf_functions=0, - obf_import_methods=0, obf_builtins=0) + tabs=False, + replacement_length=1, + use_nonlatin=0, + obfuscate=0, + obf_variables=1, + obf_classes=0, + obf_functions=0, + obf_import_methods=0, + obf_builtins=0, + ) tokens = pyminifier.token_utils.listified_tokenizer(source) @@ -141,13 +150,17 @@ def pyminify(source, obfuscate=False, minify=True): if obfuscate: name_generator = pyminifier.obfuscate.obfuscation_machine(use_unicode=False) - pyminifier.obfuscate.obfuscate("__main__", tokens, options, name_generator=name_generator) + pyminifier.obfuscate.obfuscate( + "__main__", tokens, options, name_generator=name_generator + ) # source = pyminifier.obfuscate.apply_obfuscation(source) source = pyminifier.token_utils.untokenize(tokens) # logger.info(source) minified_source = pyminifier.compression.gz_pack(source) - logger.info("Compressed python code from {} to {}".format(len(source), len(minified_source))) + logger.info( + "Compressed python code from {} to {}".format(len(source), len(minified_source)) + ) return minified_source @@ -157,10 +170,12 @@ def inline_yaml(block): def JinjaEnv(template_locations=[]): LoggingUndefined = jinja2.make_logging_undefined(logger=logger, base=Undefined) - jenv = jinja2.Environment(trim_blocks=True, - lstrip_blocks=True, - undefined=LoggingUndefined, - extensions=['jinja2.ext.loopcontrols', 'jinja2.ext.do']) + jenv = jinja2.Environment( + trim_blocks=True, + lstrip_blocks=True, + undefined=LoggingUndefined, + extensions=["jinja2.ext.loopcontrols", "jinja2.ext.do"], + ) if template_locations: jinja_loaders = [] @@ -171,29 +186,29 @@ def JinjaEnv(template_locations=[]): else: jenv.loader = jinja2.BaseLoader() - jenv.globals['include_raw'] = include_raw_gz - jenv.globals['raise'] = raise_helper - jenv.globals['option'] = option + jenv.globals["include_raw"] = include_raw_gz + jenv.globals["raise"] = raise_helper + jenv.globals["option"] = option - jenv.filters['sub'] = sub - jenv.filters['pyminify'] = pyminify - jenv.filters['inline_yaml'] = inline_yaml + jenv.filters["sub"] = sub + jenv.filters["pyminify"] = pyminify + jenv.filters["inline_yaml"] = inline_yaml - jenv.tests['match'] = match - jenv.tests['regex'] = regex - jenv.tests['search'] = search + 
jenv.tests["match"] = match + jenv.tests["regex"] = regex + jenv.tests["search"] = search return jenv def read_config_file(path, variables={}): - """ reads yaml config file, passes it through jinja and returns data structre + """reads yaml config file, passes it through jinja and returns data structre - - OS ENV are available as {{ ENV. }} - - variables defined in parent configs are available as {{ }} + - OS ENV are available as {{ ENV. }} + - variables defined in parent configs are available as {{ }} """ jinja_variables = copy.deepcopy(variables) - jinja_variables['ENV'] = os.environ + jinja_variables["ENV"] = os.environ if path.exists(): logger.debug("Reading config file: {}".format(path)) @@ -205,7 +220,8 @@ def read_config_file(path, variables={}): auto_reload=False, loader=jinja2.FunctionLoader(_sops_loader), undefined=jinja2.StrictUndefined, - extensions=['jinja2.ext.loopcontrols']) + extensions=["jinja2.ext.loopcontrols"], + ) template = jenv.get_template(str(path)) rendered_template = template.render(jinja_variables) data = yaml.safe_load(rendered_template) @@ -220,26 +236,37 @@ def read_config_file(path, variables={}): def _sops_loader(path): - """ Tries to loads yaml file - If "sops" key is detected the file is piped through sops before returned + """Tries to loads yaml file + If "sops" key is detected the file is piped through sops before returned """ - with open(path, 'r') as f: + with open(path, "r") as f: config_raw = f.read() data = yaml.safe_load(config_raw) - if data and 'sops' in data and 'DISABLE_SOPS' not in os.environ: + if data and "sops" in data and "DISABLE_SOPS" not in os.environ: try: - result = subprocess.run([ - 'sops', - '--input-type', 'yaml', - '--output-type', 'yaml', - '--decrypt', '/dev/stdin' - ], stdout=subprocess.PIPE, input=config_raw.encode('utf-8'), - env=dict(os.environ, **{"AWS_SDK_LOAD_CONFIG": "1"})) + result = subprocess.run( + [ + "sops", + "--input-type", + "yaml", + "--output-type", + "yaml", + "--decrypt", + "/dev/stdin", + ], + stdout=subprocess.PIPE, + input=config_raw.encode("utf-8"), + env=dict(os.environ, **{"AWS_SDK_LOAD_CONFIG": "1"}), + ) except FileNotFoundError: - logger.exception("SOPS encrypted config {}, but unable to find sops binary! Try eg: https://github.com/mozilla/sops/releases/download/v3.5.0/sops-v3.5.0.linux".format(path)) + logger.exception( + "SOPS encrypted config {}, but unable to find sops binary! 
Try eg: https://github.com/mozilla/sops/releases/download/v3.5.0/sops-v3.5.0.linux".format( + path + ) + ) sys.exit(1) - return result.stdout.decode('utf-8') + return result.stdout.decode("utf-8") else: return config_raw diff --git a/cloudbender/pulumi.py b/cloudbender/pulumi.py index 3d30e99..9bba8a1 100644 --- a/cloudbender/pulumi.py +++ b/cloudbender/pulumi.py @@ -7,30 +7,38 @@ import pkg_resources import pulumi import logging + logger = logging.getLogger(__name__) def pulumi_init(stack): # Fail early if pulumi binaries are not available - if not shutil.which('pulumi'): - raise FileNotFoundError("Cannot find pulumi binary, see https://www.pulumi.com/docs/get-started/install/") + if not shutil.which("pulumi"): + raise FileNotFoundError( + "Cannot find pulumi binary, see https://www.pulumi.com/docs/get-started/install/" + ) # add all artifact_paths/pulumi to the search path for easier imports in the pulumi code - for artifacts_path in stack.ctx['artifact_paths']: - _path = '{}/pulumi'.format(artifacts_path.resolve()) + for artifacts_path in stack.ctx["artifact_paths"]: + _path = "{}/pulumi".format(artifacts_path.resolve()) sys.path.append(_path) # Try local implementation first, similar to Jinja2 mode _found = False try: - _stack = importlib.import_module('config.{}.{}'.format(stack.rel_path, stack.template).replace('/', '.')) + _stack = importlib.import_module( + "config.{}.{}".format(stack.rel_path, stack.template).replace("/", ".") + ) _found = True except ImportError: - for artifacts_path in stack.ctx['artifact_paths']: + for artifacts_path in stack.ctx["artifact_paths"]: try: - spec = importlib.util.spec_from_file_location("_stack", '{}/pulumi/{}.py'.format(artifacts_path.resolve(), stack.template)) + spec = importlib.util.spec_from_file_location( + "_stack", + "{}/pulumi/{}.py".format(artifacts_path.resolve(), stack.template), + ) _stack = importlib.util.module_from_spec(spec) spec.loader.exec_module(_stack) _found = True @@ -39,36 +47,61 @@ def pulumi_init(stack): pass if not _found: - raise FileNotFoundError("Cannot find Pulumi implementation for {}".format(stack.stackname)) + raise FileNotFoundError( + "Cannot find Pulumi implementation for {}".format(stack.stackname) + ) - project_name = stack.parameters['Conglomerate'] + project_name = stack.parameters["Conglomerate"] # Remove stacknameprefix if equals Conglomerate as Pulumi implicitly prefixes project_name - pulumi_stackname = re.sub(r'^' + project_name + '-?', '', stack.stackname) + pulumi_stackname = re.sub(r"^" + project_name + "-?", "", stack.stackname) try: - pulumi_backend = '{}/{}/{}'.format(stack.pulumi['backend'], project_name, stack.region) + pulumi_backend = "{}/{}/{}".format( + stack.pulumi["backend"], project_name, stack.region + ) except KeyError: - raise KeyError('Missing pulumi.backend setting !') + raise KeyError("Missing pulumi.backend setting !") - account_id = stack.connection_manager.call('sts', 'get_caller_identity', profile=stack.profile, region=stack.region)['Account'] + account_id = stack.connection_manager.call( + "sts", "get_caller_identity", profile=stack.profile, region=stack.region + )["Account"] # Ugly hack as Pulumi currently doesnt support MFA_TOKENs during role assumptions # Do NOT set them via 'aws:secretKey' as they end up in the stack.json in plain text !!! 
- if stack.connection_manager._sessions[(stack.profile, stack.region)].get_credentials().token: - os.environ['AWS_SESSION_TOKEN'] = stack.connection_manager._sessions[(stack.profile, stack.region)].get_credentials().token + if ( + stack.connection_manager._sessions[(stack.profile, stack.region)] + .get_credentials() + .token + ): + os.environ["AWS_SESSION_TOKEN"] = ( + stack.connection_manager._sessions[(stack.profile, stack.region)] + .get_credentials() + .token + ) - os.environ['AWS_ACCESS_KEY_ID'] = stack.connection_manager._sessions[(stack.profile, stack.region)].get_credentials().access_key - os.environ['AWS_SECRET_ACCESS_KEY'] = stack.connection_manager._sessions[(stack.profile, stack.region)].get_credentials().secret_key - os.environ['AWS_DEFAULT_REGION'] = stack.region + os.environ["AWS_ACCESS_KEY_ID"] = ( + stack.connection_manager._sessions[(stack.profile, stack.region)] + .get_credentials() + .access_key + ) + os.environ["AWS_SECRET_ACCESS_KEY"] = ( + stack.connection_manager._sessions[(stack.profile, stack.region)] + .get_credentials() + .secret_key + ) + os.environ["AWS_DEFAULT_REGION"] = stack.region # Secrets provider try: - secrets_provider = stack.pulumi['secretsProvider'] - if secrets_provider == 'passphrase' and 'PULUMI_CONFIG_PASSPHRASE' not in os.environ: - raise ValueError('Missing PULUMI_CONFIG_PASSPHRASE environment variable!') + secrets_provider = stack.pulumi["secretsProvider"] + if ( + secrets_provider == "passphrase" + and "PULUMI_CONFIG_PASSPHRASE" not in os.environ + ): + raise ValueError("Missing PULUMI_CONFIG_PASSPHRASE environment variable!") except KeyError: - logger.warning('Missing pulumi.secretsProvider setting, secrets disabled !') + logger.warning("Missing pulumi.secretsProvider setting, secrets disabled !") secrets_provider = None # Set tag for stack file name and version @@ -76,9 +109,11 @@ def pulumi_init(stack): try: _version = _stack.VERSION except AttributeError: - _version = 'undefined' + _version = "undefined" - _tags['zero-downtime.net/cloudbender'] = '{}:{}'.format(os.path.basename(_stack.__file__), _version) + _tags["zero-downtime.net/cloudbender"] = "{}:{}".format( + os.path.basename(_stack.__file__), _version + ) _config = { "aws:region": stack.region, @@ -90,27 +125,36 @@ def pulumi_init(stack): # inject all parameters as config in the namespace for p in stack.parameters: - _config['{}:{}'.format(stack.parameters['Conglomerate'], p)] = stack.parameters[p] + _config["{}:{}".format(stack.parameters["Conglomerate"], p)] = stack.parameters[ + p + ] stack_settings = pulumi.automation.StackSettings( config=_config, secrets_provider=secrets_provider, - encryption_salt=stack.pulumi.get('encryptionsalt', None), - encrypted_key=stack.pulumi.get('encryptedkey', None) + encryption_salt=stack.pulumi.get("encryptionsalt", None), + encrypted_key=stack.pulumi.get("encryptedkey", None), ) project_settings = pulumi.automation.ProjectSettings( - name=project_name, - runtime="python", - backend={"url": pulumi_backend}) + name=project_name, runtime="python", backend={"url": pulumi_backend} + ) ws_opts = pulumi.automation.LocalWorkspaceOptions( work_dir=stack.work_dir, project_settings=project_settings, stack_settings={pulumi_stackname: stack_settings}, - secrets_provider=secrets_provider) + secrets_provider=secrets_provider, + ) - stack = pulumi.automation.create_or_select_stack(stack_name=pulumi_stackname, project_name=project_name, program=_stack.pulumi_program, opts=ws_opts) - stack.workspace.install_plugin("aws", 
pkg_resources.get_distribution("pulumi_aws").version) + stack = pulumi.automation.create_or_select_stack( + stack_name=pulumi_stackname, + project_name=project_name, + program=_stack.pulumi_program, + opts=ws_opts, + ) + stack.workspace.install_plugin( + "aws", pkg_resources.get_distribution("pulumi_aws").version + ) return stack diff --git a/cloudbender/stack.py b/cloudbender/stack.py index 26661c3..64e7d11 100644 --- a/cloudbender/stack.py +++ b/cloudbender/stack.py @@ -29,6 +29,7 @@ import importlib.resources as pkg_resources from . import templates import logging + logger = logging.getLogger(__name__) @@ -53,16 +54,16 @@ class Stack(object): self.parameters = {} self.outputs = {} self.options = {} - self.region = 'global' - self.profile = 'default' - self.onfailure = 'DELETE' + self.region = "global" + self.profile = "default" + self.onfailure = "DELETE" self.notfication_sns = [] self.id = (self.profile, self.region, self.stackname) self.aws_stackid = None self.md5 = None - self.mode = 'CloudBender' + self.mode = "CloudBender" self.provides = template self.cfn_template = None self.cfn_parameters = [] @@ -71,7 +72,12 @@ class Stack(object): self.status = None self.store_outputs = False self.dependencies = set() - self.hooks = {'post_create': [], 'post_update': [], 'pre_create': [], 'pre_update': []} + self.hooks = { + "post_create": [], + "post_update": [], + "pre_create": [], + "pre_update": [], + } self.default_lock = None self.multi_delete = True self.template_bucket_url = None @@ -82,22 +88,32 @@ class Stack(object): logger.debug("".format(self.id, pprint.pformat(vars(self)))) def read_config(self, sg_config={}): - """ reads stack config """ + """reads stack config""" # First set various attributes based on parent stackgroup config - self.tags.update(sg_config.get('tags', {})) - self.parameters.update(sg_config.get('parameters', {})) - self.options.update(sg_config.get('options', {})) - self.pulumi.update(sg_config.get('pulumi', {})) + self.tags.update(sg_config.get("tags", {})) + self.parameters.update(sg_config.get("parameters", {})) + self.options.update(sg_config.get("options", {})) + self.pulumi.update(sg_config.get("pulumi", {})) # by default inherit parent group settings - for p in ['region', 'profile', 'notfication_sns', 'template_bucket_url']: + for p in ["region", "profile", "notfication_sns", "template_bucket_url"]: if p in sg_config: setattr(self, p, sg_config[p]) # now override stack specific settings - _config = read_config_file(self.path, sg_config.get('variables', {})) - for p in ["region", "stackname", "template", "default_lock", "multi_delete", "provides", "onfailure", "notification_sns", "template_bucket_url"]: + _config = read_config_file(self.path, sg_config.get("variables", {})) + for p in [ + "region", + "stackname", + "template", + "default_lock", + "multi_delete", + "provides", + "onfailure", + "notification_sns", + "template_bucket_url", + ]: if p in _config: setattr(self, p, _config[p]) @@ -106,25 +122,27 @@ class Stack(object): setattr(self, p, dict_merge(getattr(self, p), _config[p])) # Inject Artifact if not explicitly set - if 'Artifact' not in self.tags: - self.tags['Artifact'] = self.provides + if "Artifact" not in self.tags: + self.tags["Artifact"] = self.provides - if 'options' in _config: - self.options = dict_merge(self.options, _config['options']) + if "options" in _config: + self.options = dict_merge(self.options, _config["options"]) - if 'Mode' in self.options: - self.mode = self.options['Mode'] + if "Mode" in self.options: + self.mode = 
self.options["Mode"] - if 'StoreOutputs' in self.options and self.options['StoreOutputs']: + if "StoreOutputs" in self.options and self.options["StoreOutputs"]: self.store_outputs = True - if 'dependencies' in _config: - for dep in _config['dependencies']: + if "dependencies" in _config: + for dep in _config["dependencies"]: self.dependencies.add(dep) # Some sanity checks if self.onfailure not in ["DO_NOTHING", "ROLLBACK", "DELETE"]: - raise ParameterIllegalValue("onfailure must be one of DO_NOTHING | ROLLBACK | DELETE") + raise ParameterIllegalValue( + "onfailure must be one of DO_NOTHING | ROLLBACK | DELETE" + ) logger.debug("Stack {} added.".format(self.id)) @@ -132,18 +150,22 @@ class Stack(object): """Renders the cfn jinja template for this stack""" template_metadata = { - 'Template.Name': self.template, - 'Template.Hash': "__HASH__", - 'CloudBender.Version': __version__ + "Template.Name": self.template, + "Template.Hash": "__HASH__", + "CloudBender.Version": __version__, + } + _config = { + "mode": self.mode, + "options": self.options, + "metadata": template_metadata, } - _config = {'mode': self.mode, 'options': self.options, 'metadata': template_metadata} - jenv = JinjaEnv(self.ctx['artifact_paths']) - jenv.globals['_config'] = _config + jenv = JinjaEnv(self.ctx["artifact_paths"]) + jenv.globals["_config"] = _config - template = jenv.get_template('{0}{1}'.format(self.template, '.yaml.jinja')) + template = jenv.get_template("{0}{1}".format(self.template, ".yaml.jinja")) - logger.info('Rendering %s', template.filename) + logger.info("Rendering %s", template.filename) try: self.cfn_template = template.render(_config) @@ -153,20 +175,26 @@ class Stack(object): if self.cfn_template: _output = "" for i, line in enumerate(self.cfn_template.splitlines(), start=1): - _output = _output + '{}: {}\n'.format(i, line) + _output = _output + "{}: {}\n".format(i, line) logger.error(_output) raise e - if not re.search('CloudBender::', self.cfn_template) and not re.search('Iterate:', self.cfn_template): - logger.info("CloudBender not required -> removing Transform and Conglomerate parameter") - self.cfn_template = self.cfn_template.replace('Transform: [CloudBender]', '') + if not re.search("CloudBender::", self.cfn_template) and not re.search( + "Iterate:", self.cfn_template + ): + logger.info( + "CloudBender not required -> removing Transform and Conglomerate parameter" + ) + self.cfn_template = self.cfn_template.replace( + "Transform: [CloudBender]", "" + ) _res = """ Conglomerate: Type: String Description: Project / Namespace this stack is part of """ - self.cfn_template = re.sub(_res, '', self.cfn_template) + self.cfn_template = re.sub(_res, "", self.cfn_template) else: self.dependencies.add("CloudBender") @@ -175,31 +203,38 @@ class Stack(object): if self.mode == "Piped" and len(include): _res = "" for attr in include: - _res = _res + """ + _res = ( + _res + + """ {0}: Type: String - Description: Parameter to provide remote stack attribute {0}""".format(attr) + Description: Parameter to provide remote stack attribute {0}""".format( + attr + ) + ) - self.cfn_template = re.sub(r'Parameters:', r'Parameters:' + _res + '\n', self.cfn_template) + self.cfn_template = re.sub( + r"Parameters:", r"Parameters:" + _res + "\n", self.cfn_template + ) logger.info("Piped mode: Added parameters for remote stack references") # Re-read updated template self.cfn_data = yaml.load(self.cfn_template, Loader=SafeLoaderIgnoreUnknown) # Check for empty top level Parameters, Outputs and Conditions and remove - for key in 
['Parameters', 'Outputs', 'Conditions']: + for key in ["Parameters", "Outputs", "Conditions"]: if key in self.cfn_data and not self.cfn_data[key]: del self.cfn_data[key] - self.cfn_template = self.cfn_template.replace('\n' + key + ":", '') + self.cfn_template = self.cfn_template.replace("\n" + key + ":", "") # Remove and condense multiple empty lines - self.cfn_template = re.sub(r'\n\s*\n', '\n\n', self.cfn_template) - self.cfn_template = re.sub(r'^\s*', '', self.cfn_template) - self.cfn_template = re.sub(r'\s*$', '', self.cfn_template) + self.cfn_template = re.sub(r"\n\s*\n", "\n\n", self.cfn_template) + self.cfn_template = re.sub(r"^\s*", "", self.cfn_template) + self.cfn_template = re.sub(r"\s*$", "", self.cfn_template) # set md5 last - self.md5 = hashlib.md5(self.cfn_template.encode('utf-8')).hexdigest() - self.cfn_template = self.cfn_template.replace('__HASH__', self.md5) + self.md5 = hashlib.md5(self.cfn_template.encode("utf-8")).hexdigest() + self.cfn_template = self.cfn_template.replace("__HASH__", self.md5) # Update internal data structures self._parse_metadata() @@ -207,7 +242,7 @@ class Stack(object): def _parse_metadata(self): # Extract dependencies try: - for dep in self.cfn_data['Metadata']['CloudBender']['Dependencies']: + for dep in self.cfn_data["Metadata"]["CloudBender"]["Dependencies"]: self.dependencies.add(dep) except KeyError: pass @@ -215,13 +250,19 @@ class Stack(object): # Get checksum if not self.md5: try: - self.md5 = self.cfn_data['Metadata']['Template']['Hash'] + self.md5 = self.cfn_data["Metadata"]["Template"]["Hash"] # Verify embedded md5 hash - source_cfn = re.sub('Hash: [0-9a-f]{32}', 'Hash: __HASH__', self.cfn_template) - our_md5 = hashlib.md5(source_cfn.encode('utf-8')).hexdigest() - if (our_md5 != self.md5): - raise ChecksumError("Template hash checksum mismatch! Expected: {} Got: {}".format(self.md5, our_md5)) from None + source_cfn = re.sub( + "Hash: [0-9a-f]{32}", "Hash: __HASH__", self.cfn_template + ) + our_md5 = hashlib.md5(source_cfn.encode("utf-8")).hexdigest() + if our_md5 != self.md5: + raise ChecksumError( + "Template hash checksum mismatch! 
Expected: {} Got: {}".format( + self.md5, our_md5 + ) + ) from None except KeyError: raise ChecksumError("Template missing Hash checksum!") from None @@ -231,14 +272,14 @@ search_refs(self.cfn_data, include, self.mode) for ref in include: if self.mode != "Piped": - self.dependencies.add(ref.split('.')[0]) + self.dependencies.add(ref.split(".")[0]) else: - self.dependencies.add(ref.split('DoT')[0]) + self.dependencies.add(ref.split("DoT")[0]) # Extract hooks try: - for hook, func in self.cfn_data['Metadata']['Hooks'].items(): - if hook in ['post_update', 'post_create', 'pre_create', 'pre_update']: + for hook, func in self.cfn_data["Metadata"]["Hooks"].items(): + if hook in ["post_update", "post_create", "pre_create", "pre_update"]: if isinstance(func, list): self.hooks[hook].extend(func) else: @@ -248,86 +289,133 @@ def write_template_file(self): if self.cfn_template: - yaml_file = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml") - ensure_dir(os.path.join(self.ctx['template_path'], self.rel_path)) - with open(yaml_file, 'w') as yaml_contents: + yaml_file = os.path.join( + self.ctx["template_path"], self.rel_path, self.stackname + ".yaml" + ) + ensure_dir(os.path.join(self.ctx["template_path"], self.rel_path)) + with open(yaml_file, "w") as yaml_contents: yaml_contents.write(self.cfn_template) - logger.info('Wrote %s to %s', self.template, yaml_file) + logger.info("Wrote %s to %s", self.template, yaml_file) # upload template to s3 if set if self.template_bucket_url: try: - (bucket, path) = get_s3_url(self.template_bucket_url, self.rel_path, self.stackname + ".yaml") + (bucket, path) = get_s3_url( + self.template_bucket_url, + self.rel_path, + self.stackname + ".yaml", + ) self.connection_manager.call( - 's3', 'put_object', - {'Bucket': bucket, - 'Key': path, - 'Body': self.cfn_template, - 'ServerSideEncryption': 'AES256'}, - profile=self.profile, region=self.region) + "s3", + "put_object", + { + "Bucket": bucket, + "Key": path, + "Body": self.cfn_template, + "ServerSideEncryption": "AES256", + }, + profile=self.profile, + region=self.region, + ) logger.info("Uploaded template to s3://{}/{}".format(bucket, path)) except ClientError as e: - logger.error("Error trying to upload template so S3: {}, {}".format(self.template_bucket_url, e)) + logger.error( + "Error trying to upload template to S3: {}, {}".format( + self.template_bucket_url, e + ) + ) else: if len(self.cfn_template) > 51200: - logger.warning("template_bucket_url not set and rendered template exceeds maximum allowed size of 51200, actual size: {} !".format(len(self.cfn_template))) + logger.warning( + "template_bucket_url not set and rendered template exceeds maximum allowed size of 51200, actual size: {} !".format( + len(self.cfn_template) + ) + ) else: - logger.error('No cfn template rendered yet for stack {}.'.format(self.stackname)) + logger.error( + "No cfn template rendered yet for stack {}.".format(self.stackname) + ) def delete_template_file(self): - yaml_file = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml") + yaml_file = os.path.join( + self.ctx["template_path"], self.rel_path, self.stackname + ".yaml" + ) try: os.remove(yaml_file) - logger.debug('Deleted cfn template %s.', yaml_file) + logger.debug("Deleted cfn template %s.", yaml_file) except OSError: pass if self.template_bucket_url: try: - (bucket, path) = get_s3_url(self.template_bucket_url, self.rel_path, self.stackname + ".yaml") + (bucket, path) = get_s3_url(
self.template_bucket_url, self.rel_path, self.stackname + ".yaml" + ) self.connection_manager.call( - 's3', 'delete_object', - {'Bucket': bucket, - 'Key': path}, - profile=self.profile, region=self.region) + "s3", + "delete_object", + {"Bucket": bucket, "Key": path}, + profile=self.profile, + region=self.region, + ) logger.info("Deleted template from s3://{}/{}".format(bucket, path)) except ClientError as e: - logger.error("Error trying to delete template from S3: {}, {}".format(self.template_bucket_url, e)) + logger.error( + "Error trying to delete template from S3: {}, {}".format( + self.template_bucket_url, e + ) + ) def read_template_file(self): - """ Reads rendered yaml template from disk or s3 and extracts metadata """ + """Reads rendered yaml template from disk or s3 and extracts metadata""" if not self.cfn_template: if self.template_bucket_url: try: - (bucket, path) = get_s3_url(self.template_bucket_url, self.rel_path, self.stackname + ".yaml") + (bucket, path) = get_s3_url( + self.template_bucket_url, + self.rel_path, + self.stackname + ".yaml", + ) template = self.connection_manager.call( - 's3', 'get_object', - {'Bucket': bucket, - 'Key': path}, - profile=self.profile, region=self.region) + "s3", + "get_object", + {"Bucket": bucket, "Key": path}, + profile=self.profile, + region=self.region, + ) logger.debug("Got template from s3://{}/{}".format(bucket, path)) - self.cfn_template = template['Body'].read().decode('utf-8') + self.cfn_template = template["Body"].read().decode("utf-8") # Overwrite local copy - yaml_file = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml") - ensure_dir(os.path.join(self.ctx['template_path'], self.rel_path)) - with open(yaml_file, 'w') as yaml_contents: + yaml_file = os.path.join( + self.ctx["template_path"], + self.rel_path, + self.stackname + ".yaml", + ) + ensure_dir(os.path.join(self.ctx["template_path"], self.rel_path)) + with open(yaml_file, "w") as yaml_contents: yaml_contents.write(self.cfn_template) except ClientError as e: - logger.error("Could not find template file on S3: {}/{}, {}".format(bucket, path, e)) + logger.error( + "Could not find template file on S3: {}/{}, {}".format( + bucket, path, e + ) + ) else: - yaml_file = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml") + yaml_file = os.path.join( + self.ctx["template_path"], self.rel_path, self.stackname + ".yaml" + ) try: - with open(yaml_file, 'r') as yaml_contents: + with open(yaml_file, "r") as yaml_contents: self.cfn_template = yaml_contents.read() - logger.debug('Read cfn template %s.', yaml_file) + logger.debug("Read cfn template %s.", yaml_file) except FileNotFoundError as e: logger.warn("Could not find template file: {}".format(yaml_file)) raise e @@ -336,40 +424,50 @@ class Stack(object): self._parse_metadata() else: - logger.debug('Using cached cfn template %s.', self.stackname) + logger.debug("Using cached cfn template %s.", self.stackname) def validate(self): """Validates the rendered template via cfn-lint""" self.read_template_file() try: - ignore_checks = self.cfn_data['Metadata']['cfnlint_ignore'] + ignore_checks = self.cfn_data["Metadata"]["cfnlint_ignore"] except KeyError: ignore_checks = [] # Ignore some more checks around injected parameters as we generate these if self.mode == "Piped": - ignore_checks = ignore_checks + ['W2505', 'W2509', 'W2507'] + ignore_checks = ignore_checks + ["W2505", "W2509", "W2507"] # Ignore checks regarding overloaded properties if self.mode == "CloudBender": - ignore_checks = 
ignore_checks + ['E3035', 'E3002', 'E3012', 'W2001', 'E3001', 'E0002', 'E1012'] + ignore_checks = ignore_checks + [ + "E3035", + "E3002", + "E3012", + "W2001", + "E3001", + "E0002", + "E1012", + ] - filename = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml") - logger.info('Validating {0}'.format(filename)) + filename = os.path.join( + self.ctx["template_path"], self.rel_path, self.stackname + ".yaml" + ) + logger.info("Validating {0}".format(filename)) - lint_args = ['--template', filename] + lint_args = ["--template", filename] if ignore_checks: - lint_args.append('--ignore-checks') + lint_args.append("--ignore-checks") lint_args = lint_args + ignore_checks - logger.info('Ignoring checks: {}'.format(','.join(ignore_checks))) + logger.info("Ignoring checks: {}".format(",".join(ignore_checks))) (args, filenames, formatter) = cfnlint.core.get_args_filenames(lint_args) (template, rules, matches) = cfnlint.core.get_template_rules(filename, args) region = self.region - if region == 'global': - region = 'us-east-1' + if region == "global": + region = "us-east-1" if not matches: matches.extend(cfnlint.core.run_checks(filename, template, rules, [region])) @@ -381,10 +479,10 @@ class Stack(object): logger.info("Passed.") return 0 - def get_outputs(self, include='.*', values=False): - """ gets outputs of the stack """ + def get_outputs(self, include=".*", values=False): + """gets outputs of the stack""" - if self.mode == 'pulumi': + if self.mode == "pulumi": stack = pulumi_init(self) self.outputs = stack.outputs() @@ -399,13 +497,19 @@ class Stack(object): stacks = self.connection_manager.call( "cloudformation", "describe_stacks", - {'StackName': self.stackname}, - profile=self.profile, region=self.region)['Stacks'] + {"StackName": self.stackname}, + profile=self.profile, + region=self.region, + )["Stacks"] try: - for output in stacks[0]['Outputs']: - self.outputs[output['OutputKey']] = output['OutputValue'] - logger.debug("Stack outputs for {} in {}: {}".format(self.stackname, self.region, self.outputs)) + for output in stacks[0]["Outputs"]: + self.outputs[output["OutputKey"]] = output["OutputValue"] + logger.debug( + "Stack outputs for {} in {}: {}".format( + self.stackname, self.region, self.outputs + ) + ) except KeyError: pass @@ -414,31 +518,46 @@ class Stack(object): pass if self.outputs: - logger.info('{} {} Outputs:\n{}'.format(self.region, self.stackname, pprint.pformat(self.outputs, indent=2))) + logger.info( + "{} {} Outputs:\n{}".format( + self.region, self.stackname, pprint.pformat(self.outputs, indent=2) + ) + ) if self.store_outputs: try: - filename = self.cfn_data['Metadata']['CustomOutputs']['Name'] - my_template = self.cfn_data['Metadata']['CustomOutputs']['Template'] + filename = self.cfn_data["Metadata"]["CustomOutputs"]["Name"] + my_template = self.cfn_data["Metadata"]["CustomOutputs"]["Template"] except (TypeError, KeyError): filename = self.stackname + ".yaml" - my_template = pkg_resources.read_text(templates, 'outputs.yaml') + my_template = pkg_resources.read_text(templates, "outputs.yaml") - output_file = os.path.join(self.ctx['outputs_path'], self.rel_path, filename) - ensure_dir(os.path.join(self.ctx['outputs_path'], self.rel_path)) + output_file = os.path.join( + self.ctx["outputs_path"], self.rel_path, filename + ) + ensure_dir(os.path.join(self.ctx["outputs_path"], self.rel_path)) jenv = JinjaEnv() template = jenv.from_string(my_template) - data = {'stackname': "/".join([self.rel_path, self.stackname]), 'timestamp': 
datetime.strftime(datetime.now(tzutc()), "%d/%m/%y %H:%M"), 'outputs': self.outputs, 'parameters': self.parameters} + data = { + "stackname": "/".join([self.rel_path, self.stackname]), + "timestamp": datetime.strftime( + datetime.now(tzutc()), "%d/%m/%y %H:%M" + ), + "outputs": self.outputs, + "parameters": self.parameters, + } - with open(output_file, 'w') as output_contents: + with open(output_file, "w") as output_contents: output_contents.write(template.render(**data)) - logger.info('Wrote outputs for %s to %s', self.stackname, output_file) + logger.info( + "Wrote outputs for %s to %s", self.stackname, output_file + ) def create_docs(self, template=False, graph=False): - """ Read rendered template, parse documentation fragments, eg. parameter description - and create a mardown doc file for the stack - same idea as eg. helm-docs for values.yaml - """ + """Read rendered template, parse documentation fragments, e.g. parameter description + and create a markdown doc file for the stack + same idea as e.g. helm-docs for values.yaml + """ try: self.read_template_file() @@ -446,65 +565,74 @@ return if not template: - doc_template = pkg_resources.read_text(templates, 'stack-doc.md') + doc_template = pkg_resources.read_text(templates, "stack-doc.md") jenv = JinjaEnv() template = jenv.from_string(doc_template) data = {} else: doc_template = template - data['name'] = self.stackname - data['description'] = self.cfn_data['Description'] - data['dependencies'] = self.dependencies + data["name"] = self.stackname + data["description"] = self.cfn_data["Description"] + data["dependencies"] = self.dependencies - if 'Parameters' in self.cfn_data: - data['parameters'] = self.cfn_data['Parameters'] + if "Parameters" in self.cfn_data: + data["parameters"] = self.cfn_data["Parameters"] set_parameters = self.resolve_parameters() for p in set_parameters: - data['parameters'][p]['value'] = set_parameters[p] + data["parameters"][p]["value"] = set_parameters[p] - if 'Outputs' in self.cfn_data: - data['outputs'] = self.cfn_data['Outputs'] + if "Outputs" in self.cfn_data: + data["outputs"] = self.cfn_data["Outputs"] # Check for existing outputs yaml, if found add current value column and set header to timestamp from outputs file - output_file = os.path.join(self.ctx['outputs_path'], self.rel_path, self.stackname + ".yaml") + output_file = os.path.join( + self.ctx["outputs_path"], self.rel_path, self.stackname + ".yaml" + ) try: - with open(output_file, 'r') as yaml_contents: + with open(output_file, "r") as yaml_contents: outputs = yaml.safe_load(yaml_contents.read()) - for p in outputs['Outputs']: - data['outputs'][p]['last_value'] = outputs['Outputs'][p] - data['timestamp'] = outputs['TimeStamp'] + for p in outputs["Outputs"]: + data["outputs"][p]["last_value"] = outputs["Outputs"][p] + data["timestamp"] = outputs["TimeStamp"] except (FileNotFoundError, KeyError, TypeError): pass - doc_file = os.path.join(self.ctx['docs_path'], self.rel_path, self.stackname + ".md") - ensure_dir(os.path.join(self.ctx['docs_path'], self.rel_path)) + doc_file = os.path.join( + self.ctx["docs_path"], self.rel_path, self.stackname + ".md" + ) + ensure_dir(os.path.join(self.ctx["docs_path"], self.rel_path)) - with open(doc_file, 'w') as doc_contents: + with open(doc_file, "w") as doc_contents: doc_contents.write(template.render(**data)) - logger.info('Wrote documentation for %s to %s', self.stackname, doc_file) + logger.info("Wrote documentation for %s to %s", self.stackname, doc_file) # Write Graph in Dot format if
graph: - filename = os.path.join(self.ctx['template_path'], self.rel_path, self.stackname + ".yaml") + filename = os.path.join( + self.ctx["template_path"], self.rel_path, self.stackname + ".yaml" + ) - lint_args = ['--template', filename] + lint_args = ["--template", filename] (args, filenames, formatter) = cfnlint.core.get_args_filenames(lint_args) (template, rules, matches) = cfnlint.core.get_template_rules(filename, args) template_obj = cfnlint.template.Template(filename, template, [self.region]) - path = os.path.join(self.ctx['docs_path'], self.rel_path, self.stackname + ".dot") + path = os.path.join( + self.ctx["docs_path"], self.rel_path, self.stackname + ".dot" + ) g = cfnlint.graph.Graph(template_obj) try: g.to_dot(path) - logger.info('DOT representation of the graph written to %s', path) + logger.info("DOT representation of the graph written to %s", path) except ImportError: logger.error( - 'Could not write the graph in DOT format. Please install either `pygraphviz` or `pydot` modules.') + "Could not write the graph in DOT format. Please install either `pygraphviz` or `pydot` modules." + ) def resolve_parameters(self): - """ Renders parameters for the stack based on the source template and the environment configuration """ + """Renders parameters for the stack based on the source template and the environment configuration""" self.read_template_file() @@ -512,23 +640,25 @@ class Stack(object): if self.mode == "Piped": stack_outputs = {} try: - stack_outputs = self._inspect_stacks(self.tags['Conglomerate']) + stack_outputs = self._inspect_stacks(self.tags["Conglomerate"]) except KeyError: pass _found = {} - if 'Parameters' in self.cfn_data: + if "Parameters" in self.cfn_data: _errors = [] self.cfn_parameters = [] - for p in self.cfn_data['Parameters']: + for p in self.cfn_data["Parameters"]: # In Piped mode we try to resolve all Paramters first via stack_outputs if self.mode == "Piped": try: # first reverse the rename due to AWS alphanumeric restriction for parameter names - _p = p.replace('DoT', '.') + _p = p.replace("DoT", ".") value = str(stack_outputs[_p]) - self.cfn_parameters.append({'ParameterKey': p, 'ParameterValue': value}) - logger.info('Got {} = {} from running stack'.format(p, value)) + self.cfn_parameters.append( + {"ParameterKey": p, "ParameterValue": value} + ) + logger.info("Got {} = {} from running stack".format(p, value)) continue except KeyError: pass @@ -536,31 +666,42 @@ class Stack(object): # Key name in config tree is: stacks..parameters. 
                 if p in self.parameters:
                     value = str(self.parameters[p])
-                    self.cfn_parameters.append({'ParameterKey': p, 'ParameterValue': value})
+                    self.cfn_parameters.append(
+                        {"ParameterKey": p, "ParameterValue": value}
+                    )

                     # Hide NoEcho parameters in shell output
-                    if 'NoEcho' in self.cfn_data['Parameters'][p] and self.cfn_data['Parameters'][p]['NoEcho']:
-                        value = '****'
+                    if (
+                        "NoEcho" in self.cfn_data["Parameters"][p]
+                        and self.cfn_data["Parameters"][p]["NoEcho"]
+                    ):
+                        value = "****"

                     _found[p] = value

                 else:
                     # If we have a Default defined in the CFN skip, as AWS will use it
-                    if 'Default' not in self.cfn_data['Parameters'][p]:
+                    if "Default" not in self.cfn_data["Parameters"][p]:
                         _errors.append(p)

         if _errors:
-            raise ParameterNotFound('Cannot find value for parameters: {0}'.format(_errors))
+            raise ParameterNotFound(
+                "Cannot find value for parameters: {0}".format(_errors)
+            )

         # Warning of excessive parameters, might be useful to spot typos early
         _warnings = []
         for p in self.parameters.keys():
-            if p not in self.cfn_data['Parameters']:
+            if p not in self.cfn_data["Parameters"]:
                 _warnings.append(p)

-        logger.info('{} {} set parameters:\n{}'.format(self.region, self.stackname, pprint.pformat(_found, indent=2)))
+        logger.info(
+            "{} {} set parameters:\n{}".format(
+                self.region, self.stackname, pprint.pformat(_found, indent=2)
+            )
+        )

         if _warnings:
-            logger.warning('Ignored additional parameters: {}.'.format(_warnings))
+            logger.warning("Ignored additional parameters: {}.".format(_warnings))

         # Return dict of explicitly set parameters
         return _found

@@ -568,9 +709,9 @@ class Stack(object):
     @pulumi_ws
     @exec_hooks
     def create(self):
-        """Creates a stack """
+        """Creates a stack"""

-        if self.mode == 'pulumi':
+        if self.mode == "pulumi":
             stack = pulumi_init(self)
             stack.up(on_output=self._log_pulumi)

@@ -578,17 +719,30 @@ class Stack(object):
         # Prepare parameters
         self.resolve_parameters()

-        logger.info('Creating {0} {1}'.format(self.region, self.stackname))
-        kwargs = {'StackName': self.stackname,
-                  'Parameters': self.cfn_parameters,
-                  'OnFailure': self.onfailure,
-                  'NotificationARNs': self.notfication_sns,
-                  'Tags': [{"Key": str(k), "Value": str(v)} for k, v in self.tags.items()],
-                  'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']}
+        logger.info("Creating {0} {1}".format(self.region, self.stackname))
+        kwargs = {
+            "StackName": self.stackname,
+            "Parameters": self.cfn_parameters,
+            "OnFailure": self.onfailure,
+            "NotificationARNs": self.notfication_sns,
+            "Tags": [
+                {"Key": str(k), "Value": str(v)} for k, v in self.tags.items()
+            ],
+            "Capabilities": [
+                "CAPABILITY_IAM",
+                "CAPABILITY_NAMED_IAM",
+                "CAPABILITY_AUTO_EXPAND",
+            ],
+        }
         kwargs = self._add_template_arg(kwargs)

         self.aws_stackid = self.connection_manager.call(
-            'cloudformation', 'create_stack', kwargs, profile=self.profile, region=self.region)
+            "cloudformation",
+            "create_stack",
+            kwargs,
+            profile=self.profile,
+            region=self.region,
+        )

         status = self._wait_for_completion()
         self.get_outputs()

@@ -598,31 +752,48 @@ class Stack(object):
     @pulumi_ws
     @exec_hooks
     def update(self):
-        """Updates an existing stack """
+        """Updates an existing stack"""

         # We cannot migrate directly so bail out if CFN stack still exists
-        if self.mode == 'pulumi':
-            logger.error("Cloudformation stack {} still exists, cannot use Pulumi!".format(self.stackname))
+        if self.mode == "pulumi":
+            logger.error(
+                "Cloudformation stack {} still exists, cannot use Pulumi!".format(
+                    self.stackname
+                )
+            )
             return

         # Prepare parameters
         self.resolve_parameters()

-        logger.info('Updating {0} {1}'.format(self.region, self.stackname))
+        logger.info("Updating {0} {1}".format(self.region, self.stackname))

         try:
-            kwargs = {'StackName': self.stackname,
-                      'Parameters': self.cfn_parameters,
-                      'NotificationARNs': self.notfication_sns,
-                      'Tags': [{"Key": str(k), "Value": str(v)} for k, v in self.tags.items()],
-                      'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']}
+            kwargs = {
+                "StackName": self.stackname,
+                "Parameters": self.cfn_parameters,
+                "NotificationARNs": self.notfication_sns,
+                "Tags": [
+                    {"Key": str(k), "Value": str(v)} for k, v in self.tags.items()
+                ],
+                "Capabilities": [
+                    "CAPABILITY_IAM",
+                    "CAPABILITY_NAMED_IAM",
+                    "CAPABILITY_AUTO_EXPAND",
+                ],
+            }
             kwargs = self._add_template_arg(kwargs)

             self.aws_stackid = self.connection_manager.call(
-                'cloudformation', 'update_stack', kwargs, profile=self.profile, region=self.region)
+                "cloudformation",
+                "update_stack",
+                kwargs,
+                profile=self.profile,
+                region=self.region,
+            )

         except ClientError as e:
-            if 'No updates are to be performed' in e.response['Error']['Message']:
-                logger.info('No updates for {0}'.format(self.stackname))
+            if "No updates are to be performed" in e.response["Error"]["Message"]:
+                logger.info("No updates for {0}".format(self.stackname))
                 return "COMPLETE"
             else:
                 raise e

@@ -635,26 +806,30 @@ class Stack(object):
     @pulumi_ws
     @exec_hooks
     def delete(self):
-        """ Deletes a stack """
+        """Deletes a stack"""

-        logger.info('Deleting {0} {1}'.format(self.region, self.stackname))
+        logger.info("Deleting {0} {1}".format(self.region, self.stackname))

-        if self.mode == 'pulumi':
+        if self.mode == "pulumi":
             stack = pulumi_init(self)
             stack.destroy(on_output=self._log_pulumi)

             return

         self.aws_stackid = self.connection_manager.call(
-            'cloudformation', 'delete_stack', {'StackName': self.stackname},
-            profile=self.profile, region=self.region)
+            "cloudformation",
+            "delete_stack",
+            {"StackName": self.stackname},
+            profile=self.profile,
+            region=self.region,
+        )

         status = self._wait_for_completion()

         return status

     @pulumi_ws
     def refresh(self):
-        """ Refreshes a Pulumi stack """
+        """Refreshes a Pulumi stack"""

         stack = pulumi_init(self)
         stack.refresh(on_output=self._log_pulumi)

@@ -663,7 +838,7 @@ class Stack(object):

     @pulumi_ws
     def preview(self):
-        """ Preview a Pulumi stack up operation"""
+        """Preview a Pulumi stack up operation"""

         stack = pulumi_init(self)
         stack.preview(on_output=self._log_pulumi)

@@ -672,15 +847,15 @@ class Stack(object):

     @pulumi_ws
     def export(self, reset):
-        """ Exports a Pulumi stack """
+        """Exports a Pulumi stack"""

         stack = pulumi_init(self)
         deployment = stack.export_stack()

         if reset:
-            deployment.deployment.pop('pending_operations', None)
+            deployment.deployment.pop("pending_operations", None)
             stack.import_stack(deployment)
-            logger.info('Removed all pending_operations from %s' % self.stackname)
+            logger.info("Removed all pending_operations from %s" % self.stackname)
         else:
             print(json.dumps(deployment.deployment))

@@ -688,7 +863,7 @@ class Stack(object):

     @pulumi_ws
     def set_config(self, key, value, secret):
-        """ Set a config or secret """
+        """Set a config or secret"""

         stack = pulumi_init(self)
         stack.set_config(key, pulumi.automation.ConfigValue(value, secret))

@@ -700,17 +875,19 @@ class Stack(object):
         with open(self.path, "r") as file:
             settings = yaml.safe_load(file)

-        if 'pulumi' not in settings:
-            settings['pulumi'] = {}
+        if "pulumi" not in settings:
+            settings["pulumi"] = {}

-        if 'encryptionsalt' in pulumi_settings:
-            settings['pulumi']['encryptionsalt'] = pulumi_settings['encryptionsalt']
-        if 'encryptedkey' in pulumi_settings:
-            settings['pulumi']['encryptedkey'] = pulumi_settings['encryptedkey']
+        if "encryptionsalt" in pulumi_settings:
+            settings["pulumi"]["encryptionsalt"] = pulumi_settings["encryptionsalt"]
+        if "encryptedkey" in pulumi_settings:
+            settings["pulumi"]["encryptedkey"] = pulumi_settings["encryptedkey"]

-        if 'parameters' not in settings:
-            settings['parameters'] = {}
-        settings['parameters'][key] = pulumi_settings['config']['{}:{}'.format(self.parameters['Conglomerate'], key)]
+        if "parameters" not in settings:
+            settings["parameters"] = {}
+        settings["parameters"][key] = pulumi_settings["config"][
+            "{}:{}".format(self.parameters["Conglomerate"], key)
+        ]

         with open(self.path, "w") as file:
             yaml.dump(settings, stream=file)

@@ -719,28 +896,39 @@ class Stack(object):

     @pulumi_ws
     def get_config(self, key):
-        """ Get a config or secret """
+        """Get a config or secret"""

         stack = pulumi_init(self)
         print(stack.get_config(key).value)

     def create_change_set(self, change_set_name):
-        """ Creates a Change Set with the name ``change_set_name``. """
+        """Creates a Change Set with the name ``change_set_name``."""

         # Prepare parameters
         self.resolve_parameters()
         self.read_template_file()

-        logger.info('Creating change set {0} for stack {1}'.format(change_set_name, self.stackname))
-        kwargs = {'StackName': self.stackname,
-                  'ChangeSetName': change_set_name,
-                  'Parameters': self.cfn_parameters,
-                  'Tags': [{"Key": str(k), "Value": str(v)} for k, v in self.tags.items()],
-                  'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']}
+        logger.info(
+            "Creating change set {0} for stack {1}".format(
+                change_set_name, self.stackname
+            )
+        )
+        kwargs = {
+            "StackName": self.stackname,
+            "ChangeSetName": change_set_name,
+            "Parameters": self.cfn_parameters,
+            "Tags": [{"Key": str(k), "Value": str(v)} for k, v in self.tags.items()],
+            "Capabilities": ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"],
+        }
         kwargs = self._add_template_arg(kwargs)

         self.connection_manager.call(
-            'cloudformation', 'create_change_set', kwargs, profile=self.profile, region=self.region)
+            "cloudformation",
+            "create_change_set",
+            kwargs,
+            profile=self.profile,
+            region=self.region,
+        )
         return self._wait_for_completion()

     def get_status(self):
@@ -753,7 +941,9 @@ class Stack(object):
                 "cloudformation",
                 "describe_stacks",
                 {"StackName": self.stackname},
-                profile=self.profile, region=self.region)["Stacks"][0]["StackStatus"]
+                profile=self.profile,
+                region=self.region,
+            )["Stacks"][0]["StackStatus"]
         except ClientError as e:
             if e.response["Error"]["Message"].endswith("does not exist"):
                 return None
@@ -771,7 +961,9 @@ class Stack(object):
                 "cloudformation",
                 "describe_stack_events",
                 {"StackName": self.stackname},
-                profile=self.profile, region=self.region)
+                profile=self.profile,
+                region=self.region,
+            )
         except ClientError as e:
             if e.response["Error"]["Message"].endswith("does not exist"):
                 return None
@@ -792,9 +984,7 @@ class Stack(object):

         status = "IN_PROGRESS"

-        self.most_recent_event_datetime = (
-            datetime.now(tzutc()) - timedelta(seconds=3)
-        )
+        self.most_recent_event_datetime = datetime.now(tzutc()) - timedelta(seconds=3)
         elapsed = 0
         while status == "IN_PROGRESS" and not timed_out(elapsed):
             status = self._get_simplified_status(self.get_status())
@@ -809,7 +999,7 @@ class Stack(object):

     @staticmethod
     def _get_simplified_status(status):
-        """ Returns the simplified Stack Status. """
+        """Returns the simplified Stack Status."""
         if status:
             if status.endswith("ROLLBACK_COMPLETE"):
                 return "FAILED"
@@ -820,7 +1010,7 @@ class Stack(object):
             elif status.endswith("_FAILED"):
                 return "FAILED"
             else:
-                return 'Unknown'
+                return "Unknown"

     def _log_new_events(self):
         """
@@ -831,18 +1021,23 @@ class Stack(object):
         events = events["StackEvents"]
         events.reverse()
         new_events = [
-            event for event in events
+            event
+            for event in events
             if event["Timestamp"] > self.most_recent_event_datetime
         ]
         for event in new_events:
-            logger.info(" ".join([
-                self.region,
-                self.stackname,
-                event["LogicalResourceId"],
-                event["ResourceType"],
-                event["ResourceStatus"],
-                event.get("ResourceStatusReason", "")
-            ]))
+            logger.info(
+                " ".join(
+                    [
+                        self.region,
+                        self.stackname,
+                        event["LogicalResourceId"],
+                        event["ResourceType"],
+                        event["ResourceStatus"],
+                        event.get("ResourceStatusReason", ""),
+                    ]
+                )
+            )
             self.most_recent_event_datetime = event["Timestamp"]

     # stackoutput inspection
@@ -851,12 +1046,14 @@ class Stack(object):
         running_stacks = self.connection_manager.call(
             "cloudformation",
             "describe_stacks",
-            profile=self.profile, region=self.region)
+            profile=self.profile,
+            region=self.region,
+        )

         stacks = []
-        for stack in running_stacks['Stacks']:
-            for tag in stack['Tags']:
-                if tag['Key'] == 'Conglomerate' and tag['Value'] == conglomerate:
+        for stack in running_stacks["Stacks"]:
+            for tag in stack["Tags"]:
+                if tag["Key"] == "Conglomerate" and tag["Value"] == conglomerate:
                     stacks.append(stack)
                     break

@@ -865,9 +1062,9 @@ class Stack(object):
         for stack in stacks:
             # If stack has an Artifact Tag put resources into the namespace Artifact.Resource
             artifact = None
-            for tag in stack['Tags']:
-                if tag['Key'] == 'Artifact':
-                    artifact = tag['Value']
+            for tag in stack["Tags"]:
+                if tag["Key"] == "Artifact":
+                    artifact = tag["Value"]

             if artifact:
                 key_prefix = "{}.".format(artifact)
@@ -875,9 +1072,11 @@ class Stack(object):
                 key_prefix = ""

             try:
-                for output in stack['Outputs']:
+                for output in stack["Outputs"]:
                     # Gather all outputs of the stack into one dimensional key=value structure
-                    stack_outputs[key_prefix + output['OutputKey']] = output['OutputValue']
+                    stack_outputs[key_prefix + output["OutputKey"]] = output[
+                        "OutputValue"
+                    ]
             except KeyError:
                 pass

@@ -888,19 +1087,35 @@ class Stack(object):
         if self.template_bucket_url:
             # https://bucket-name.s3.Region.amazonaws.com/key name
             # so we need the region, AWS as usual
-            (bucket, path) = get_s3_url(self.template_bucket_url, self.rel_path, self.stackname + ".yaml")
-            bucket_region = self.connection_manager.call('s3', 'get_bucket_location', {'Bucket': bucket}, profile=self.profile, region=self.region)['LocationConstraint']
+            (bucket, path) = get_s3_url(
+                self.template_bucket_url, self.rel_path, self.stackname + ".yaml"
+            )
+            bucket_region = self.connection_manager.call(
+                "s3",
+                "get_bucket_location",
+                {"Bucket": bucket},
+                profile=self.profile,
+                region=self.region,
+            )["LocationConstraint"]
             # If bucket is in us-east-1 AWS returns 'none' cause reasons grrr
             if not bucket_region:
-                bucket_region = 'us-east-1'
+                bucket_region = "us-east-1"

-            kwargs['TemplateURL'] = 'https://{}.s3.{}.amazonaws.com/{}'.format(bucket, bucket_region, path)
+            kwargs["TemplateURL"] = "https://{}.s3.{}.amazonaws.com/{}".format(
+                bucket, bucket_region, path
+            )
         else:
-            kwargs['TemplateBody'] = self.cfn_template
+            kwargs["TemplateBody"] = self.cfn_template

         return kwargs

     def _log_pulumi(self, text):
-        text = re.sub(r'pulumi:pulumi:Stack\s*{}-{}\s*'.format(self.parameters['Conglomerate'], self.stackname), '', text)
+        text = re.sub(
+            r"pulumi:pulumi:Stack\s*{}-{}\s*".format(
+                self.parameters["Conglomerate"], self.stackname
+            ),
+            "",
+            text,
+        )

         if text and not text.isspace():
             logger.info(" ".join([self.region, self.stackname, text]))
diff --git a/cloudbender/stackgroup.py b/cloudbender/stackgroup.py
index 5a3c007..262ec30 100644
--- a/cloudbender/stackgroup.py
+++ b/cloudbender/stackgroup.py
@@ -13,19 +13,21 @@ class StackGroup(object):
         self.name = None
         self.ctx = ctx
         self.path = path
-        self.rel_path = path.relative_to(ctx['config_path'])
+        self.rel_path = path.relative_to(ctx["config_path"])
         self.config = {}
         self.sgs = []
         self.stacks = []

-        if self.rel_path == '.':
-            self.rel_path = ''
+        if self.rel_path == ".":
+            self.rel_path = ""

     def dump_config(self):
         for sg in self.sgs:
             sg.dump_config()

-        logger.debug("StackGroup {}: {}".format(self.rel_path, pprint.pformat(self.config)))
+        logger.debug(
+            "StackGroup {}: {}".format(self.rel_path, pprint.pformat(self.config))
+        )

         for s in self.stacks:
             s.dump_config()

@@ -35,7 +37,9 @@ class StackGroup(object):
             return None

         # First read config.yaml if present
-        _config = read_config_file(self.path.joinpath('config.yaml'), parent_config.get('variables', {}))
+        _config = read_config_file(
+            self.path.joinpath("config.yaml"), parent_config.get("variables", {})
+        )

         # Stack Group name if not explicit via config is derived from subfolder, or in case of root object the parent folder
         if "stackgroupname" in _config:
@@ -45,19 +49,25 @@ class StackGroup(object):
         # Merge config with parent config
         self.config = dict_merge(parent_config, _config)

-        stackname_prefix = self.config.get('stacknameprefix', '')
+        stackname_prefix = self.config.get("stacknameprefix", "")

         logger.debug("StackGroup {} added.".format(self.name))

         # Add stacks
-        stacks = [s for s in self.path.glob('*.yaml') if not s.name == "config.yaml"]
+        stacks = [s for s in self.path.glob("*.yaml") if not s.name == "config.yaml"]
         for stack_path in stacks:
-            stackname = stack_path.name.split('.')[0]
+            stackname = stack_path.name.split(".")[0]
             template = stackname
             if stackname_prefix:
                 stackname = stackname_prefix + stackname

-            new_stack = Stack(name=stackname, template=template, path=stack_path, rel_path=str(self.rel_path), ctx=self.ctx)
+            new_stack = Stack(
+                name=stackname,
+                template=template,
+                path=stack_path,
+                rel_path=str(self.rel_path),
+                ctx=self.ctx,
+            )
             new_stack.read_config(self.config)
             self.stacks.append(new_stack)

@@ -68,22 +78,24 @@ class StackGroup(object):

             self.sgs.append(sg)

-    def get_stacks(self, name=None, recursive=True, match_by='name'):
-        """ Returns [stack] matching stack_name or [all] """
+    def get_stacks(self, name=None, recursive=True, match_by="name"):
+        """Returns [stack] matching stack_name or [all]"""
         stacks = []
         if name:
             logger.debug("Looking for stack {} in group {}".format(name, self.name))

         for s in self.stacks:
             if name:
-                if match_by == 'name' and s.stackname != name:
+                if match_by == "name" and s.stackname != name:
                     continue

-                if match_by == 'path' and not s.path.match(name):
+                if match_by == "path" and not s.path.match(name):
                     continue

             if self.rel_path:
-                logger.debug("Found stack {} in group {}".format(s.stackname, self.rel_path))
+                logger.debug(
+                    "Found stack {} in group {}".format(s.stackname, self.rel_path)
+                )
             else:
                 logger.debug("Found stack {}".format(s.stackname))
             stacks.append(s)

@@ -96,14 +108,20 @@ class StackGroup(object):

         return stacks

-    def get_stackgroup(self, name=None, recursive=True, match_by='name'):
-        """ Returns stack group matching stackgroup_name or all if None """
-        if not name or (self.name == name and match_by == 'name') or (self.path.match(name) and match_by == 'path'):
+    def get_stackgroup(self, name=None, recursive=True, match_by="name"):
+        """Returns stack group matching stackgroup_name or all if None"""
+        if (
+            not name
+            or (self.name == name and match_by == "name")
+            or (self.path.match(name) and match_by == "path")
+        ):
             logger.debug("Found stack_group {}".format(self.name))
             return self

-        if name and self.name != 'config':
-            logger.debug("Looking for stack_group {} in group {}".format(name, self.name))
+        if name and self.name != "config":
+            logger.debug(
+                "Looking for stack_group {} in group {}".format(name, self.name)
+            )

         if recursive:
             for sg in self.sgs:
diff --git a/cloudbender/utils.py b/cloudbender/utils.py
index 4b41d8e..5bdff59 100644
--- a/cloudbender/utils.py
+++ b/cloudbender/utils.py
@@ -5,7 +5,7 @@ import re


 def dict_merge(a, b):
-    """ Deep merge to allow proper inheritance for config files"""
+    """Deep merge to allow proper inheritance for config files"""

     if not a:
         return b
@@ -36,16 +36,14 @@ def setup_logging(debug):
         logging.getLogger("botocore").setLevel(logging.INFO)

         formatter = logging.Formatter(
-            fmt="[%(asctime)s] %(name)s %(message)s",
-            datefmt="%Y-%m-%d %H:%M:%S"
+            fmt="[%(asctime)s] %(name)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
         )
     else:
         our_level = logging.INFO
         logging.getLogger("botocore").setLevel(logging.CRITICAL)

         formatter = logging.Formatter(
-            fmt="[%(asctime)s] %(message)s",
-            datefmt="%Y-%m-%d %H:%M:%S"
+            fmt="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
         )

     log_handler = logging.StreamHandler()
@@ -57,8 +55,8 @@ def setup_logging(debug):


 def search_refs(template, attributes, mode):
-    """ Traverses a template and searches for any remote references and
-        adds them to the attributes set
+    """Traverses a template and searches for any remote references and
+    adds them to the attributes set
     """
     if isinstance(template, dict):
         for k, v in template.items():
@@ -70,7 +68,7 @@ def search_refs(template, attributes, mode):
             # CloudBender::StackRef
             if k == "CloudBender::StackRef":
                 try:
-                    attributes.append(v['StackTags']['Artifact'])
+                    attributes.append(v["StackTags"]["Artifact"])
                 except KeyError:
                     pass

@@ -91,11 +89,11 @@ def get_s3_url(url, *args):
     bucket = None
     path = None

-    m = re.match('^(s3://)?([^/]*)(/.*)?', url)
+    m = re.match("^(s3://)?([^/]*)(/.*)?", url)
     bucket = m[2]

     if m[3]:
-        path = m[3].lstrip('/')
+        path = m[3].lstrip("/")

     path = os.path.join(path, *args)

-    return(bucket, path)
+    return (bucket, path)
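For reference, a minimal usage sketch of `get_s3_url` as it reads after this reformat (illustrative only: the bucket and key names are made up, and a bare bucket URL with no key part would still leave `path` as None before the join):

    from cloudbender.utils import get_s3_url

    # The "s3://" scheme is optional; positional args are joined onto the key prefix.
    bucket, path = get_s3_url("s3://example-bucket/templates", "prod", "vpc.yaml")
    assert bucket == "example-bucket"
    assert path == "templates/prod/vpc.yaml"  # os.path.join, so POSIX separators assumed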