CloudBender/cloudbender/stack.py

1273 lines
44 KiB
Python
Raw Normal View History

2018-11-22 18:31:59 +00:00
import os
import re
import hashlib
import json
import yaml
2018-11-22 18:31:59 +00:00
import time
import pathlib
import pprint
2021-09-20 14:19:14 +00:00
import pulumi
2022-06-29 08:52:25 +00:00
import importlib
import pkg_resources
2018-11-22 18:31:59 +00:00
from datetime import datetime, timedelta
from dateutil.tz import tzutc
from botocore.exceptions import ClientError
import ruamel.yaml
2020-08-12 15:07:56 +00:00
from .utils import dict_merge, search_refs, ensure_dir, get_s3_url
2018-11-22 18:31:59 +00:00
from .connection import BotoConnection
from .jinja import JinjaEnv, read_config_file, render_docs
2018-11-22 18:31:59 +00:00
from . import __version__
from .exceptions import ParameterNotFound, ParameterIllegalValue, ChecksumError
from .hooks import exec_hooks
from .pulumi import pulumi_ws, resolve_outputs
2018-11-22 18:31:59 +00:00
import cfnlint.core
import cfnlint.template
2018-11-22 18:31:59 +00:00
from . import templates
2018-11-22 18:31:59 +00:00
import logging
2022-02-22 10:04:29 +00:00
2018-11-22 18:31:59 +00:00
logger = logging.getLogger(__name__)
# Ignore any !<Constructors> during re-loading of CFN templates
class SafeLoaderIgnoreUnknown(yaml.SafeLoader):
    """yaml.SafeLoader variant that tolerates unknown YAML tags.

    CloudFormation templates contain intrinsic-function short forms such as
    !Ref / !Sub that plain SafeLoader would reject; nodes with unregistered
    tags are loaded as their tag string instead of raising.
    """

    def ignore_unknown(self, node):
        # Represent the unknown node by its tag name only.
        return node.tag


# tag=None registers this as the fallback constructor for every
# tag that has no explicit handler.
SafeLoaderIgnoreUnknown.add_constructor(None, SafeLoaderIgnoreUnknown.ignore_unknown)
2018-11-22 18:31:59 +00:00
class Stack(object):
def __init__(self, name, template, path, rel_path, ctx):
    """Initialize stack state from its config file location.

    Args:
        name: stack name (CloudFormation StackName / Pulumi stack).
        template: name of the source template this stack renders.
        path: filesystem path of the stack's config file.
        rel_path: path relative to the config root; used to lay out
            rendered templates, outputs and docs.
        ctx: global CLI context dict (paths, region/profile overrides, ...).
    """
    self.stackname = name
    self.template = template
    self.path = pathlib.Path(path)
    self.rel_path = rel_path
    self.ctx = ctx

    # Merged configuration: stackgroup defaults overlaid by stack config
    self.tags = {}
    self.parameters = {}
    self.outputs = {}
    self.options = {}

    # AWS targeting; "global" is a CloudBender pseudo-region
    self.region = "global"
    self.profile = None
    self.onfailure = "DELETE"
    # NOTE(review): attribute name is misspelled ("notfication") but is used
    # consistently by create()/update(); renaming would break other code.
    self.notfication_sns = []
    self.aws_stackid = None

    self.md5 = None
    self.mode = "CloudBender"
    self.provides = template

    # Rendered CloudFormation artifacts
    self.cfn_template = None
    self.cfn_parameters = []
    self.cfn_data = None
    self.connection_manager = None

    self.status = None
    self.store_outputs = False
    self.dependencies = set()

    # Lifecycle hooks collected from config / template metadata
    self.hooks = {
        "post_create": [],
        "post_update": [],
        "pre_create": [],
        "pre_update": [],
    }

    self.default_lock = None
    self.multi_delete = True
    self.template_bucket_url = None

    # Pulumi-specific state (populated by the pulumi_ws decorator)
    self.work_dir = None
    self.pulumi = {}
    self._pulumi_stack = None
    self.pulumi_stackname = ""
    self.pulumi_config = {}
    self.pulumi_ws_opts = None
2018-11-22 18:31:59 +00:00
def dump_config(self):
    """Log the stack's complete internal state at debug level."""
    details = pprint.pformat(vars(self))
    logger.debug("<Stack {}: {}>".format(self.id, details))
2018-11-22 18:31:59 +00:00
def read_config(self, sg_config=None):
    """Read the stack's config file and merge it over stackgroup defaults.

    Args:
        sg_config: parent stackgroup configuration to inherit from
            (tags, parameters, options, pulumi, region, profile, ...).

    Raises:
        ParameterIllegalValue: if ``onfailure`` is not a valid value.
    """
    # BUGFIX: avoid mutable default argument shared across calls
    if sg_config is None:
        sg_config = {}

    # First set various attributes based on parent stackgroup config
    self.tags.update(sg_config.get("tags", {}))
    self.parameters.update(sg_config.get("parameters", {}))
    self.options.update(sg_config.get("options", {}))
    self.pulumi.update(sg_config.get("pulumi", {}))

    # by default inherit parent group settings
    # (group config historically uses the misspelled "notfication_sns" key)
    for p in ["region", "notfication_sns", "template_bucket_url"]:
        if p in sg_config:
            setattr(self, p, sg_config[p])

    # profile and region need special treatment due to cmd line overwrite option
    if self.ctx["region"]:
        self.region = self.ctx["region"]

    if self.ctx["profile"]:
        self.profile = self.ctx["profile"]
    else:
        if "profile" in sg_config:
            self.profile = sg_config["profile"]
        else:
            self.profile = "default"

    # now override stack specific settings
    _config = read_config_file(self.path, sg_config.get("variables", {}))
    for p in [
        "region",
        "stackname",
        "template",
        "default_lock",
        "multi_delete",
        "provides",
        "onfailure",
        "notification_sns",
        "template_bucket_url",
    ]:
        if p in _config:
            # BUGFIX: the correctly spelled stack-level config key
            # "notification_sns" must land on the misspelled attribute
            # "notfication_sns" that create()/update() actually read;
            # a plain setattr created an unused attribute and silently
            # dropped the setting.
            if p == "notification_sns":
                self.notfication_sns = _config[p]
            else:
                setattr(self, p, _config[p])

    for p in ["parameters", "tags", "pulumi"]:
        if p in _config:
            setattr(self, p, dict_merge(getattr(self, p), _config[p]))

    # Inject Artifact if not explicitly set
    if "Artifact" not in self.tags:
        self.tags["Artifact"] = self.provides

    if "options" in _config:
        self.options = dict_merge(self.options, _config["options"])

    if "Mode" in self.options:
        self.mode = self.options["Mode"]

    if "StoreOutputs" in self.options and self.options["StoreOutputs"]:
        self.store_outputs = True

    if "dependencies" in _config:
        for dep in _config["dependencies"]:
            self.dependencies.add(dep)

    # Some sanity checks
    if self.onfailure not in ["DO_NOTHING", "ROLLBACK", "DELETE"]:
        raise ParameterIllegalValue(
            "onfailure must be one of DO_NOTHING | ROLLBACK | DELETE"
        )

    self.id = (self.profile, self.region, self.stackname)
    self.connection_manager = BotoConnection(self.profile, self.region)
    logger.debug("Stack {} added.".format(self.id))
2018-11-22 18:31:59 +00:00
def render(self):
    """Renders the cfn jinja template for this stack"""

    # Metadata embedded into the rendered template; __HASH__ is a
    # placeholder replaced by the md5 of the final text at the end.
    template_metadata = {
        "Template.Name": self.template,
        "Template.Hash": "__HASH__",
        "CloudBender.Version": __version__,
    }
    _config = {
        "mode": self.mode,
        "options": self.options,
        "metadata": template_metadata,
    }

    jenv = JinjaEnv(self.ctx["artifact_paths"])
    jenv.globals["_config"] = _config

    template = jenv.get_template("{0}{1}".format(self.template, ".yaml.jinja"))
    logger.info("Rendering %s", template.filename)

    try:
        self.cfn_template = template.render(_config)
        self.cfn_data = yaml.load(self.cfn_template, Loader=SafeLoaderIgnoreUnknown)
    except Exception as e:
        # In case we rendered invalid yaml this helps to debug:
        # dump the rendered text with line numbers before re-raising
        if self.cfn_template:
            _output = ""
            for i, line in enumerate(self.cfn_template.splitlines(), start=1):
                _output = _output + "{}: {}\n".format(i, line)
            logger.error(_output)
        raise e

    # If the rendered template uses no CloudBender macros, strip the
    # Transform and the injected Conglomerate parameter again.
    if not re.search("CloudBender::", self.cfn_template) and not re.search(
        "Iterate:", self.cfn_template
    ):
        logger.info(
            "CloudBender not required -> removing Transform and Conglomerate parameter"
        )
        self.cfn_template = self.cfn_template.replace(
            "Transform: [CloudBender]", ""
        )

        # NOTE(review): relies on the parameter block matching this exact
        # 2-space indentation in the rendered output — confirm against the
        # jinja templates.
        _res = """
  Conglomerate:
    Type: String
    Description: Project / Namespace this stack is part of
"""
        self.cfn_template = re.sub(_res, "", self.cfn_template)
    else:
        self.dependencies.add("CloudBender")

    # Find remote-stack references; in Piped mode each becomes a parameter
    include = []
    search_refs(self.cfn_data, include, self.mode)
    if self.mode == "Piped" and len(include):
        _res = ""
        for attr in include:
            _res = (
                _res
                + """
  {0}:
    Type: String
    Description: Parameter to provide remote stack attribute {0}""".format(
                    attr
                )
            )

        self.cfn_template = re.sub(
            r"Parameters:", r"Parameters:" + _res + "\n", self.cfn_template
        )
        logger.info("Piped mode: Added parameters for remote stack references")

    # Re-read updated template
    self.cfn_data = yaml.load(self.cfn_template, Loader=SafeLoaderIgnoreUnknown)

    # Check for empty top level Parameters, Outputs and Conditions and remove
    for key in ["Parameters", "Outputs", "Conditions"]:
        if key in self.cfn_data and not self.cfn_data[key]:
            del self.cfn_data[key]
            self.cfn_template = self.cfn_template.replace("\n" + key + ":", "")

    # Remove and condense multiple empty lines
    self.cfn_template = re.sub(r"\n\s*\n", "\n\n", self.cfn_template)
    self.cfn_template = re.sub(r"^\s*", "", self.cfn_template)
    self.cfn_template = re.sub(r"\s*$", "", self.cfn_template)

    # set md5 last (over the text still containing the __HASH__ placeholder,
    # so _parse_metadata can verify it later by reversing the substitution)
    self.md5 = hashlib.md5(self.cfn_template.encode("utf-8")).hexdigest()
    self.cfn_template = self.cfn_template.replace("__HASH__", self.md5)

    # Update internal data structures
    self._parse_metadata()
def _parse_metadata(self):
    """Extract dependencies, checksum and hooks from the parsed template's Metadata.

    Expects self.cfn_data / self.cfn_template to be populated.

    Raises:
        ChecksumError: if the embedded Hash is missing or does not match
            the recomputed md5 of the template text.
    """
    # Extract dependencies
    try:
        for dep in self.cfn_data["Metadata"]["CloudBender"]["Dependencies"]:
            self.dependencies.add(dep)
    except KeyError:
        pass

    # Get checksum; only when not already set by render()
    if not self.md5:
        try:
            self.md5 = self.cfn_data["Metadata"]["Template"]["Hash"]

            # Verify embedded md5 hash: restore the __HASH__ placeholder
            # and recompute, mirroring what render() hashed
            source_cfn = re.sub(
                "Hash: [0-9a-f]{32}", "Hash: __HASH__", self.cfn_template
            )
            our_md5 = hashlib.md5(source_cfn.encode("utf-8")).hexdigest()
            if our_md5 != self.md5:
                raise ChecksumError(
                    "Template hash checksum mismatch! Expected: {} Got: {}".format(
                        self.md5, our_md5
                    )
                ) from None

        except KeyError:
            raise ChecksumError("Template missing Hash checksum!") from None

    # Add CloudBender dependencies from remote references
    include = []
    search_refs(self.cfn_data, include, self.mode)
    for ref in include:
        if self.mode != "Piped":
            # refs look like "<stack>.<output>"
            self.dependencies.add(ref.split(".")[0])
        else:
            # Piped mode renames "." to "DoT" for AWS parameter-name rules
            self.dependencies.add(ref.split("DoT")[0])

    # Extract hooks
    try:
        for hook, func in self.cfn_data["Metadata"]["Hooks"].items():
            if hook in ["post_update", "post_create", "pre_create", "pre_update"]:
                if isinstance(func, list):
                    self.hooks[hook].extend(func)
                else:
                    self.hooks[hook].append(func)
    except KeyError:
        pass
2018-11-22 18:31:59 +00:00
def write_template_file(self):
    """Write the rendered CFN template to disk and optionally upload it to S3."""
    # Nothing rendered yet -> nothing to write
    if not self.cfn_template:
        logger.error(
            "No cfn template rendered yet for stack {}.".format(self.stackname)
        )
        return

    yaml_file = os.path.join(
        self.ctx["template_path"], self.rel_path, self.stackname + ".yaml"
    )
    ensure_dir(os.path.join(self.ctx["template_path"], self.rel_path))
    with open(yaml_file, "w") as yaml_contents:
        yaml_contents.write(self.cfn_template)
    logger.info("Wrote %s to %s", self.template, yaml_file)

    # upload template to s3 if set
    if self.template_bucket_url:
        try:
            (bucket, path) = get_s3_url(
                self.template_bucket_url,
                self.rel_path,
                self.stackname + ".yaml",
            )
            self.connection_manager.call(
                "s3",
                "put_object",
                {
                    "Bucket": bucket,
                    "Key": path,
                    "Body": self.cfn_template,
                    "ServerSideEncryption": "AES256",
                },
                profile=self.profile,
                region=self.region,
            )
            logger.info("Uploaded template to s3://{}/{}".format(bucket, path))
        except ClientError as e:
            logger.error(
                "Error trying to upload template so S3: {}, {}".format(
                    self.template_bucket_url, e
                )
            )
    else:
        # No bucket configured: warn if the template exceeds the inline
        # TemplateBody size limit
        if len(self.cfn_template) > 51200:
            logger.warning(
                "template_bucket_url not set and rendered template exceeds maximum allowed size of 51200, actual size: {} !".format(
                    len(self.cfn_template)
                )
            )
2018-11-22 18:31:59 +00:00
def delete_template_file(self):
    """Remove the rendered template from disk and, if configured, from S3."""
    local_file = os.path.join(
        self.ctx["template_path"], self.rel_path, self.stackname + ".yaml"
    )
    try:
        os.remove(local_file)
        logger.debug("Deleted cfn template %s.", local_file)
    except OSError:
        # Nothing to delete locally
        pass

    if not self.template_bucket_url:
        return

    try:
        (bucket, path) = get_s3_url(
            self.template_bucket_url, self.rel_path, self.stackname + ".yaml"
        )
        self.connection_manager.call(
            "s3",
            "delete_object",
            {"Bucket": bucket, "Key": path},
            profile=self.profile,
            region=self.region,
        )
        logger.info("Deleted template from s3://{}/{}".format(bucket, path))
    except ClientError as e:
        logger.error(
            "Error trying to delete template from S3: {}, {}".format(
                self.template_bucket_url, e
            )
        )
2020-08-12 15:07:56 +00:00
2018-11-22 18:31:59 +00:00
def read_template_file(self):
    """Reads rendered yaml template from disk or s3 and extracts metadata

    Prefers the S3 copy when template_bucket_url is configured (refreshing
    the local file from it); otherwise reads the locally rendered file.
    The template is then parsed and metadata (dependencies, checksum,
    hooks) is extracted. Cached in self.cfn_template across calls.

    Raises:
        FileNotFoundError: if no local template file exists.
    """
    if not self.cfn_template:
        if self.template_bucket_url:
            try:
                (bucket, path) = get_s3_url(
                    self.template_bucket_url,
                    self.rel_path,
                    self.stackname + ".yaml",
                )
                template = self.connection_manager.call(
                    "s3",
                    "get_object",
                    {"Bucket": bucket, "Key": path},
                    profile=self.profile,
                    region=self.region,
                )
                logger.debug("Got template from s3://{}/{}".format(bucket, path))

                self.cfn_template = template["Body"].read().decode("utf-8")

                # Overwrite local copy
                yaml_file = os.path.join(
                    self.ctx["template_path"],
                    self.rel_path,
                    self.stackname + ".yaml",
                )
                ensure_dir(os.path.join(self.ctx["template_path"], self.rel_path))
                with open(yaml_file, "w") as yaml_contents:
                    yaml_contents.write(self.cfn_template)

            except ClientError as e:
                logger.error(
                    "Could not find template file on S3: {}/{}, {}".format(
                        bucket, path, e
                    )
                )

        else:
            yaml_file = os.path.join(
                self.ctx["template_path"], self.rel_path, self.stackname + ".yaml"
            )
            try:
                with open(yaml_file, "r") as yaml_contents:
                    self.cfn_template = yaml_contents.read()
                    logger.debug("Read cfn template %s.", yaml_file)
            except FileNotFoundError as e:
                # FIX: logger.warn is a deprecated alias; use warning()
                # consistently with the rest of this module
                logger.warning("Could not find template file: {}".format(yaml_file))
                raise e

        self.cfn_data = yaml.load(self.cfn_template, Loader=SafeLoaderIgnoreUnknown)
        self._parse_metadata()

    else:
        logger.debug("Using cached cfn template %s.", self.stackname)
2018-11-22 18:31:59 +00:00
def validate(self):
    """Validates the rendered template via cfn-lint

    Returns:
        0 if the template passed, 1 if cfn-lint reported any findings.
    """
    self.read_template_file()

    # Template authors may list checks to skip under Metadata.cfnlint_ignore
    try:
        ignore_checks = self.cfn_data["Metadata"]["cfnlint_ignore"]
    except KeyError:
        ignore_checks = []

    # Ignore some more checks around injected parameters as we generate these
    if self.mode == "Piped":
        ignore_checks = ignore_checks + ["W2505", "W2509", "W2507"]

    # Ignore checks regarding overloaded properties
    if self.mode == "CloudBender":
        ignore_checks = ignore_checks + [
            "E3035",
            "E3002",
            "E3012",
            "W2001",
            "E3001",
            "E0002",
            "E1012",
        ]

    filename = os.path.join(
        self.ctx["template_path"], self.rel_path, self.stackname + ".yaml"
    )
    logger.info("Validating {0}".format(filename))

    # Build the cfn-lint CLI-style argument list
    lint_args = ["--template", filename]
    if ignore_checks:
        lint_args.append("--ignore-checks")
        lint_args = lint_args + ignore_checks
        logger.info("Ignoring checks: {}".format(",".join(ignore_checks)))

    (args, filenames, formatter) = cfnlint.core.get_args_filenames(lint_args)
    (template, rules, matches) = cfnlint.core.get_template_rules(filename, args)

    # cfn-lint needs a real AWS region; map our pseudo "global" region
    region = self.region
    if region == "global":
        region = "us-east-1"

    # Only run the rule checks if template parsing produced no matches yet
    if not matches:
        matches.extend(cfnlint.core.run_checks(filename, template, rules, [region]))

    if len(matches):
        for match in matches:
            logger.error(formatter._format(match))
        return 1
    else:
        logger.info("Passed.")
        return 0
2018-11-22 18:31:59 +00:00
2023-05-18 12:19:51 +00:00
@pulumi_ws
def get_outputs(self, include=".*", values=False):
    """gets outputs of the stack

    Fetches outputs from Pulumi or CloudFormation into self.outputs,
    optionally persists a sanitized copy to the outputs directory
    (StoreOutputs option) and logs them.

    Note: ``include`` and ``values`` are kept for interface compatibility
    but are currently unused by this implementation.
    """

    if self.mode == "pulumi":
        self.outputs = self._get_pulumi_stack().outputs()

    else:
        self.read_template_file()
        try:
            stacks = self.connection_manager.call(
                "cloudformation",
                "describe_stacks",
                {"StackName": self.stackname},
                profile=self.profile,
                region=self.region,
            )["Stacks"]

            try:
                for output in stacks[0]["Outputs"]:
                    self.outputs[output["OutputKey"]] = output["OutputValue"]
                logger.debug(
                    "Stack outputs for {} in {}: {}".format(
                        self.stackname, self.region, self.outputs
                    )
                )
            except KeyError:
                # Stack exists but has no Outputs
                pass

        except ClientError:
            # FIX: logger.warn is a deprecated alias; use warning()
            logger.warning("Could not get outputs of {}".format(self.stackname))

    if self.outputs:
        if self.store_outputs:
            filename = self.stackname + ".yaml"
            my_template = importlib.resources.read_text(templates, "outputs.yaml")

            output_file = os.path.join(
                self.ctx["outputs_path"], self.rel_path, filename
            )
            ensure_dir(os.path.join(self.ctx["outputs_path"], self.rel_path))

            # Blacklist at least AWS SecretKeys from leaking into git
            # Pulumi to the rescue soon
            blacklist = [".*SecretAccessKey.*"]
            sanitized_outputs = {}
            for k in self.outputs.keys():
                sanitized_outputs[k] = self.outputs[k]
                for val in blacklist:
                    if re.match(val, k, re.IGNORECASE):
                        sanitized_outputs[k] = "<Redacted>"

            jenv = JinjaEnv()
            template = jenv.from_string(my_template)
            data = {
                "stackname": "/".join([self.rel_path, self.stackname]),
                "timestamp": datetime.strftime(
                    datetime.now(tzutc()), "%d/%m/%y %H:%M"
                ),
                "outputs": sanitized_outputs,
                "parameters": self.parameters,
            }

            with open(output_file, "w") as output_contents:
                output_contents.write(template.render(**data))
                logger.info(
                    "Wrote outputs for %s to %s", self.stackname, output_file
                )

        # If secrets replace with clear values for now, display ONLY
        for k in self.outputs.keys():
            if hasattr(self.outputs[k], "secret") and self.outputs[k].secret:
                self.outputs[k] = self.outputs[k].value

        logger.info(
            "{} {} Outputs:\n{}".format(
                self.region, self.stackname, pprint.pformat(self.outputs, indent=2)
            )
        )
@pulumi_ws
def docs(self, template=False):
    """Read rendered template, parse documentation fragments, eg. parameter description
    and create a mardown doc file for the stack. Same idea as helm-docs for the values.yaml

    Args:
        template: optional pre-compiled Jinja template object to render the
            docs with; falls back to the bundled "stack-doc.md" template.
    """

    doc_file = os.path.join(
        self.ctx["docs_path"], self.rel_path, self.stackname + ".md"
    )
    ensure_dir(os.path.join(self.ctx["docs_path"], self.rel_path))

    # For pulumi we use the embedded docstrings
    if self.mode == "pulumi":
        try:
            pulumi_stack = self._get_pulumi_stack()
            outputs = pulumi_stack.outputs()
        except pulumi.automation.errors.StackNotFoundError:
            outputs = {}

        if vars(self._pulumi_code)["__doc__"]:
            docs_out = render_docs(
                vars(self._pulumi_code)["__doc__"], resolve_outputs(outputs)
            )
        else:
            docs_out = "No stack documentation available."

        # collect all __doc__ from available _execute_ functions
        headerAdded = False
        for k in vars(self._pulumi_code).keys():
            if k.startswith("_execute_"):
                if not headerAdded:
                    docs_out = docs_out + "\n# Available *execute* functions: \n"
                    headerAdded = True
                docstring = vars(self._pulumi_code)[k].__doc__
                docs_out = docs_out + f"\n* {docstring}"

    # Cloudformation we use the stack-doc template similar to helm-docs
    else:
        try:
            self.read_template_file()
        except FileNotFoundError:
            return

        # BUGFIX: data must exist regardless of the branch below; previously
        # it was only created when no custom template was passed, so calling
        # docs() with one raised NameError at data["name"].
        data = {}
        if not template:
            doc_template = importlib.resources.read_text(templates, "stack-doc.md")
            jenv = JinjaEnv()
            template = jenv.from_string(doc_template)

        data["name"] = self.stackname
        data["description"] = self.cfn_data["Description"]
        data["dependencies"] = self.dependencies

        if "Parameters" in self.cfn_data:
            data["parameters"] = self.cfn_data["Parameters"]
            set_parameters = self.resolve_parameters()

            # Merge currently set values into the parameter table
            for p in set_parameters:
                data["parameters"][p]["value"] = set_parameters[p]

        if "Outputs" in self.cfn_data:
            data["outputs"] = self.cfn_data["Outputs"]

            # Check for existing outputs yaml, if found add current value column and set header to timestamp from outputs file
            output_file = os.path.join(
                self.ctx["outputs_path"], self.rel_path, self.stackname + ".yaml"
            )
            try:
                with open(output_file, "r") as yaml_contents:
                    outputs = yaml.safe_load(yaml_contents.read())
                    for p in outputs["Outputs"]:
                        data["outputs"][p]["last_value"] = outputs["Outputs"][p]
                    data["timestamp"] = outputs["TimeStamp"]
            except (FileNotFoundError, KeyError, TypeError):
                pass

        docs_out = template.render(**data)

    # Finally write docs to file
    with open(doc_file, "w") as doc_contents:
        doc_contents.write(docs_out)
    logger.info("Wrote documentation for %s to %s", self.stackname, doc_file)
2018-11-22 18:31:59 +00:00
def resolve_parameters(self):
    """Renders parameters for the stack based on the source template and the environment configuration

    Fills self.cfn_parameters with {ParameterKey, ParameterValue} entries.

    Returns:
        dict of explicitly set parameters (NoEcho values masked as "****").

    Raises:
        ParameterNotFound: if a template parameter without a Default has
            no configured value.
    """

    self.read_template_file()

    # if we run in Piped Mode, inspect all outputs of the running Conglomerate members
    if self.mode == "Piped":
        stack_outputs = {}
        try:
            stack_outputs = self._inspect_stacks(self.tags["Conglomerate"])
        except KeyError:
            pass

    _found = {}
    if "Parameters" in self.cfn_data:
        _errors = []
        self.cfn_parameters = []
        for p in self.cfn_data["Parameters"]:
            # In Piped mode we try to resolve all Paramters first via stack_outputs
            if self.mode == "Piped":
                try:
                    # first reverse the rename due to AWS alphanumeric restriction for parameter names
                    _p = p.replace("DoT", ".")
                    value = str(stack_outputs[_p])
                    self.cfn_parameters.append(
                        {"ParameterKey": p, "ParameterValue": value}
                    )
                    logger.info("Got {} = {} from running stack".format(p, value))
                    continue
                except KeyError:
                    # not provided by a running stack; fall through to config
                    pass

            # Key name in config tree is: stacks.<self.stackname>.parameters.<parameter>
            if p in self.parameters:
                value = str(self.parameters[p])
                self.cfn_parameters.append(
                    {"ParameterKey": p, "ParameterValue": value}
                )

                # Hide NoEcho parameters in shell output
                if (
                    "NoEcho" in self.cfn_data["Parameters"][p]
                    and self.cfn_data["Parameters"][p]["NoEcho"]
                ):
                    value = "****"

                _found[p] = value
            else:
                # If we have a Default defined in the CFN skip, as AWS will use it
                if "Default" not in self.cfn_data["Parameters"][p]:
                    _errors.append(p)

        if _errors:
            raise ParameterNotFound(
                "Cannot find value for parameters: {0}".format(_errors)
            )

        # Warning of excessive parameters, might be useful to spot typos early
        _warnings = []
        for p in self.parameters.keys():
            if p not in self.cfn_data["Parameters"]:
                _warnings.append(p)

        logger.info(
            "{} {} set parameters:\n{}".format(
                self.region, self.stackname, pprint.pformat(_found, indent=2)
            )
        )

        if _warnings:
            logger.warning("Ignored additional parameters: {}.".format(_warnings))

    # Return dict of explicitly set parameters
    return _found
2021-09-20 14:19:14 +00:00
@pulumi_ws
@exec_hooks
def create(self):
    """Creates a stack

    For Pulumi stacks runs `up` on the (possibly newly created) stack;
    for CloudFormation resolves parameters, calls create_stack, waits
    for completion and fetches outputs.

    Returns:
        completion status for CloudFormation stacks, None for Pulumi.
    """

    if self.mode == "pulumi":
        kwargs = self._set_pulumi_args()
        self._get_pulumi_stack(create=True).up(**kwargs)

    else:
        # Prepare parameters
        self.resolve_parameters()

        logger.info("Creating {0} {1}".format(self.region, self.stackname))
        kwargs = {
            "StackName": self.stackname,
            "Parameters": self.cfn_parameters,
            "OnFailure": self.onfailure,
            "NotificationARNs": self.notfication_sns,
            "Tags": [
                {"Key": str(k), "Value": str(v)} for k, v in self.tags.items()
            ],
            "Capabilities": [
                "CAPABILITY_IAM",
                "CAPABILITY_NAMED_IAM",
                "CAPABILITY_AUTO_EXPAND",
            ],
        }
        # Adds either TemplateBody or a TemplateURL pointing at S3
        kwargs = self._add_template_arg(kwargs)

        self.aws_stackid = self.connection_manager.call(
            "cloudformation",
            "create_stack",
            kwargs,
            profile=self.profile,
            region=self.region,
        )

        status = self._wait_for_completion()
        self.get_outputs()

        return status
2018-11-22 18:31:59 +00:00
@exec_hooks
def update(self):
    """Updates an existing stack

    Resolves parameters, calls update_stack and waits for completion.
    A "no updates" response from CloudFormation is treated as success.

    Returns:
        completion status string, "COMPLETE" if nothing changed.
    """

    # Prepare parameters
    self.resolve_parameters()

    logger.info("Updating {0} {1}".format(self.region, self.stackname))
    try:
        kwargs = {
            "StackName": self.stackname,
            "Parameters": self.cfn_parameters,
            "NotificationARNs": self.notfication_sns,
            "Tags": [
                {"Key": str(k), "Value": str(v)} for k, v in self.tags.items()
            ],
            "Capabilities": [
                "CAPABILITY_IAM",
                "CAPABILITY_NAMED_IAM",
                "CAPABILITY_AUTO_EXPAND",
            ],
        }
        # Adds either TemplateBody or a TemplateURL pointing at S3
        kwargs = self._add_template_arg(kwargs)

        self.aws_stackid = self.connection_manager.call(
            "cloudformation",
            "update_stack",
            kwargs,
            profile=self.profile,
            region=self.region,
        )

    except ClientError as e:
        # No changes to apply is not an error for us
        if "No updates are to be performed" in e.response["Error"]["Message"]:
            logger.info("No updates for {0}".format(self.stackname))
            return "COMPLETE"
        else:
            raise e

    status = self._wait_for_completion()
    self.get_outputs()

    return status
2021-09-20 14:19:14 +00:00
@pulumi_ws
@exec_hooks
def delete(self):
    """Deletes a stack"""

    logger.info("Deleting {0} {1}".format(self.region, self.stackname))

    if self.mode != "pulumi":
        # CloudFormation: fire the delete call and wait for the result
        self.aws_stackid = self.connection_manager.call(
            "cloudformation",
            "delete_stack",
            {"StackName": self.stackname},
            profile=self.profile,
            region=self.region,
        )
        return self._wait_for_completion()

    # Pulumi: destroy all resources, then remove the stack itself
    try:
        pulumi_stack = self._get_pulumi_stack()
    except pulumi.automation.errors.StackNotFoundError:
        logger.warning("Could not find Pulumi stack {}".format(self.stackname))
        return

    pulumi_stack.destroy(on_output=self._log_pulumi)
    pulumi_stack.workspace.remove_stack(pulumi_stack.name)
2018-11-22 18:31:59 +00:00
2021-09-20 14:19:14 +00:00
@pulumi_ws
def refresh(self):
    """Refreshes a Pulumi stack"""
    stack = self._get_pulumi_stack()
    stack.refresh(on_output=self._log_pulumi)
@pulumi_ws
def preview(self):
    """Preview a Pulumi stack up operation"""
    preview_args = self._set_pulumi_args()
    stack = self._get_pulumi_stack(create=True)
    stack.preview(**preview_args)
@pulumi_ws
def execute(self, function, args):
    """
    Executes custom Python function within a Pulumi stack

    These plugin functions are executed within the stack environment and are provided with all stack input parameters as well as current outputs.
    Think of "docker exec" into an existing container...

    Called without a function name, lists the available functions instead.
    """
    if not function:
        logger.error("No function specified !")

        # List everything the stack code exposes as _execute_<name>
        headerAdded = False
        for k in vars(self._pulumi_code).keys():
            if k.startswith("_execute_"):
                if not headerAdded:
                    logger.info("Available execute functions:")
                    headerAdded = True
                logger.info("{}".format(k.replace("_execute_", "- ")))

        return

    exec_function = f"_execute_{function}"
    if exec_function in vars(self._pulumi_code):
        pulumi_stack = self._get_pulumi_stack()

        try:
            vars(self._pulumi_code)[exec_function](
                config=pulumi_stack.get_all_config(),
                outputs=pulumi_stack.outputs(),
                args=args,
            )
        except Exception as e:
            # BUGFIX: only subprocess-style exceptions carry `returncode`;
            # accessing it unconditionally raised AttributeError and masked
            # the real failure. Log the error and fall back to exit code 1.
            logger.exception("Execute function {} failed".format(exec_function))
            return getattr(e, "returncode", 1)
    else:
        logger.error("{} is not defined in {}".format(function, self._pulumi_code))
@pulumi_ws
def assimilate(self):
    """Import resources into Pulumi stack

    Iterates the RESOURCES list declared by the stack's Pulumi code and
    runs `pulumi import` for each; prompts interactively for missing IDs.
    """
    pulumi_stack = self._get_pulumi_stack(create=True)

    # now lets import each defined resource
    for r in self._pulumi_code.RESOURCES:
        r_id = r["id"]
        if not r_id:
            r_id = input(
                "Please enter ID for {} ({}):".format(r["name"], r["type"])
            )

        logger.info("Importing {} ({}) as {}".format(r_id, r["type"], r["name"]))

        args = ["import", r["type"], r["name"], r_id, "--yes"]
        # NOTE(review): _run_pulumi_cmd_sync is a private automation-API
        # method; there is no public equivalent for `pulumi import` yet
        pulumi_stack._run_pulumi_cmd_sync(args)

    return
@pulumi_ws
def export(self, remove_pending_operations):
    """Exports a Pulumi stack

    Args:
        remove_pending_operations: if True, strip pending_operations from
            the exported state and re-import it (stuck-stack repair);
            otherwise print the deployment JSON to stdout.
    """

    pulumi_stack = self._get_pulumi_stack()
    deployment = pulumi_stack.export_stack()

    if remove_pending_operations:
        deployment.deployment.pop("pending_operations", None)
        pulumi_stack.import_stack(deployment)
        # FIX: use lazy logging args instead of eager %-interpolation,
        # consistent with the other logger calls in this module
        logger.info("Removed all pending_operations from %s", self.stackname)
    else:
        print(json.dumps(deployment.deployment))

    return
2021-09-20 14:19:14 +00:00
@pulumi_ws
def set_config(self, key, value, secret):
    """Set a config or secret

    Writes the value into the Pulumi stack config, then mirrors the
    (encrypted) value plus the encryption salt/key back into the
    CloudBender stack config file so it can be committed to git.

    Args:
        key: config key; one level of nesting via "root.leaf" is supported.
        value: the value to store.
        secret: if True, store as a Pulumi secret.
    """

    # ruamel round-trips the stack's YAML config preserving format/quotes
    ryaml = ruamel.yaml.YAML()
    ryaml.indent(mapping=2)
    ryaml.preserve_quotes = True

    pulumi_stack = self._get_pulumi_stack(create=True)
    pulumi_stack.set_config(key, pulumi.automation.ConfigValue(value, secret))  # Pulumi bug https://github.com/pulumi/pulumi/issues/13063 so no: , path=True)

    # Store salt or key and encrypted value in CloudBender stack config
    settings = None
    pulumi_settings = pulumi_stack.workspace.stack_settings(
        pulumi_stack.name
    )._serialize()

    with open(self.path, "r") as file:
        settings = ryaml.load(file)

    if "pulumi" not in settings:
        settings["pulumi"] = {}

    if "encryptionsalt" in pulumi_settings:
        settings["pulumi"]["encryptionsalt"] = pulumi_settings["encryptionsalt"]
    if "encryptedkey" in pulumi_settings:
        settings["pulumi"]["encryptedkey"] = pulumi_settings["encryptedkey"]

    if "parameters" not in settings:
        settings["parameters"] = {}

    # hack for bug above, we support one level of nested values for now
    _val = pulumi_settings["config"]["{}:{}".format(self.parameters["Conglomerate"], key)]
    if '.' in key:
        (root, leaf) = key.split('.')
        if root not in settings["parameters"]:
            settings["parameters"][root] = {}
        settings["parameters"][root][leaf] = _val
    else:
        settings["parameters"][key] = _val

    with open(self.path, "w") as file:
        ryaml.dump(settings, stream=file)

    return
@pulumi_ws
def get_config(self, key):
    """Print the value of a Pulumi config entry or secret for this stack."""

    # path=True lets dotted keys address nested config values
    config_value = self._get_pulumi_stack().get_config(key, path=True)
    print(config_value.value)
def create_change_set(self, change_set_name):
    """Create a CloudFormation change set for this stack and wait for it.

    :param change_set_name: Name of the change set to create.
    :returns: The final simplified status from the wait loop.
    """

    # Render parameters and the template body before talking to AWS
    self.resolve_parameters()
    self.read_template_file()

    logger.info(
        "Creating change set {0} for stack {1}".format(
            change_set_name, self.stackname
        )
    )

    kwargs = dict(
        StackName=self.stackname,
        ChangeSetName=change_set_name,
        Parameters=self.cfn_parameters,
        Tags=[{"Key": str(k), "Value": str(v)} for k, v in self.tags.items()],
        Capabilities=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"],
    )
    # Attach template either inline or as an S3 URL
    self.connection_manager.call(
        "cloudformation",
        "create_change_set",
        self._add_template_arg(kwargs),
        profile=self.profile,
        region=self.region,
    )

    return self._wait_for_completion()
def get_status(self):
    """
    Return the stack's current CloudFormation status.

    :returns: The StackStatus string, or None if the stack does not exist.
    """
    try:
        response = self.connection_manager.call(
            "cloudformation",
            "describe_stacks",
            {"StackName": self.stackname},
            profile=self.profile,
            region=self.region,
        )
        status = response["Stacks"][0]["StackStatus"]
    except ClientError as err:
        # CloudFormation reports a missing stack via the error message text
        if err.response["Error"]["Message"].endswith("does not exist"):
            return None
        raise

    return status
def describe_events(self):
    """
    Return the CloudFormation events for this stack.

    :returns: The raw describe_stack_events response, or None if the
        stack does not exist.
    """
    try:
        events = self.connection_manager.call(
            "cloudformation",
            "describe_stack_events",
            {"StackName": self.stackname},
            profile=self.profile,
            region=self.region,
        )
    except ClientError as err:
        # Missing stack is signalled through the error message, not a code
        if err.response["Error"]["Message"].endswith("does not exist"):
            return None
        raise

    return events
def _wait_for_completion(self, timeout=0):
    """
    Poll the stack status until it leaves IN_PROGRESS, logging events meanwhile.

    :param timeout: Maximum seconds to wait; 0 means wait indefinitely.
    :returns: The final simplified stack status, or None if the stack vanished.
    """
    status = "IN_PROGRESS"

    # Start slightly in the past so events fired just before polling are shown
    self.most_recent_event_datetime = datetime.now(tzutc()) - timedelta(seconds=3)

    waited = 0
    while status == "IN_PROGRESS":
        # Honour the timeout only when one was requested
        if timeout and waited >= timeout:
            break

        status = self._get_simplified_status(self.get_status())
        if not status:
            return None

        self._log_new_events()
        time.sleep(4)
        waited += 4

    return status
@staticmethod
def _get_simplified_status(status):
2022-02-22 10:04:29 +00:00
"""Returns the simplified Stack Status."""
2018-11-22 18:31:59 +00:00
if status:
if status.endswith("ROLLBACK_COMPLETE"):
return "FAILED"
2018-11-22 18:31:59 +00:00
elif status.endswith("_COMPLETE"):
return "COMPLETE"
2018-11-22 18:31:59 +00:00
elif status.endswith("_IN_PROGRESS"):
return "IN_PROGRESS"
2018-11-22 18:31:59 +00:00
elif status.endswith("_FAILED"):
return "FAILED"
2018-11-22 18:31:59 +00:00
else:
2022-02-22 10:04:29 +00:00
return "Unknown"
def _log_new_events(self):
"""
Log the latest stack events while the stack is being built.
"""
events = self.describe_events()
if events:
events = events["StackEvents"]
events.reverse()
new_events = [
2022-02-22 10:04:29 +00:00
event
for event in events
2018-11-22 18:31:59 +00:00
if event["Timestamp"] > self.most_recent_event_datetime
]
for event in new_events:
2022-02-22 10:04:29 +00:00
logger.info(
" ".join(
[
self.region,
self.stackname,
event["LogicalResourceId"],
event["ResourceType"],
event["ResourceStatus"],
event.get("ResourceStatusReason", ""),
]
)
)
2018-11-22 18:31:59 +00:00
self.most_recent_event_datetime = event["Timestamp"]
# stackoutput inspection
def _inspect_stacks(self, conglomerate):
# Get all stacks of the conglomertate
running_stacks = self.connection_manager.call(
"cloudformation",
"describe_stacks",
2022-02-22 10:04:29 +00:00
profile=self.profile,
region=self.region,
)
2019-04-18 16:30:50 +00:00
2019-06-27 13:31:51 +00:00
stacks = []
2022-02-22 10:04:29 +00:00
for stack in running_stacks["Stacks"]:
for tag in stack["Tags"]:
if tag["Key"] == "Conglomerate" and tag["Value"] == conglomerate:
2019-06-27 13:31:51 +00:00
stacks.append(stack)
break
2021-02-12 11:06:43 +00:00
# Gather stack outputs, use Tag['Artifact'] as name space: Artifact.OutputName
2019-06-27 13:31:51 +00:00
stack_outputs = {}
for stack in stacks:
# If stack has an Artifact Tag put resources into the namespace Artifact.Resource
artifact = None
2022-02-22 10:04:29 +00:00
for tag in stack["Tags"]:
if tag["Key"] == "Artifact":
artifact = tag["Value"]
2019-06-27 13:31:51 +00:00
if artifact:
key_prefix = "{}.".format(artifact)
else:
key_prefix = ""
try:
2022-02-22 10:04:29 +00:00
for output in stack["Outputs"]:
2019-06-27 13:31:51 +00:00
# Gather all outputs of the stack into one dimensional key=value structure
2022-02-22 10:04:29 +00:00
stack_outputs[key_prefix + output["OutputKey"]] = output[
"OutputValue"
]
2019-06-27 13:31:51 +00:00
except KeyError:
pass
# Add outputs from stacks into the data for jinja under StackOutput
return stack_outputs
def _add_template_arg(self, kwargs):
if self.template_bucket_url:
# https://bucket-name.s3.Region.amazonaws.com/key name
# so we need the region, AWS as usual
2022-02-22 10:04:29 +00:00
(bucket, path) = get_s3_url(
self.template_bucket_url, self.rel_path, self.stackname + ".yaml"
)
bucket_region = self.connection_manager.call(
"s3",
"get_bucket_location",
{"Bucket": bucket},
profile=self.profile,
region=self.region,
)["LocationConstraint"]
# If bucket is in us-east-1 AWS returns 'none' cause reasons grrr
if not bucket_region:
2022-02-22 10:04:29 +00:00
bucket_region = "us-east-1"
2022-02-22 10:04:29 +00:00
kwargs["TemplateURL"] = "https://{}.s3.{}.amazonaws.com/{}".format(
bucket, bucket_region, path
)
else:
2022-02-22 10:04:29 +00:00
kwargs["TemplateBody"] = self.cfn_template
return kwargs
def _log_pulumi(self, text):
2022-02-22 10:04:29 +00:00
text = re.sub(
r"pulumi:pulumi:Stack\s*{}-{}\s*".format(
self.parameters["Conglomerate"], self.stackname
),
"",
text,
)
if text and not text.isspace():
2021-10-04 15:51:16 +00:00
logger.info(" ".join([self.region, self.stackname, text]))
def _get_pulumi_stack(self, create=False):
    """Select the Pulumi automation-API stack for this CloudBender stack.

    :param create: When True, create the stack if missing and ensure the
        AWS provider plugin matching the installed pulumi_aws is present.
    :returns: the selected pulumi.automation Stack object.
    """
    common_args = dict(
        stack_name=self.pulumi_stackname,
        project_name=self.parameters["Conglomerate"],
        program=self._pulumi_code.pulumi_program,
        opts=self.pulumi_ws_opts,
    )

    if create:
        pulumi_stack = pulumi.automation.create_or_select_stack(**common_args)
        # Plugin version must match the installed pulumi_aws package
        pulumi_stack.workspace.install_plugin(
            "aws", pkg_resources.get_distribution("pulumi_aws").version
        )
    else:
        pulumi_stack = pulumi.automation.select_stack(**common_args)

    return pulumi_stack
def _set_pulumi_args(self, kwargs={}):
kwargs["on_output"] = self._log_pulumi
kwargs["policy_packs"] = []
kwargs["policy_pack_configs"] = []
# Try to find policies in each artifact location
if "policies" in self.pulumi:
for policy in self.pulumi["policies"]:
found = False
for artifacts_path in self.ctx["artifact_paths"]:
path = "{}/pulumi/policies/{}".format(
artifacts_path.resolve(), policy
)
if os.path.exists(path):
kwargs["policy_packs"].append(path)
found = True
if not found:
logger.error(f"Could not find policy implementation for {policy}!")
raise FileNotFoundError
try:
kwargs["policy_pack_configs"] = self.pulumi["policy_configs"]
except KeyError:
pass
return kwargs