feat: New command, cleanup, allow region overwrite
ZeroDownTime/CloudBender/pipeline/head This commit looks good Details

This commit is contained in:
Stefan Reimer 2022-07-04 16:15:14 +02:00
parent 1e7665f2bb
commit 60bcc25f52
6 changed files with 145 additions and 89 deletions

View File

@ -23,34 +23,46 @@ logger = logging.getLogger(__name__)
"profile",
help="Use named AWS .config profile, overwrites any stack config",
)
@click.option(
"--region",
"region",
help="Use region, overwrites any stack config",
)
@click.option("--dir", "directory", help="Specify cloudbender project directory.")
@click.option("--debug", is_flag=True, help="Turn on debug logging.")
@click.pass_context
def cli(ctx, profile, debug, directory):
def cli(ctx, profile, region, debug, directory):
setup_logging(debug)
# Skip parsing all the things if we just want the versions
if ctx.invoked_subcommand != "version":
# Make sure our root is abs
if directory:
if not os.path.isabs(directory):
directory = os.path.normpath(os.path.join(os.getcwd(), directory))
elif os.getenv("CLOUDBENDER_PROJECT_ROOT"):
directory = os.getenv("CLOUDBENDER_PROJECT_ROOT")
else:
directory = os.getcwd()
if ctx.invoked_subcommand == "version":
return
# Read global config
try:
cb = CloudBender(directory, profile)
except InvalidProjectDir as e:
logger.error(e)
sys.exit(1)
# Make sure our root is abs
if directory:
if not os.path.isabs(directory):
directory = os.path.normpath(os.path.join(os.getcwd(), directory))
elif os.getenv("CLOUDBENDER_PROJECT_ROOT"):
directory = os.getenv("CLOUDBENDER_PROJECT_ROOT")
else:
directory = os.getcwd()
# Read global config
try:
cb = CloudBender(directory, profile, region)
except InvalidProjectDir as e:
logger.error(e)
sys.exit(1)
# Only load stackgroups to get profile and region
if ctx.invoked_subcommand == "wrap":
cb.read_config(loadStacks=False)
else:
cb.read_config()
cb.dump_config()
ctx.obj = cb
cb.dump_config()
ctx.obj = cb
@click.command()
@ -193,7 +205,9 @@ def execute(cb, stack_name, function, args):
s.execute(function, args)
else:
logger.info(
"{} uses Cloudformation, no exec feature available.".format(s.stackname)
"{} uses Cloudformation, no execute feature available.".format(
s.stackname
)
)
@ -311,6 +325,17 @@ def delete(cb, stack_names, multi):
future.result()
@click.command()
@click.argument("stack_group", nargs=1, required=True)
@click.argument("cmd", nargs=-1, required=True)
@click.pass_obj
def wrap(cb, stack_group, cmd):
    """Execute custom external program"""
    # Resolve the target group from the already-loaded config tree
    # (the CLI loads stack groups only, without stacks, for this subcommand).
    sg = cb.sg.get_stackgroup(stack_group)
    # cmd arrives as a tuple of CLI tokens (nargs=-1); re-join into one
    # command line before delegating to CloudBender.wrap, which exports the
    # group's AWS profile/region env vars and spawns the program.
    cb.wrap(sg, " ".join(cmd))
@click.command()
@click.pass_obj
def clean(cb):
@ -458,6 +483,7 @@ cli.add_command(get_config)
cli.add_command(export)
cli.add_command(assimilate)
cli.add_command(execute)
cli.add_command(wrap)
if __name__ == "__main__":
cli(obj={})

View File

@ -9,18 +9,18 @@ import logging
logger = logging.getLogger(__name__)
sessions = {}
clients = {}
class BotoConnection:
_sessions = {}
_clients = {}
def __init__(self, profile=None, region=None):
    # Remember the default AWS profile/region for this connection; the
    # _get_session/_get_client helpers may still be called with explicit
    # per-call overrides.
    self.region = region
    self.profile = profile
def _get_session(self, profile=None, region=None):
if self._sessions.get((profile, region)):
return self._sessions[(profile, region)]
if sessions.get((profile, region)):
return sessions[(profile, region)]
# Construct botocore session with cache
# Setup boto to cache STS tokens for MFA
@ -37,22 +37,22 @@ class BotoConnection:
"assume-role"
).cache = credentials.JSONFileCache(cli_cache)
self._sessions[(profile, region)] = session
sessions[(profile, region)] = session
return session
def _get_client(self, service, profile=None, region=None):
if self._clients.get((profile, region, service)):
if clients.get((profile, region, service)):
logger.debug(
"Reusing boto session for {} {} {}".format(profile, region, service)
)
return self._clients[(profile, region, service)]
return clients[(profile, region, service)]
session = self._get_session(profile, region)
client = boto3.Session(botocore_session=session).client(service)
logger.debug("New boto session for {} {} {}".format(profile, region, service))
self._clients[(profile, region, service)] = client
clients[(profile, region, service)] = client
return client
def call(self, service, command, kwargs={}, profile=None, region=None):
@ -73,3 +73,20 @@ class BotoConnection:
pass
else:
raise e
def exportProfileEnv(self):
    """
    Export the AWS credentials of this connection's profile/region into
    os.environ so external programs (e.g. Pulumi or Steampipe) use the
    same identity.
    """
    # Resolve credentials via the (cached) botocore session for our
    # profile/region pair.
    credentials = self._get_session(self.profile, self.region).get_credentials()
    # Only temporary (STS) credentials carry a session token.
    if credentials.token:
        os.environ["AWS_SESSION_TOKEN"] = credentials.token
    os.environ["AWS_ACCESS_KEY_ID"] = credentials.access_key
    os.environ["AWS_SECRET_ACCESS_KEY"] = credentials.secret_key
    # "global" appears to be CloudBender's pseudo-region placeholder
    # (see CloudBender.wrap's default) — don't export it to AWS tooling.
    # NOTE(review): get_credentials() can return None when no credentials
    # resolve; that would raise AttributeError here — confirm intended.
    if self.region and self.region != "global":
        os.environ["AWS_DEFAULT_REGION"] = self.region

View File

@ -1,7 +1,10 @@
import os
import pathlib
import logging
import pexpect
from .stackgroup import StackGroup
from .connection import BotoConnection
from .jinja import read_config_file
from .exceptions import InvalidProjectDir
@ -11,7 +14,7 @@ logger = logging.getLogger(__name__)
class CloudBender(object):
"""Config Class to handle recursive conf/* config tree"""
def __init__(self, root_path, profile):
def __init__(self, root_path, profile, region):
self.root = pathlib.Path(root_path)
self.sg = None
self.all_stacks = []
@ -23,11 +26,15 @@ class CloudBender(object):
"outputs_path": self.root.joinpath("outputs"),
"artifact_paths": [self.root.joinpath("artifacts")],
"profile": profile,
"region": region,
}
if profile:
logger.info("Profile overwrite: using {}".format(self.ctx["profile"]))
if region:
logger.info("Region overwrite: using {}".format(self.ctx["region"]))
if not self.ctx["config_path"].is_dir():
raise InvalidProjectDir(
"Check '{0}' exists and is a valid CloudBender project folder.".format(
@ -35,7 +42,7 @@ class CloudBender(object):
)
)
def read_config(self):
def read_config(self, loadStacks=True):
"""Load the <path>/config.yaml, <path>/*.yaml as stacks, sub-folders are sub-groups"""
# Read top level config.yaml and extract CloudBender CTX
@ -73,7 +80,7 @@ class CloudBender(object):
self.ctx[k] = self.root.joinpath(v)
self.sg = StackGroup(self.ctx["config_path"], self.ctx)
self.sg.read_config()
self.sg.read_config(loadStacks=loadStacks)
self.all_stacks = self.sg.get_stacks()
@ -127,3 +134,17 @@ class CloudBender(object):
matching_stacks.append(s)
return matching_stacks
def wrap(self, stack_group, cmd):
    """
    Set the AWS environment based on the stack group's profile/region,
    then execute a custom external command (e.g. steampipe) interactively.
    """
    # Fall back to the "default" profile and the "global" pseudo-region
    # when the group config does not pin them.
    profile = stack_group.config.get("profile", "default")
    region = stack_group.config.get("region", "global")
    connection_manager = BotoConnection(profile, region)
    # Publish AWS_* env vars so the child process inherits our identity.
    connection_manager.exportProfileEnv()
    # pexpect.spawn + interact attaches the child to our terminal until
    # it exits; cmd is a single command-line string, not an argv list.
    child = pexpect.spawn(cmd)
    child.interact()

View File

@ -34,7 +34,7 @@ def get_pulumi_version():
def resolve_outputs(outputs):
my_outputs = {}
for k,v in outputs.items():
for k, v in outputs.items():
if type(v) == pulumi.automation._output.OutputValue:
if v.secret:
my_outputs[k] = "***"
@ -45,6 +45,7 @@ def resolve_outputs(outputs):
return my_outputs
def pulumi_ws(func):
@wraps(func)
def decorated(self, *args, **kwargs):
@ -114,28 +115,7 @@ def pulumi_ws(func):
# Ugly hack as Pulumi currently doesnt support MFA_TOKENs during role assumptions
# Do NOT set them via 'aws:secretKey' as they end up in the self.json in plain text !!!
if (
self.connection_manager._sessions[(self.profile, self.region)]
.get_credentials()
.token
):
os.environ["AWS_SESSION_TOKEN"] = (
self.connection_manager._sessions[(self.profile, self.region)]
.get_credentials()
.token
)
os.environ["AWS_ACCESS_KEY_ID"] = (
self.connection_manager._sessions[(self.profile, self.region)]
.get_credentials()
.access_key
)
os.environ["AWS_SECRET_ACCESS_KEY"] = (
self.connection_manager._sessions[(self.profile, self.region)]
.get_credentials()
.secret_key
)
os.environ["AWS_DEFAULT_REGION"] = self.region
self.connection_manager.exportProfileEnv()
# Secrets provider
if "secretsProvider" in self.pulumi:

View File

@ -60,7 +60,6 @@ class Stack(object):
self.onfailure = "DELETE"
self.notfication_sns = []
self.id = (self.profile, self.region, self.stackname)
self.aws_stackid = None
self.md5 = None
@ -69,7 +68,7 @@ class Stack(object):
self.cfn_template = None
self.cfn_parameters = []
self.cfn_data = None
self.connection_manager = BotoConnection(self.profile, self.region)
self.connection_manager = None
self.status = None
self.store_outputs = False
self.dependencies = set()
@ -107,7 +106,10 @@ class Stack(object):
if p in sg_config:
setattr(self, p, sg_config[p])
# profile needs special treatment due to cmd line overwrite option
# profile and region need special treatment due to cmd line overwrite option
if self.ctx["region"]:
self.region = self.ctx["region"]
if self.ctx["profile"]:
self.profile = self.ctx["profile"]
else:
@ -159,6 +161,9 @@ class Stack(object):
"onfailure must be one of DO_NOTHING | ROLLBACK | DELETE"
)
self.id = (self.profile, self.region, self.stackname)
self.connection_manager = BotoConnection(self.profile, self.region)
logger.debug("Stack {} added.".format(self.id))
def render(self):
@ -583,13 +588,15 @@ class Stack(object):
if self.mode == "pulumi":
try:
pulumi_stack = self._get_pulumi_stack()
outputs=pulumi_stack.outputs()
outputs = pulumi_stack.outputs()
except pulumi.automation.errors.StackNotFoundError:
outputs = {}
pass
if vars(self._pulumi_code)["__doc__"]:
output= render_docs(vars(self._pulumi_code)["__doc__"], resolve_outputs(outputs))
output = render_docs(
vars(self._pulumi_code)["__doc__"], resolve_outputs(outputs)
)
else:
output = "No template documentation found."
@ -907,7 +914,7 @@ class Stack(object):
"""
Executes custom Python function within a Pulumi stack
These functions are executed within the stack environment and are provided with all stack input parameters as well as current outputs.
These plugin functions are executed within the stack environment and are provided with all stack input parameters as well as current outputs.
Think of "docker exec" into an existing container...
"""

View File

@ -32,7 +32,7 @@ class StackGroup(object):
for s in self.stacks:
s.dump_config()
def read_config(self, parent_config={}):
def read_config(self, parent_config={}, loadStacks=True):
if not self.path.is_dir():
return None
@ -51,30 +51,40 @@ class StackGroup(object):
self.config = dict_merge(parent_config, _config)
stackname_prefix = self.config.get("stacknameprefix", "")
# profile and region need special treatment due to cmd line overwrite option
if self.ctx["region"]:
self.config["region"] = self.ctx["region"]
if self.ctx["profile"]:
self.config["profile"] = self.ctx["profile"]
logger.debug("StackGroup {} added.".format(self.name))
# Add stacks
stacks = [s for s in self.path.glob("*.yaml") if not s.name == "config.yaml"]
for stack_path in stacks:
stackname = stack_path.name.split(".")[0]
template = stackname
if stackname_prefix:
stackname = stackname_prefix + stackname
if loadStacks:
stacks = [
s for s in self.path.glob("*.yaml") if not s.name == "config.yaml"
]
for stack_path in stacks:
stackname = stack_path.name.split(".")[0]
template = stackname
if stackname_prefix:
stackname = stackname_prefix + stackname
new_stack = Stack(
name=stackname,
template=template,
path=stack_path,
rel_path=str(self.rel_path),
ctx=self.ctx,
)
new_stack.read_config(self.config)
self.stacks.append(new_stack)
new_stack = Stack(
name=stackname,
template=template,
path=stack_path,
rel_path=str(self.rel_path),
ctx=self.ctx,
)
new_stack.read_config(self.config)
self.stacks.append(new_stack)
# Create StackGroups recursively
for sub_group in [s for s in self.path.iterdir() if s.is_dir()]:
sg = StackGroup(sub_group, self.ctx)
sg.read_config(self.config)
sg.read_config(self.config, loadStacks=loadStacks)
self.sgs.append(sg)
@ -108,25 +118,20 @@ class StackGroup(object):
return stacks
def get_stackgroup(self, name=None, recursive=True, match_by="name"):
def get_stackgroup(self, name=None, match_by="path"):
"""Returns stack group matching stackgroup_name or all if None"""
if (
not name
or (self.name == name and match_by == "name")
or (self.path.match(name) and match_by == "path")
):
if self.path.match(name):
logger.debug("Found stack_group {}".format(self.name))
return self
if name and self.name != "config":
if name and name != "config":
logger.debug(
"Looking for stack_group {} in group {}".format(name, self.name)
)
if recursive:
for sg in self.sgs:
s = sg.get_stackgroup(name, recursive, match_by)
if s:
return s
for sg in self.sgs:
s = sg.get_stackgroup(name, match_by)
if s:
return s
return None