feat: New command, cleanup, allow region overwrite

Stefan Reimer 2022-07-04 16:15:14 +02:00
parent 1e7665f2bb
commit 60bcc25f52
6 changed files with 145 additions and 89 deletions


@@ -23,34 +23,46 @@ logger = logging.getLogger(__name__)
     "profile",
     help="Use named AWS .config profile, overwrites any stack config",
 )
+@click.option(
+    "--region",
+    "region",
+    help="Use region, overwrites any stack config",
+)
 @click.option("--dir", "directory", help="Specify cloudbender project directory.")
 @click.option("--debug", is_flag=True, help="Turn on debug logging.")
 @click.pass_context
-def cli(ctx, profile, debug, directory):
+def cli(ctx, profile, region, debug, directory):
     setup_logging(debug)

     # Skip parsing all the things if we just want the versions
-    if ctx.invoked_subcommand != "version":
-        # Make sure our root is abs
-        if directory:
-            if not os.path.isabs(directory):
-                directory = os.path.normpath(os.path.join(os.getcwd(), directory))
-        elif os.getenv("CLOUDBENDER_PROJECT_ROOT"):
-            directory = os.getenv("CLOUDBENDER_PROJECT_ROOT")
-        else:
-            directory = os.getcwd()
-
-        # Read global config
-        try:
-            cb = CloudBender(directory, profile)
-        except InvalidProjectDir as e:
-            logger.error(e)
-            sys.exit(1)
-
-        cb.read_config()
-        cb.dump_config()
-
-        ctx.obj = cb
+    if ctx.invoked_subcommand == "version":
+        return
+
+    # Make sure our root is abs
+    if directory:
+        if not os.path.isabs(directory):
+            directory = os.path.normpath(os.path.join(os.getcwd(), directory))
+    elif os.getenv("CLOUDBENDER_PROJECT_ROOT"):
+        directory = os.getenv("CLOUDBENDER_PROJECT_ROOT")
+    else:
+        directory = os.getcwd()
+
+    # Read global config
+    try:
+        cb = CloudBender(directory, profile, region)
+    except InvalidProjectDir as e:
+        logger.error(e)
+        sys.exit(1)
+
+    # Only load stackgroups to get profile and region
+    if ctx.invoked_subcommand == "wrap":
+        cb.read_config(loadStacks=False)
+    else:
+        cb.read_config()
+
+    cb.dump_config()
+    ctx.obj = cb


 @click.command()
@@ -193,7 +205,9 @@ def execute(cb, stack_name, function, args):
             s.execute(function, args)
         else:
             logger.info(
-                "{} uses Cloudformation, no exec feature available.".format(s.stackname)
+                "{} uses Cloudformation, no execute feature available.".format(
+                    s.stackname
+                )
             )
@@ -311,6 +325,17 @@ def delete(cb, stack_names, multi):
             future.result()


+@click.command()
+@click.argument("stack_group", nargs=1, required=True)
+@click.argument("cmd", nargs=-1, required=True)
+@click.pass_obj
+def wrap(cb, stack_group, cmd):
+    """Execute custom external program"""
+
+    sg = cb.sg.get_stackgroup(stack_group)
+    cb.wrap(sg, " ".join(cmd))
+
+
 @click.command()
 @click.pass_obj
 def clean(cb):
@@ -458,6 +483,7 @@ cli.add_command(get_config)
 cli.add_command(export)
 cli.add_command(assimilate)
 cli.add_command(execute)
+cli.add_command(wrap)

 if __name__ == "__main__":
     cli(obj={})
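The new global --region option follows the same precedence rule as --profile: a value passed on the command line wins over whatever the stack or stack group config declares. A minimal sketch of that precedence, not part of the commit and using placeholder values, mirroring the "cmd line overwrite" handling added to Stack.read_config() and StackGroup.read_config() further down:

def effective_region(ctx, config):
    # ctx carries the CLI overrides, config the stack group's own settings;
    # "global" is the same fallback the new wrap() helper uses
    return ctx.get("region") or config.get("region", "global")

assert effective_region({"region": "eu-central-1"}, {"region": "us-east-1"}) == "eu-central-1"
assert effective_region({"region": None}, {"region": "us-east-1"}) == "us-east-1"
assert effective_region({"region": None}, {}) == "global"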


@@ -9,18 +9,18 @@ import logging

 logger = logging.getLogger(__name__)

+sessions = {}
+clients = {}
+

 class BotoConnection:
-    _sessions = {}
-    _clients = {}
-
     def __init__(self, profile=None, region=None):
         self.region = region
         self.profile = profile

     def _get_session(self, profile=None, region=None):
-        if self._sessions.get((profile, region)):
-            return self._sessions[(profile, region)]
+        if sessions.get((profile, region)):
+            return sessions[(profile, region)]

         # Construct botocore session with cache
         # Setup boto to cache STS tokens for MFA
@@ -37,22 +37,22 @@ class BotoConnection:
                 "assume-role"
             ).cache = credentials.JSONFileCache(cli_cache)

-        self._sessions[(profile, region)] = session
+        sessions[(profile, region)] = session
         return session

     def _get_client(self, service, profile=None, region=None):
-        if self._clients.get((profile, region, service)):
+        if clients.get((profile, region, service)):
             logger.debug(
                 "Reusing boto session for {} {} {}".format(profile, region, service)
             )
-            return self._clients[(profile, region, service)]
+            return clients[(profile, region, service)]

         session = self._get_session(profile, region)
         client = boto3.Session(botocore_session=session).client(service)
         logger.debug("New boto session for {} {} {}".format(profile, region, service))

-        self._clients[(profile, region, service)] = client
+        clients[(profile, region, service)] = client
         return client

     def call(self, service, command, kwargs={}, profile=None, region=None):
@@ -73,3 +73,20 @@ class BotoConnection:
                 pass
             else:
                 raise e
+
+    def exportProfileEnv(self):
+        """
+        Set AWS os.env variables based on our connection profile to allow external programs use
+        same profile, region. Eg. Pulumi or Steampipe
+        """
+
+        credentials = self._get_session(self.profile, self.region).get_credentials()
+
+        if credentials.token:
+            os.environ["AWS_SESSION_TOKEN"] = credentials.token
+        os.environ["AWS_ACCESS_KEY_ID"] = credentials.access_key
+        os.environ["AWS_SECRET_ACCESS_KEY"] = credentials.secret_key
+
+        if self.region and self.region != "global":
+            os.environ["AWS_DEFAULT_REGION"] = self.region


@@ -1,7 +1,10 @@
+import os
 import pathlib
 import logging
+import pexpect

 from .stackgroup import StackGroup
+from .connection import BotoConnection
 from .jinja import read_config_file
 from .exceptions import InvalidProjectDir
@@ -11,7 +14,7 @@ logger = logging.getLogger(__name__)
 class CloudBender(object):
     """Config Class to handle recursive conf/* config tree"""

-    def __init__(self, root_path, profile):
+    def __init__(self, root_path, profile, region):
         self.root = pathlib.Path(root_path)
         self.sg = None
         self.all_stacks = []
@@ -23,11 +26,15 @@ class CloudBender(object):
             "outputs_path": self.root.joinpath("outputs"),
             "artifact_paths": [self.root.joinpath("artifacts")],
             "profile": profile,
+            "region": region,
         }

         if profile:
             logger.info("Profile overwrite: using {}".format(self.ctx["profile"]))

+        if region:
+            logger.info("Region overwrite: using {}".format(self.ctx["region"]))
+
         if not self.ctx["config_path"].is_dir():
             raise InvalidProjectDir(
                 "Check '{0}' exists and is a valid CloudBender project folder.".format(
@@ -35,7 +42,7 @@ class CloudBender(object):
                 )
             )

-    def read_config(self):
+    def read_config(self, loadStacks=True):
         """Load the <path>/config.yaml, <path>/*.yaml as stacks, sub-folders are sub-groups"""

         # Read top level config.yaml and extract CloudBender CTX
@@ -73,7 +80,7 @@ class CloudBender(object):
                     self.ctx[k] = self.root.joinpath(v)

         self.sg = StackGroup(self.ctx["config_path"], self.ctx)
-        self.sg.read_config()
+        self.sg.read_config(loadStacks=loadStacks)

         self.all_stacks = self.sg.get_stacks()
@@ -127,3 +134,17 @@ class CloudBender(object):
                 matching_stacks.append(s)

         return matching_stacks
+
+    def wrap(self, stack_group, cmd):
+        """
+        Set AWS environment based on profile before executing a custom command, eg. steampipe
+        """
+
+        profile = stack_group.config.get("profile", "default")
+        region = stack_group.config.get("region", "global")
+
+        connection_manager = BotoConnection(profile, region)
+        connection_manager.exportProfileEnv()
+
+        child = pexpect.spawn(cmd)
+        child.interact()
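Together with the CLI change above, wrap() resolves the group's profile and region, exports them, and hands the terminal to the spawned command. A rough Python equivalent of `cloudbender wrap <group> <cmd>`; the project path, group name and wrapped command are placeholders, the cloudbender.core import path is an assumption, and it should be run from an interactive terminal because pexpect's interact() attaches the child to the current tty:

from cloudbender.core import CloudBender

cb = CloudBender("/path/to/project", profile=None, region=None)
cb.read_config(loadStacks=False)  # only the stack group configs are needed

sg = cb.sg.get_stackgroup("network")        # placeholder group, matched by path
cb.wrap(sg, "aws sts get-caller-identity")  # runs with the group's AWS env exported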


@@ -34,7 +34,7 @@ def get_pulumi_version():
 def resolve_outputs(outputs):
     my_outputs = {}

-    for k,v in outputs.items():
+    for k, v in outputs.items():
         if type(v) == pulumi.automation._output.OutputValue:
             if v.secret:
                 my_outputs[k] = "***"
@@ -45,6 +45,7 @@ def resolve_outputs(outputs):
     return my_outputs

+
 def pulumi_ws(func):
     @wraps(func)
     def decorated(self, *args, **kwargs):
@@ -114,28 +115,7 @@ def pulumi_ws(func):
         # Ugly hack as Pulumi currently doesnt support MFA_TOKENs during role assumptions
         # Do NOT set them via 'aws:secretKey' as they end up in the self.json in plain text !!!
-        if (
-            self.connection_manager._sessions[(self.profile, self.region)]
-            .get_credentials()
-            .token
-        ):
-            os.environ["AWS_SESSION_TOKEN"] = (
-                self.connection_manager._sessions[(self.profile, self.region)]
-                .get_credentials()
-                .token
-            )
-        os.environ["AWS_ACCESS_KEY_ID"] = (
-            self.connection_manager._sessions[(self.profile, self.region)]
-            .get_credentials()
-            .access_key
-        )
-        os.environ["AWS_SECRET_ACCESS_KEY"] = (
-            self.connection_manager._sessions[(self.profile, self.region)]
-            .get_credentials()
-            .secret_key
-        )
-        os.environ["AWS_DEFAULT_REGION"] = self.region
+        self.connection_manager.exportProfileEnv()

         # Secrets provider
         if "secretsProvider" in self.pulumi:


@@ -60,7 +60,6 @@ class Stack(object):
         self.onfailure = "DELETE"
         self.notfication_sns = []

-        self.id = (self.profile, self.region, self.stackname)
         self.aws_stackid = None

         self.md5 = None
@@ -69,7 +68,7 @@ class Stack(object):
         self.cfn_template = None
         self.cfn_parameters = []
         self.cfn_data = None
-        self.connection_manager = BotoConnection(self.profile, self.region)
+        self.connection_manager = None
         self.status = None
         self.store_outputs = False
         self.dependencies = set()
@@ -107,7 +106,10 @@ class Stack(object):
             if p in sg_config:
                 setattr(self, p, sg_config[p])

-        # profile needs special treatment due to cmd line overwrite option
+        # profile and region need special treatment due to cmd line overwrite option
+        if self.ctx["region"]:
+            self.region = self.ctx["region"]
+
         if self.ctx["profile"]:
             self.profile = self.ctx["profile"]
         else:
@@ -159,6 +161,9 @@ class Stack(object):
                 "onfailure must be one of DO_NOTHING | ROLLBACK | DELETE"
             )

+        self.id = (self.profile, self.region, self.stackname)
+        self.connection_manager = BotoConnection(self.profile, self.region)
+
         logger.debug("Stack {} added.".format(self.id))

     def render(self):
@@ -583,13 +588,15 @@ class Stack(object):
         if self.mode == "pulumi":
             try:
                 pulumi_stack = self._get_pulumi_stack()
-                outputs=pulumi_stack.outputs()
+                outputs = pulumi_stack.outputs()
             except pulumi.automation.errors.StackNotFoundError:
                 outputs = {}
                 pass

             if vars(self._pulumi_code)["__doc__"]:
-                output= render_docs(vars(self._pulumi_code)["__doc__"], resolve_outputs(outputs))
+                output = render_docs(
+                    vars(self._pulumi_code)["__doc__"], resolve_outputs(outputs)
+                )
             else:
                 output = "No template documentation found."
@@ -907,7 +914,7 @@ class Stack(object):
         """
         Executes custom Python function within a Pulumi stack

-        These functions are executed within the stack environment and are provided with all stack input parameters as well as current outputs.
+        These plugin functions are executed within the stack environment and are provided with all stack input parameters as well as current outputs.
         Think of "docker exec" into an existing container...
         """


@@ -32,7 +32,7 @@ class StackGroup(object):
         for s in self.stacks:
             s.dump_config()

-    def read_config(self, parent_config={}):
+    def read_config(self, parent_config={}, loadStacks=True):
         if not self.path.is_dir():
             return None
@@ -51,30 +51,40 @@ class StackGroup(object):
         self.config = dict_merge(parent_config, _config)
         stackname_prefix = self.config.get("stacknameprefix", "")

+        # profile and region need special treatment due to cmd line overwrite option
+        if self.ctx["region"]:
+            self.config["region"] = self.ctx["region"]
+
+        if self.ctx["profile"]:
+            self.config["profile"] = self.ctx["profile"]
+
         logger.debug("StackGroup {} added.".format(self.name))

         # Add stacks
-        stacks = [s for s in self.path.glob("*.yaml") if not s.name == "config.yaml"]
-        for stack_path in stacks:
-            stackname = stack_path.name.split(".")[0]
-            template = stackname
-            if stackname_prefix:
-                stackname = stackname_prefix + stackname
-
-            new_stack = Stack(
-                name=stackname,
-                template=template,
-                path=stack_path,
-                rel_path=str(self.rel_path),
-                ctx=self.ctx,
-            )
-            new_stack.read_config(self.config)
-            self.stacks.append(new_stack)
+        if loadStacks:
+            stacks = [
+                s for s in self.path.glob("*.yaml") if not s.name == "config.yaml"
+            ]
+            for stack_path in stacks:
+                stackname = stack_path.name.split(".")[0]
+                template = stackname
+                if stackname_prefix:
+                    stackname = stackname_prefix + stackname
+
+                new_stack = Stack(
+                    name=stackname,
+                    template=template,
+                    path=stack_path,
+                    rel_path=str(self.rel_path),
+                    ctx=self.ctx,
+                )
+                new_stack.read_config(self.config)
+                self.stacks.append(new_stack)

         # Create StackGroups recursively
         for sub_group in [s for s in self.path.iterdir() if s.is_dir()]:
             sg = StackGroup(sub_group, self.ctx)
-            sg.read_config(self.config)
+            sg.read_config(self.config, loadStacks=loadStacks)
             self.sgs.append(sg)
@@ -108,25 +118,20 @@ class StackGroup(object):
         return stacks

-    def get_stackgroup(self, name=None, recursive=True, match_by="name"):
+    def get_stackgroup(self, name=None, match_by="path"):
         """Returns stack group matching stackgroup_name or all if None"""
-        if (
-            not name
-            or (self.name == name and match_by == "name")
-            or (self.path.match(name) and match_by == "path")
-        ):
+        if self.path.match(name):
             logger.debug("Found stack_group {}".format(self.name))
             return self

-        if name and self.name != "config":
+        if name and name != "config":
             logger.debug(
                 "Looking for stack_group {} in group {}".format(name, self.name)
             )

-        if recursive:
-            for sg in self.sgs:
-                s = sg.get_stackgroup(name, recursive, match_by)
-                if s:
-                    return s
+        for sg in self.sgs:
+            s = sg.get_stackgroup(name, match_by)
+            if s:
+                return s

         return None
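get_stackgroup() now matches purely by path: the requested name is tested against the group's directory with pathlib's Path.match(), which compares patterns against the tail of the path. A small illustration with a placeholder group layout:

from pathlib import Path

group_path = Path("config/prod/network")

print(group_path.match("network"))       # True  - the last component is enough
print(group_path.match("prod/network"))  # True  - longer suffixes work as well
print(group_path.match("prod/*"))        # True  - glob patterns too
print(group_path.match("dev/network"))   # False - wrong parent group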