Compare commits

11 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 4768de1984 | |
| | b774297ddb | |
| | a3ddae8ca7 | |
| | 3d61e7b57c | |
| | 096e244171 | |
| | ee27ba1774 | |
| | 2314e8a57b | |
| | b07da4a40c | |
| | 249afa7cb9 | |
| | 81add9de29 | |
| | a245d88f8c | |
````diff
@@ -14,7 +14,7 @@ include .ci/podman.mk
 
 Add subtree to your project:
 ```
-git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash
+git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
 ```
 
 
````
```diff
@@ -1,13 +1,3 @@
-SHELL := bash
-.SHELLFLAGS := -eu -o pipefail -c
-.DELETE_ON_ERROR:
-.SILENT: ; # no need for @
-.ONESHELL: ; # recipes execute in same shell
-.NOTPARALLEL: ; # wait for this target to finish
-.EXPORT_ALL_VARIABLES: ; # send all vars to shell
-.PHONY: all # All targets are accessible for user
-.DEFAULT: help # Running Make will run the help target
-
 # Parse version from latest git semver tag
 GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
```
```diff
@@ -33,6 +23,13 @@ ifneq ($(TRIVY_REMOTE),)
 TRIVY_OPTS ::= --server $(TRIVY_REMOTE)
 endif
 
+.SILENT: ; # no need for @
+.ONESHELL: ; # recipes execute in same shell
+.NOTPARALLEL: ; # wait for this target to finish
+.EXPORT_ALL_VARIABLES: ; # send all vars to shell
+.PHONY: all # All targets are accessible for user
+.DEFAULT: help # Running Make will run the help target
+
 help: ## Show Help
 	grep -E '^[a-zA-Z_-]+:.*?## .*$$' .ci/podman.mk | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
 
```
```diff
@@ -43,7 +40,7 @@ fmt:: ## auto format source
 lint:: ## Lint source
 
 build: ## Build the app
-	podman build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .
+	buildah build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .
 
 test:: ## test built artificats
 
```
```diff
@@ -54,17 +51,16 @@ scan: ## Scan image using trivy
 # first tag and push all actual images
 # create new manifest for each tag and add all available TAG-ARCH before pushing
 push: ecr-login ## push images to registry
-	for t in $(TAG) latest $(EXTRA_TAGS); do
+	for t in $(TAG) latest $(EXTRA_TAGS); do \
 	echo "Tagging image with $(REGISTRY)/$(IMAGE):$${t}-$(ARCH)"
-	podman tag $(IMAGE):$(TAG)-$(_ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(_ARCH)
-	podman manifest rm $(IMAGE):$$t || true
-	podman manifest create $(IMAGE):$$t
-	for a in $(ALL_ARCHS); do
-	podman image exists $(REGISTRY)/$(IMAGE):$$t-$$a && \
-	podman manifest add $(IMAGE):$$t containers-storage:$(REGISTRY)/$(IMAGE):$$t-$$a
-	done
+	buildah tag $(IMAGE):$(TAG)-$(_ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(_ARCH); \
+	buildah manifest rm $(IMAGE):$$t || true; \
+	buildah manifest create $(IMAGE):$$t; \
+	for a in $(ALL_ARCHS); do \
+	buildah manifest add $(IMAGE):$$t $(REGISTRY)/$(IMAGE):$(TAG)-$$a; \
+	done; \
 	echo "Pushing manifest $(IMAGE):$$t"
-	podman manifest push --all $(IMAGE):$$t docker://$(REGISTRY)/$(IMAGE):$$t
+	buildah manifest push --all $(IMAGE):$$t docker://$(REGISTRY)/$(IMAGE):$$t; \
 	done
 
 ecr-login: ## log into AWS ECR public
```
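For readers less familiar with the manifest flow above: the recipe tags each per-arch image into the registry namespace, recreates a manifest list per tag, adds every TAG-ARCH entry, and pushes the list. A rough, hypothetical Python sketch of the same sequence, driving `buildah` via `subprocess` (registry, image, and tag names are placeholders, not the project's real values):

```python
import subprocess

def run(*cmd):
    # Echo and execute one CLI call, raising on failure.
    print(" ".join(cmd))
    subprocess.run(cmd, check=True)

REGISTRY = "public.ecr.aws/example"   # placeholder
IMAGE, TAG = "cloudbender", "v0.0.0"  # placeholders
ARCH, ALL_ARCHS = "amd64", ["amd64", "arm64"]

for t in [TAG, "latest"]:
    run("buildah", "tag", f"{IMAGE}:{TAG}-{ARCH}", f"{REGISTRY}/{IMAGE}:{t}-{ARCH}")
    # Recreate the manifest list for this tag from scratch; ignore a
    # failed rm when the manifest does not exist yet.
    subprocess.run(["buildah", "manifest", "rm", f"{IMAGE}:{t}"])
    run("buildah", "manifest", "create", f"{IMAGE}:{t}")
    for a in ALL_ARCHS:
        run("buildah", "manifest", "add", f"{IMAGE}:{t}",
            f"{REGISTRY}/{IMAGE}:{TAG}-{a}")
    run("buildah", "manifest", "push", "--all", f"{IMAGE}:{t}",
        f"docker://{REGISTRY}/{IMAGE}:{t}")
```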
```diff
@@ -77,15 +73,12 @@ rm-remote-untagged: ## delete all remote untagged and in-dev images, keep 10 tag
 clean:: ## clean up source folder
 
 rm-image:
-	for t in $(TAG) latest $(EXTRA_TAGS); do
-	for a in $(ALL_ARCHS); do
-	podman image exists $(IMAGE):$$t-$$a && podman image rm -f $(IMAGE):$$t-$$a || true
-	done
-	done
+	test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH) > /dev/null
+	test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || echo "Error: Removing image failed"
 
 ## some useful tasks during development
 ci-pull-upstream: ## pull latest shared .ci subtree
-	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"
+	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
 
 create-repo: ## create new AWS ECR public repository
 	aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
```
```diff
@@ -1,8 +1,7 @@
 secrets:
-  - id: private-key
-    paths:
-      - "**/pulumi_aws/glue/connection.py"
   - id: gcp-service-account
     paths:
-      - "**/pulumi_aws/glue/connection.py"
+      - "/venv/lib/python*/site-packages/pulumi_aws/glue/connection.py"
+  - id: private-key
+    paths:
+      - "/venv/lib/python*/site-packages/pulumi_aws/glue/connection.py"
 
```
```diff
@@ -1,8 +1,8 @@
-ARG RUNTIME_VERSION="3.12"
+ARG RUNTIME_VERSION="3.11"
 ARG DISTRO_VERSION="3.20"
 
 FROM python:${RUNTIME_VERSION}-alpine${DISTRO_VERSION} AS builder
-ARG RUNTIME_VERSION="3.12"
+ARG RUNTIME_VERSION="3.11"
 
 RUN apk add --no-cache \
     autoconf \
```
```diff
@@ -33,9 +33,6 @@ RUN curl -fsSL https://get.pulumi.com/ | sh -s -- --version $(pip show pulumi --
 # minimal pulumi
 RUN cd /root/.pulumi/bin && rm -f *dotnet *yaml *go *java && strip pulumi* || true
 
-# Remove AWS keys from docstring to prevent trivy alerts later
-RUN sed -i -e 's/AKIA.*//' /venv/lib/python${RUNTIME_VERSION}/site-packages/pulumi_aws/lightsail/bucket_access_key.py
-
 # Now build the final runtime, incl. running rootless containers
 FROM python:${RUNTIME_VERSION}-alpine${DISTRO_VERSION}
 
```
Jenkinsfile (vendored): 2 changes

```diff
@@ -1,4 +1,4 @@
-library identifier: 'zdt-lib@main', retriever: modernSCM(
+library identifier: 'zdt-lib@master', retriever: modernSCM(
   [$class: 'GitSCMSource',
   remote: 'https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git'])
 
```
README.md: 16 changes

````diff
@@ -1,21 +1,21 @@
 #  CloudBender
 
 # About
 
 Toolset to deploy and maintain infrastructure in automated and trackable manner.
 First class support for:
 - [Pulumi](https://www.pulumi.com/docs/)
 - [AWS CloudFormation](https://aws.amazon.com/cloudformation)
 
 
 # Installation
 The preferred way of running CloudBender is using the public container. This ensure all tools and dependencies are in sync and underwent some basic testing during the development and build phase.
 
 As a fall back CloudBender and its dependencies can be installed locally see step *1b* below.
 
 ## 1a. Containerized
 
 The command below tests the ability to run containers within containers on your local setup.
 ( This most likely only works on a recent Linux box/VM, which is capable of running rootless containers within containers.
 Requires kernel >= 5.12, Cgroups V2, podman, ... )
 
@@ -36,7 +36,7 @@ and proceed with step 2)
 - either `podman` or `docker` depending on your platform
 
 ## 2. Test cli
 To verify that all pieces are in place run:
 ```
 cloudbender version
 ```
@@ -84,10 +84,10 @@ Commands:
 The state for all Pulumi resources are stored on S3 in your account and in the same region as the resources being deployed.
 No data is send to nor shared with the official Pulumi provided APIs.
 
 CloudBender configures Pulumi with a local, temporary workspace on the fly. This incl. the injection of various common parameters like the AWS account ID and region etc.
 
 ### Cloudformation
 All state is handled by AWS Cloudformation.
 The required account and region are determined by CloudBender automatically from the configuration.
 
 
````
```diff
@@ -55,13 +55,12 @@ def cli(ctx, profile, region, debug, directory):
         sys.exit(1)
 
     # Only load stackgroups to get profile and region
-    if ctx.invoked_subcommand in ["wrap", "list_stacks"]:
+    if ctx.invoked_subcommand == "wrap":
         cb.read_config(loadStacks=False)
     else:
         cb.read_config()
 
-    if debug:
-        cb.dump_config()
+    cb.dump_config()
 
     ctx.obj = cb
 
```
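The `ctx.invoked_subcommand` check above is standard Click behavior: a group callback runs before the chosen subcommand and can branch on which one was requested. A minimal self-contained sketch of the pattern (names and the loaded object are illustrative, not CloudBender's actual API):

```python
import click

@click.group()
@click.pass_context
def cli(ctx):
    # The group callback runs first; Click records which subcommand follows.
    if ctx.invoked_subcommand == "wrap":
        ctx.obj = {"stacks_loaded": False}  # cheap path: skip loading stacks
    else:
        ctx.obj = {"stacks_loaded": True}   # full config load

@cli.command()
@click.pass_obj
def wrap(obj):
    click.echo(f"stacks loaded: {obj['stacks_loaded']}")

@cli.command()
@click.pass_obj
def provision(obj):
    click.echo(f"stacks loaded: {obj['stacks_loaded']}")

if __name__ == "__main__":
    cli()
```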
```diff
@@ -213,21 +212,6 @@ def execute(cb, stack_name, function, args):
     )
 
 
-@click.command('import')
-@click.argument("stack_name")
-@click.argument("pulumi_state_file")
-@click.pass_obj
-def _import(cb, stack_name, pulumi_state_file):
-    """Imports a Pulumi state file as stack"""
-    stacks = _find_stacks(cb, [stack_name])
-
-    for s in stacks:
-        if s.mode == "pulumi":
-            s._import(pulumi_state_file)
-        else:
-            logger.info("Cannot import as {} uses Cloudformation.".format(s.stackname))
-
-
 @click.command()
 @click.argument("stack_name")
 @click.option(
```
```diff
@@ -350,16 +334,7 @@ def wrap(cb, stack_group, cmd):
     """Execute custom external program"""
 
     sg = cb.sg.get_stackgroup(stack_group)
-    sg.wrap(" ".join(cmd))
+    cb.wrap(sg, " ".join(cmd))
 
 
-@click.command()
-@click.argument("stack_group", nargs=1, required=True)
-@click.pass_obj
-def list_stacks(cb, stack_group):
-    """List all Pulumi stacks"""
-    sg = cb.sg.get_stackgroup(stack_group)
-    sg.list_stacks()
-
-
 @click.command()
```
```diff
@@ -507,9 +482,7 @@ cli.add_command(refresh)
 cli.add_command(preview)
 cli.add_command(set_config)
 cli.add_command(get_config)
-cli.add_command(_import)
 cli.add_command(export)
-cli.add_command(list_stacks)
 cli.add_command(assimilate)
 cli.add_command(execute)
 cli.add_command(wrap)
```
```diff
@@ -1,7 +1,9 @@
 import pathlib
 import logging
+import pexpect
 
 from .stackgroup import StackGroup
+from .connection import BotoConnection
 from .jinja import read_config_file
 from .exceptions import InvalidProjectDir
```
```diff
@@ -131,3 +133,17 @@ class CloudBender(object):
             matching_stacks.append(s)
 
         return matching_stacks
+
+    def wrap(self, stack_group, cmd):
+        """
+        Set AWS environment based on profile before executing a custom command, eg. steampipe
+        """
+
+        profile = stack_group.config.get("profile", "default")
+        region = stack_group.config.get("region", "global")
+
+        connection_manager = BotoConnection(profile, region)
+        connection_manager.exportProfileEnv()
+
+        child = pexpect.spawn(cmd)
+        child.interact()
```
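The new `wrap` method hands the terminal over to a spawned child process; `pexpect.spawn` followed by `interact()` is the usual pattern for that. A minimal standalone sketch, with the profile export simplified to plain `os.environ` instead of CloudBender's `BotoConnection.exportProfileEnv`:

```python
import os
import pexpect

def wrap(cmd, profile="default", region="global"):
    # Export AWS settings so the child inherits them, then run the
    # command attached to the user's terminal until it exits.
    os.environ["AWS_PROFILE"] = profile
    if region != "global":
        os.environ["AWS_DEFAULT_REGION"] = region
    child = pexpect.spawn(cmd)
    child.interact()

if __name__ == "__main__":
    wrap("aws sts get-caller-identity")  # example command, assumes aws CLI installed
```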
```diff
@@ -57,6 +57,8 @@ def pulumi_ws(func):
         self.work_dir = tempfile.mkdtemp(
             dir=tempfile.gettempdir(), prefix="cloudbender-"
         )
+        cwd = os.getcwd()
+        os.chdir(self.work_dir)
 
         # add all artifact_paths/pulumi to the search path for easier
         # imports in the pulumi code
```
```diff
@@ -146,7 +148,8 @@ def pulumi_ws(func):
         try:
             _min_version = self._pulumi_code.MIN_CLOUDBENDER_VERSION
             if semver.compare(
-                semver.Version.parse(__version__.strip("v")).finalize_version(),
+                semver.Version.parse(
+                    __version__.strip("v")).finalize_version(),
                 _min_version.strip("v")) < 0:
                 raise ValueError(
                     f"Minimal required CloudBender version is {_min_version}, but we are {__version__}!"
```
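This hunk only reflows the line, but the check itself is worth unpacking: it strips the `v` prefix, drops any pre-release suffix via `finalize_version()`, and compares against the code's declared minimum with the `semver` package. A standalone sketch of the same check (the version strings are made up):

```python
import semver

__version__ = "v1.2.3-dev4"   # hypothetical running version
_min_version = "v1.3.0"       # hypothetical minimum required version

# Drop the "v" prefix and any pre-release suffix before comparing.
current = str(semver.Version.parse(__version__.strip("v")).finalize_version())

if semver.compare(current, _min_version.strip("v")) < 0:
    raise ValueError(
        f"Minimal required CloudBender version is {_min_version}, "
        f"but we are {__version__}!"
    )
```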
```diff
@@ -199,11 +202,12 @@ def pulumi_ws(func):
             secrets_provider=secrets_provider,
         )
 
-        # self.pulumi_workspace = pulumi.automation.LocalWorkspace(self.pulumi_ws_opts)
-
         response = func(self, *args, **kwargs)
 
         # Cleanup temp workspace
+        if cwd:
+            os.chdir(cwd)
+
         if self.work_dir and os.path.exists(self.work_dir):
             shutil.rmtree(self.work_dir)
 
```
```diff
@@ -8,6 +8,7 @@ import pathlib
 import pprint
 import pulumi
 import importlib
+import pkg_resources
 
 from datetime import datetime, timedelta
 from dateutil.tz import tzutc
```
```diff
@@ -534,7 +535,6 @@ class Stack(object):
         logger.info("Passed.")
         return 0
 
-    @pulumi_ws
     def get_outputs(self, include=".*", values=False):
         """gets outputs of the stack"""
 
```
```diff
@@ -851,6 +851,7 @@ class Stack(object):
 
         return status
 
+    @pulumi_ws
     @exec_hooks
     def update(self):
         """Updates an existing stack"""
```
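`@pulumi_ws` is now stacked outside `@exec_hooks`, so the temporary Pulumi workspace is set up before the hooks fire and torn down after them. Python applies decorators bottom-up, so the outermost decorator runs first at call time; a tiny sketch of that ordering (the decorator names are stand-ins, not the project's real ones):

```python
def outer(func):
    def wrapped(*args, **kwargs):
        print("outer: set up workspace")       # like @pulumi_ws
        result = func(*args, **kwargs)
        print("outer: tear down workspace")
        return result
    return wrapped

def inner(func):
    def wrapped(*args, **kwargs):
        print("inner: pre-hook")               # like @exec_hooks
        result = func(*args, **kwargs)
        print("inner: post-hook")
        return result
    return wrapped

@outer   # applied last, runs first
@inner   # applied first, wraps update() directly
def update():
    print("update body")

update()
# outer: set up workspace / inner: pre-hook / update body /
# inner: post-hook / outer: tear down workspace
```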
```diff
@@ -987,7 +988,7 @@ class Stack(object):
     def assimilate(self):
         """Import resources into Pulumi stack"""
 
-        pulumi_stack = self._get_pulumi_stack()
+        pulumi_stack = self._get_pulumi_stack(create=True)
 
         # now lets import each defined resource
         for r in self._pulumi_code.RESOURCES:
```
|
|||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
@pulumi_ws
|
|
||||||
def _import(self, pulumi_state_file):
|
|
||||||
"""Imports a Pulumi stack"""
|
|
||||||
|
|
||||||
pulumi_stack = self._get_pulumi_stack()
|
|
||||||
|
|
||||||
with open(pulumi_state_file, "r") as file:
|
|
||||||
state = json.loads(file.read())
|
|
||||||
deployment = pulumi.automation.Deployment(version=3, deployment=state)
|
|
||||||
pulumi_stack.import_stack(deployment)
|
|
||||||
|
|
||||||
return
|
|
||||||
|
|
||||||
@pulumi_ws
|
@pulumi_ws
|
||||||
def set_config(self, key, value, secret):
|
def set_config(self, key, value, secret):
|
||||||
"""Set a config or secret"""
|
"""Set a config or secret"""
|
||||||
```diff
@@ -1071,18 +1059,11 @@ class Stack(object):
 
         if "parameters" not in settings:
             settings["parameters"] = {}
 
         # hack for bug above, we support one level of nested values for now
         _val = pulumi_settings["config"]["{}:{}".format(
             self.parameters["Conglomerate"], key)]
         if '.' in key:
-            try:
-                (root, leaf) = key.split('.')
-            except ValueError:
-                raise ParameterIllegalValue(
-                    "Currently only one level hierachies within parameters are supported!"
-                )
+            (root, leaf) = key.split('.')
 
             if root not in settings["parameters"]:
                 settings["parameters"][root] = {}
 
```
```diff
@@ -1324,7 +1305,6 @@ class Stack(object):
         logger.info(" ".join([self.region, self.stackname, text]))
 
     def _get_pulumi_stack(self, create=False):
-
         if create:
             pulumi_stack = pulumi.automation.create_or_select_stack(
                 stack_name=self.pulumi_stackname,
```
```diff
@@ -1333,7 +1313,7 @@ class Stack(object):
                 opts=self.pulumi_ws_opts,
             )
             pulumi_stack.workspace.install_plugin(
-                "aws", importlib.metadata.distribution("pulumi_aws").version
+                "aws", pkg_resources.get_distribution("pulumi_aws").version
            )
 
         else:
```
```diff
@@ -1,13 +1,6 @@
 import logging
 import pprint
-import pexpect
-import pulumi
-import tempfile
 
-import rich.table
-import rich.console
-
-from .connection import BotoConnection
 from .utils import dict_merge
 from .jinja import read_config_file
 from .stack import Stack
```
```diff
@@ -32,7 +25,7 @@ class StackGroup(object):
         for sg in self.sgs:
             sg.dump_config()
 
-        logger.info(
+        logger.debug(
             "StackGroup {}: {}".format(self.rel_path, pprint.pformat(self.config))
         )
 
```
```diff
@@ -142,54 +135,3 @@ class StackGroup(object):
                 return s
 
         return None
-
-    def wrap(self, cmd):
-        """
-        Set AWS environment based on profile before executing a custom command, eg. steampipe
-        """
-
-        profile = self.config.get("profile", "default")
-        region = self.config.get("region", "global")
-
-        connection_manager = BotoConnection(profile, region)
-        connection_manager.exportProfileEnv()
-
-        child = pexpect.spawn(cmd)
-        child.interact()
-
-    def list_stacks(self):
-        project_name = self.config["parameters"]["Conglomerate"]
-        pulumi_backend = "{}/{}/{}".format(self.config["pulumi"]["backend"], project_name, self.config["region"])
-
-        project_settings = pulumi.automation.ProjectSettings(
-            name=project_name, runtime="python", backend=pulumi.automation.ProjectBackend(url=pulumi_backend)
-        )
-
-        work_dir = tempfile.mkdtemp(
-            dir=tempfile.gettempdir(), prefix="cloudbender-"
-        )
-
-        # AWS setup
-        profile = self.config.get("profile", "default")
-        region = self.config.get("region", "global")
-
-        connection_manager = BotoConnection(profile, region)
-        connection_manager.exportProfileEnv()
-
-        pulumi_workspace = pulumi.automation.LocalWorkspace(
-            work_dir=work_dir,
-            project_settings=project_settings
-        )
-
-        stacks = pulumi_workspace.list_stacks()
-
-        table = rich.table.Table(title="Pulumi stacks")
-        table.add_column("Name")
-        table.add_column("Last Update")
-        table.add_column("Resources")
-
-        for s in stacks:
-            table.add_row(s.name, str(s.last_update), str(s.resource_count))
-
-        console = rich.console.Console()
-        console.print(table)
```
```diff
@@ -11,23 +11,21 @@ authors = [
 description = "Deploy and maintain infrastructure in automated and trackable manner"
 readme = "README.md"
 license = "AGPL-3.0-or-later"
-requires-python = ">=3.12"
+requires-python = ">=3.11"
 dependencies = [
-    "boto3==1.35.70",
+    "boto3==1.35.15",
     "mock==5.1.0",
     "Jinja2==3.1.4",
     "click==8.1.7",
     "pexpect==4.9.0",
-    "python-minifier==2.11.3",
-    "cfn-lint==1.20.1",
+    "python-minifier==2.9.0",
+    "cfn-lint==1.12.4",
     "ruamel.yaml==0.18.6",
-    "rich==13.9.4",
-    "pulumi==3.142.0",
-    "pulumi-aws==6.61.0",
-    "pulumi-aws-native==1.11.0",
-    "pulumi-policy==1.13.0",
+    "pulumi==3.131.0",
+    "pulumi-aws==6.51.0",
+    "pulumi-aws-native==0.121.0",
+    "pulumi-policy==1.12.0",
     "pulumi-command==1.0.1",
-    "pulumi_random==4.16.7",
 ]
 
 classifiers = [
```
|
Loading…
x
Reference in New Issue
Block a user