Compare commits


11 Commits

12 changed files with 75 additions and 173 deletions


@@ -14,7 +14,7 @@ include .ci/podman.mk
Add subtree to your project:
```
git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash
git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
```


@@ -1,13 +1,3 @@
SHELL := bash
.SHELLFLAGS := -eu -o pipefail -c
.DELETE_ON_ERROR:
.SILENT: ; # no need for @
.ONESHELL: ; # recipes execute in same shell
.NOTPARALLEL: ; # wait for this target to finish
.EXPORT_ALL_VARIABLES: ; # send all vars to shell
.PHONY: all # All targets are accessible for user
.DEFAULT: help # Running Make will run the help target
# Parse version from latest git semver tag
GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
@@ -33,6 +23,13 @@ ifneq ($(TRIVY_REMOTE),)
TRIVY_OPTS ::= --server $(TRIVY_REMOTE)
endif
.SILENT: ; # no need for @
.ONESHELL: ; # recipes execute in same shell
.NOTPARALLEL: ; # wait for this target to finish
.EXPORT_ALL_VARIABLES: ; # send all vars to shell
.PHONY: all # All targets are accessible for user
.DEFAULT: help # Running Make will run the help target
help: ## Show Help
grep -E '^[a-zA-Z_-]+:.*?## .*$$' .ci/podman.mk | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
@@ -43,7 +40,7 @@ fmt:: ## auto format source
lint:: ## Lint source
build: ## Build the app
podman build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .
buildah build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .
test:: ## test built artifacts
@@ -54,17 +51,16 @@ scan: ## Scan image using trivy
# first tag and push all actual images
# create new manifest for each tag and add all available TAG-ARCH before pushing
push: ecr-login ## push images to registry
for t in $(TAG) latest $(EXTRA_TAGS); do
for t in $(TAG) latest $(EXTRA_TAGS); do \
echo "Tagging image with $(REGISTRY)/$(IMAGE):$${t}-$(ARCH)"
podman tag $(IMAGE):$(TAG)-$(_ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(_ARCH)
podman manifest rm $(IMAGE):$$t || true
podman manifest create $(IMAGE):$$t
for a in $(ALL_ARCHS); do
podman image exists $(REGISTRY)/$(IMAGE):$$t-$$a && \
podman manifest add $(IMAGE):$$t containers-storage:$(REGISTRY)/$(IMAGE):$$t-$$a
done
buildah tag $(IMAGE):$(TAG)-$(_ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(_ARCH); \
buildah manifest rm $(IMAGE):$$t || true; \
buildah manifest create $(IMAGE):$$t; \
for a in $(ALL_ARCHS); do \
buildah manifest add $(IMAGE):$$t $(REGISTRY)/$(IMAGE):$(TAG)-$$a; \
done; \
echo "Pushing manifest $(IMAGE):$$t"
podman manifest push --all $(IMAGE):$$t docker://$(REGISTRY)/$(IMAGE):$$t
buildah manifest push --all $(IMAGE):$$t docker://$(REGISTRY)/$(IMAGE):$$t; \
done
ecr-login: ## log into AWS ECR public
@@ -77,15 +73,12 @@ rm-remote-untagged: ## delete all remote untagged and in-dev images, keep 10 tag
clean:: ## clean up source folder
rm-image:
for t in $(TAG) latest $(EXTRA_TAGS); do
for a in $(ALL_ARCHS); do
podman image exists $(IMAGE):$$t-$$a && podman image rm -f $(IMAGE):$$t-$$a || true
done
done
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH) > /dev/null
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || echo "Error: Removing image failed"
## some useful tasks during development
ci-pull-upstream: ## pull latest shared .ci subtree
git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"
git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
create-repo: ## create new AWS ECR public repository
aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)


@@ -1,8 +1,7 @@
secrets:
- id: private-key
paths:
- "**/pulumi_aws/glue/connection.py"
- id: gcp-service-account
paths:
- "**/pulumi_aws/glue/connection.py"
- "/venv/lib/python*/site-packages/pulumi_aws/glue/connection.py"
- id: private-key
paths:
- "/venv/lib/python*/site-packages/pulumi_aws/glue/connection.py"


@@ -1,8 +1,8 @@
ARG RUNTIME_VERSION="3.12"
ARG RUNTIME_VERSION="3.11"
ARG DISTRO_VERSION="3.20"
FROM python:${RUNTIME_VERSION}-alpine${DISTRO_VERSION} AS builder
ARG RUNTIME_VERSION="3.12"
ARG RUNTIME_VERSION="3.11"
RUN apk add --no-cache \
autoconf \
@@ -33,9 +33,6 @@ RUN curl -fsSL https://get.pulumi.com/ | sh -s -- --version $(pip show pulumi --
# minimal pulumi
RUN cd /root/.pulumi/bin && rm -f *dotnet *yaml *go *java && strip pulumi* || true
# Remove AWS keys from docstring to prevent trivy alerts later
RUN sed -i -e 's/AKIA.*//' /venv/lib/python${RUNTIME_VERSION}/site-packages/pulumi_aws/lightsail/bucket_access_key.py
# Now build the final runtime, incl. running rootless containers
FROM python:${RUNTIME_VERSION}-alpine${DISTRO_VERSION}

Jenkinsfile

@@ -1,4 +1,4 @@
library identifier: 'zdt-lib@main', retriever: modernSCM(
library identifier: 'zdt-lib@master', retriever: modernSCM(
[$class: 'GitSCMSource',
remote: 'https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git'])


@@ -1,21 +1,21 @@
# ![Logo](https://git.zero-downtime.net/ZeroDownTime/CloudBender/media/branch/main/cloudbender.png) CloudBender
# ![Logo](https://git.zero-downtime.net/ZeroDownTime/CloudBender/media/branch/master/cloudbender.png) CloudBender
# About
Toolset to deploy and maintain infrastructure in an automated and trackable manner.
First class support for:
- [Pulumi](https://www.pulumi.com/docs/)
- [AWS CloudFormation](https://aws.amazon.com/cloudformation)
# Installation
The preferred way of running CloudBender is using the public container. This ensures all tools and dependencies are in sync and underwent some basic testing during the development and build phase.
As a fallback, CloudBender and its dependencies can be installed locally; see step *1b* below.
## 1a. Containerized
The command below tests the ability to run containers within containers on your local setup.
(This most likely only works on a recent Linux box/VM capable of running rootless containers within containers.
Requires kernel >= 5.12, cgroups v2, podman, ...)
@@ -36,7 +36,7 @@ and proceed with step 2)
- either `podman` or `docker` depending on your platform
## 2. Test cli
To verify that all pieces are in place run:
```
cloudbender version
```
@@ -84,10 +84,10 @@ Commands:
The state for all Pulumi resources is stored on S3 in your account and in the same region as the resources being deployed.
No data is sent to nor shared with the official Pulumi provided APIs.
CloudBender configures Pulumi with a local, temporary workspace on the fly. This includes the injection of various common parameters such as the AWS account ID and region.
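For illustration, here is a minimal sketch of such a temporary workspace built with the Pulumi automation API, mirroring the (removed) `list_stacks` code further down in this compare; the bucket, project, and region values are placeholders:
```
import tempfile

from pulumi import automation

# Placeholders: CloudBender derives these from the stack group config
project_name = "myproject"
region = "eu-central-1"
backend_url = "s3://my-state-bucket/{}/{}".format(project_name, region)

project_settings = automation.ProjectSettings(
    name=project_name,
    runtime="python",
    backend=automation.ProjectBackend(url=backend_url),
)

# Throw-away workspace directory, removed again once the command has run
work_dir = tempfile.mkdtemp(dir=tempfile.gettempdir(), prefix="cloudbender-")

workspace = automation.LocalWorkspace(
    work_dir=work_dir, project_settings=project_settings
)
```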
### Cloudformation
All state is handled by AWS Cloudformation.
The required account and region are determined by CloudBender automatically from the configuration.


@@ -55,13 +55,12 @@ def cli(ctx, profile, region, debug, directory):
sys.exit(1)
# Only load stackgroups to get profile and region
if ctx.invoked_subcommand in ["wrap", "list_stacks"]:
if ctx.invoked_subcommand == "wrap":
cb.read_config(loadStacks=False)
else:
cb.read_config()
if debug:
cb.dump_config()
ctx.obj = cb
@@ -213,21 +212,6 @@ def execute(cb, stack_name, function, args):
)
@click.command('import')
@click.argument("stack_name")
@click.argument("pulumi_state_file")
@click.pass_obj
def _import(cb, stack_name, pulumi_state_file):
"""Imports a Pulumi state file as stack"""
stacks = _find_stacks(cb, [stack_name])
for s in stacks:
if s.mode == "pulumi":
s._import(pulumi_state_file)
else:
logger.info("Cannot import as {} uses Cloudformation.".format(s.stackname))
@click.command()
@click.argument("stack_name")
@click.option(
@@ -350,16 +334,7 @@ def wrap(cb, stack_group, cmd):
"""Execute custom external program"""
sg = cb.sg.get_stackgroup(stack_group)
sg.wrap(" ".join(cmd))
@click.command()
@click.argument("stack_group", nargs=1, required=True)
@click.pass_obj
def list_stacks(cb, stack_group):
"""List all Pulumi stacks"""
sg = cb.sg.get_stackgroup(stack_group)
sg.list_stacks()
cb.wrap(sg, " ".join(cmd))
@click.command()
@@ -507,9 +482,7 @@ cli.add_command(refresh)
cli.add_command(preview)
cli.add_command(set_config)
cli.add_command(get_config)
cli.add_command(_import)
cli.add_command(export)
cli.add_command(list_stacks)
cli.add_command(assimilate)
cli.add_command(execute)
cli.add_command(wrap)


@@ -1,7 +1,9 @@
import pathlib
import logging
import pexpect
from .stackgroup import StackGroup
from .connection import BotoConnection
from .jinja import read_config_file
from .exceptions import InvalidProjectDir
@@ -131,3 +133,17 @@ class CloudBender(object):
matching_stacks.append(s)
return matching_stacks
def wrap(self, stack_group, cmd):
"""
Set AWS environment based on profile before executing a custom command, eg. steampipe
"""
profile = stack_group.config.get("profile", "default")
region = stack_group.config.get("region", "global")
connection_manager = BotoConnection(profile, region)
connection_manager.exportProfileEnv()
child = pexpect.spawn(cmd)
child.interact()
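For context, this is roughly what the new `wrap()` path amounts to at runtime: export the AWS environment for the selected profile, then hand the terminal to the spawned child until it exits. A sketch with assumed behavior of `exportProfileEnv()` (the spawned command is just an example):
```
import os
import pexpect

# exportProfileEnv() is assumed to populate os.environ along these lines
os.environ["AWS_PROFILE"] = "default"

child = pexpect.spawn("aws sts get-caller-identity", encoding="utf-8")
child.interact()  # user interacts with the child process directly
```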


@@ -57,6 +57,8 @@ def pulumi_ws(func):
self.work_dir = tempfile.mkdtemp(
dir=tempfile.gettempdir(), prefix="cloudbender-"
)
cwd = os.getcwd()
os.chdir(self.work_dir)
# add all artifact_paths/pulumi to the search path for easier
# imports in the pulumi code
@@ -146,7 +148,8 @@ def pulumi_ws(func):
try:
_min_version = self._pulumi_code.MIN_CLOUDBENDER_VERSION
if semver.compare(
semver.Version.parse(__version__.strip("v")).finalize_version(),
semver.Version.parse(
__version__.strip("v")).finalize_version(),
_min_version.strip("v")) < 0:
raise ValueError(
f"Minimal required CloudBender version is {_min_version}, but we are {__version__}!"
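The hunk above only re-wraps the `semver.Version.parse(...)` call for line length; the version gate itself is unchanged. A standalone sketch of that check (version strings are hypothetical):
```
import semver

__version__ = "v0.42.1-dev1"  # hypothetical running CloudBender version
_min_version = "v0.42.0"      # hypothetical MIN_CLOUDBENDER_VERSION from the Pulumi code

# finalize_version() strips pre-release/build metadata, so "0.42.1-dev1"
# compares as "0.42.1"
current = semver.Version.parse(__version__.strip("v")).finalize_version()

if semver.compare(str(current), _min_version.strip("v")) < 0:
    raise ValueError(
        f"Minimal required CloudBender version is {_min_version}, but we are {__version__}!"
    )
```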
@@ -199,11 +202,12 @@ def pulumi_ws(func):
secrets_provider=secrets_provider,
)
# self.pulumi_workspace = pulumi.automation.LocalWorkspace(self.pulumi_ws_opts)
response = func(self, *args, **kwargs)
# Cleanup temp workspace
if cwd:
os.chdir(cwd)
if self.work_dir and os.path.exists(self.work_dir):
shutil.rmtree(self.work_dir)
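Taken together, the two hunks above make the decorator chdir into the temporary workspace and restore the caller's working directory before removing it. A standalone sketch of that pattern (names assumed; a try/finally is added here so cleanup also runs on errors, which the hunk itself does not show):
```
import os
import shutil
import tempfile
from functools import wraps

def with_temp_workspace(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        work_dir = tempfile.mkdtemp(prefix="cloudbender-")
        cwd = os.getcwd()
        os.chdir(work_dir)
        try:
            return func(*args, **kwargs)
        finally:
            os.chdir(cwd)  # restore the caller's working directory
            shutil.rmtree(work_dir, ignore_errors=True)
    return wrapper
```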


@@ -8,6 +8,7 @@ import pathlib
import pprint
import pulumi
import importlib
import pkg_resources
from datetime import datetime, timedelta
from dateutil.tz import tzutc
@@ -534,7 +535,6 @@ class Stack(object):
logger.info("Passed.")
return 0
@pulumi_ws
def get_outputs(self, include=".*", values=False):
"""gets outputs of the stack"""
@@ -851,6 +851,7 @@ class Stack(object):
return status
@pulumi_ws
@exec_hooks
def update(self):
"""Updates an existing stack"""
@@ -987,7 +988,7 @@
def assimilate(self):
"""Import resources into Pulumi stack"""
pulumi_stack = self._get_pulumi_stack()
pulumi_stack = self._get_pulumi_stack(create=True)
# now lets import each defined resource
for r in self._pulumi_code.RESOURCES:
@@ -1024,19 +1025,6 @@
return
@pulumi_ws
def _import(self, pulumi_state_file):
"""Imports a Pulumi stack"""
pulumi_stack = self._get_pulumi_stack()
with open(pulumi_state_file, "r") as file:
state = json.loads(file.read())
deployment = pulumi.automation.Deployment(version=3, deployment=state)
pulumi_stack.import_stack(deployment)
return
@pulumi_ws
def set_config(self, key, value, secret):
"""Set a config or secret"""
@@ -1071,18 +1059,11 @@
if "parameters" not in settings:
settings["parameters"] = {}
# hack for bug above, we support one level of nested values for now
_val = pulumi_settings["config"]["{}:{}".format(
self.parameters["Conglomerate"], key)]
if '.' in key:
try:
(root, leaf) = key.split('.')
except ValueError:
raise ParameterIllegalValue(
"Currently only one-level hierarchies within parameters are supported!"
)
(root, leaf) = key.split('.')
if root not in settings["parameters"]:
settings["parameters"][root] = {}
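With the try/except removed, a key containing more than one dot now raises a plain ValueError from the tuple unpacking instead of ParameterIllegalValue. A small sketch of the one-level nesting scheme this code supports (key and value are hypothetical):
```
settings = {"parameters": {}}
key, value = "vpc.cidr", "10.0.0.0/16"

if "." in key:
    root, leaf = key.split(".")  # raises ValueError for "a.b.c"-style keys
    settings["parameters"].setdefault(root, {})[leaf] = value
else:
    settings["parameters"][key] = value

assert settings["parameters"] == {"vpc": {"cidr": "10.0.0.0/16"}}
```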
@@ -1324,7 +1305,6 @@
logger.info(" ".join([self.region, self.stackname, text]))
def _get_pulumi_stack(self, create=False):
if create:
pulumi_stack = pulumi.automation.create_or_select_stack(
stack_name=self.pulumi_stackname,
@@ -1333,7 +1313,7 @@
opts=self.pulumi_ws_opts,
)
pulumi_stack.workspace.install_plugin(
"aws", importlib.metadata.distribution("pulumi_aws").version
"aws", pkg_resources.get_distribution("pulumi_aws").version
)
else:
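This hunk reverts the stdlib `importlib.metadata` lookup to the legacy setuptools `pkg_resources` API, in line with the other downgrades in this compare. Both resolve the version of an installed distribution; a quick equivalence check (assuming `pulumi_aws` is installed):
```
import importlib.metadata

import pkg_resources  # legacy setuptools API, deprecated upstream

assert pkg_resources.get_distribution("pulumi_aws").version == \
    importlib.metadata.distribution("pulumi_aws").version
```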


@@ -1,13 +1,6 @@
import logging
import pprint
import pexpect
import pulumi
import tempfile
import rich.table
import rich.console
from .connection import BotoConnection
from .utils import dict_merge
from .jinja import read_config_file
from .stack import Stack
@@ -32,7 +25,7 @@ class StackGroup(object):
for sg in self.sgs:
sg.dump_config()
logger.info(
logger.debug(
"StackGroup {}: {}".format(self.rel_path, pprint.pformat(self.config))
)
@@ -142,54 +135,3 @@ class StackGroup(object):
return s
return None
def wrap(self, cmd):
"""
Set AWS environment based on profile before executing a custom command, eg. steampipe
"""
profile = self.config.get("profile", "default")
region = self.config.get("region", "global")
connection_manager = BotoConnection(profile, region)
connection_manager.exportProfileEnv()
child = pexpect.spawn(cmd)
child.interact()
def list_stacks(self):
project_name = self.config["parameters"]["Conglomerate"]
pulumi_backend = "{}/{}/{}".format(self.config["pulumi"]["backend"], project_name, self.config["region"])
project_settings = pulumi.automation.ProjectSettings(
name=project_name, runtime="python", backend=pulumi.automation.ProjectBackend(url=pulumi_backend)
)
work_dir = tempfile.mkdtemp(
dir=tempfile.gettempdir(), prefix="cloudbender-"
)
# AWS setup
profile = self.config.get("profile", "default")
region = self.config.get("region", "global")
connection_manager = BotoConnection(profile, region)
connection_manager.exportProfileEnv()
pulumi_workspace = pulumi.automation.LocalWorkspace(
work_dir=work_dir,
project_settings=project_settings
)
stacks = pulumi_workspace.list_stacks()
table = rich.table.Table(title="Pulumi stacks")
table.add_column("Name")
table.add_column("Last Update")
table.add_column("Resources")
for s in stacks:
table.add_row(s.name, str(s.last_update), str(s.resource_count))
console = rich.console.Console()
console.print(table)


@@ -11,23 +11,21 @@ authors = [
description = "Deploy and maintain infrastructure in automated and trackable manner"
readme = "README.md"
license = "AGPL-3.0-or-later"
requires-python = ">=3.12"
requires-python = ">=3.11"
dependencies = [
"boto3==1.35.70",
"boto3==1.35.15",
"mock==5.1.0",
"Jinja2==3.1.4",
"click==8.1.7",
"pexpect==4.9.0",
"python-minifier==2.11.3",
"cfn-lint==1.20.1",
"python-minifier==2.9.0",
"cfn-lint==1.12.4",
"ruamel.yaml==0.18.6",
"rich==13.9.4",
"pulumi==3.142.0",
"pulumi-aws==6.61.0",
"pulumi-aws-native==1.11.0",
"pulumi-policy==1.13.0",
"pulumi==3.131.0",
"pulumi-aws==6.51.0",
"pulumi-aws-native==0.121.0",
"pulumi-policy==1.12.0",
"pulumi-command==1.0.1",
"pulumi_random==4.16.7",
]
classifiers = [