feat: add execute task, rework Dockerfile to allow podman run rootless
parent c7b0daab22
commit b32de905a4

Dockerfile (40 lines changed)
@@ -39,9 +39,11 @@ RUN pip install . --no-deps
 RUN cd /root/.pulumi/bin && rm -f *dotnet *nodejs *go *java && strip pulumi* || true


-# Now build the final runtime
+# Now build the final runtime, incl. running rootless containers
 FROM python:${RUNTIME_VERSION}-alpine${DISTRO_VERSION}

+ARG USER=cloudbender
+
 #cd /etc/apk/keys && \
 #echo "@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
 #cfssl@testing \
@@ -52,20 +54,46 @@ RUN apk upgrade -U --available --no-cache && \
     libc6-compat \
     ca-certificates \
     aws-cli \
-    podman
+    fuse-overlayfs \
+    podman \
+    buildah \
+    strace

 COPY --from=builder /venv /venv
 COPY --from=builder /root/.pulumi/bin /usr/local/bin
-RUN mkdir /workspace && \
+
+# Don't run as root by default
+RUN addgroup $USER && adduser $USER -G $USER -D && \
+    mkdir -p /home/$USER/.local/share/containers && \
+    chown $USER:$USER -R /home/$USER
+
+# Rootless podman
+# https://github.com/containers/podman/blob/main/contrib/podmanimage/stable/Containerfile
+ADD conf/containers.conf conf/registries.conf conf/storage.conf /etc/containers/
+ADD --chown=$USER:$USER conf/podman-containers.conf /home/$USER/.config/containers/containers.conf
+
+RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers \
+    /var/lib/shared/vfs-images /var/lib/shared/vfs-layers && \
+    touch /var/lib/shared/overlay-images/images.lock /var/lib/shared/overlay-layers/layers.lock \
+    /var/lib/shared/vfs-images/images.lock /var/lib/shared/vfs-layers/layers.lock && \
+    mkdir /tmp/podman-run-1000 && chown $USER:$USER /tmp/podman-run-1000 && chmod 700 /tmp/podman-run-1000 && \
+    echo -e "$USER:1:999\n$USER:1001:64535" > /etc/subuid && \
+    echo -e "$USER:1:999\n$USER:1001:64535" > /etc/subgid && \
+    mkdir /workspace && \
+    cd /usr/bin && ln -s podman docker

 WORKDIR /workspace

+ENV XDG_RUNTIME_DIR=/tmp/podman-run-1000
+ENV _CONTAINERS_USERNS_CONFIGURED=""
+ENV BUILDAH_ISOLATION=chroot
+
 ENV VIRTUAL_ENV=/venv
 ENV PATH="$VIRTUAL_ENV/bin:$PATH"

-# Dont run as root by default
-RUN addgroup cloudbender && adduser cloudbender -G cloudbender -D
-USER cloudbender
+USER $USER
+
+# Allow container layers to be stored in PVCs
+VOLUME /home/$USER/.local/share/containers

 CMD ["cloudbender"]
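The runtime stage above is what makes rootless podman work: the unprivileged $USER gets /etc/subuid and /etc/subgid ranges, fuse-overlayfs backs the overlay storage driver, and XDG_RUNTIME_DIR points at the pre-created /tmp/podman-run-1000. A minimal smoke-test sketch, assuming it runs as the cloudbender user inside an image built from this Dockerfile; the alpine image reference is only an example:

# smoke_test_rootless.py - hedged sketch; assumes podman on PATH and the
# subuid/subgid mappings and storage config installed by this Dockerfile
import subprocess

def podman_rootless_ok() -> bool:
    # 'podman info' exercises the userns and storage settings from
    # /etc/containers/{containers,storage}.conf
    if subprocess.run(["podman", "info"], capture_output=True).returncode != 0:
        return False
    # run a throwaway container; 'docker' is a symlink to podman in this image
    run = subprocess.run(
        ["podman", "run", "--rm", "docker.io/library/alpine", "true"],
        capture_output=True,
    )
    return run.returncode == 0

if __name__ == "__main__":
    print("rootless podman:", "ok" if podman_rootless_ok() else "broken")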
@@ -152,8 +152,30 @@ def refresh(cb, stack_name):

 @click.command()
 @click.argument("stack_name")
+@click.argument("function", default="")
+@click.argument('args', nargs=-1)
+@click.option(
+    "--listall",
+    is_flag=True,
+    help="List all available execute functions for this stack",
+)
+@click.pass_obj
+def execute(cb, stack_name, function, args, listall=False):
+    """Executes custom Python function within an existing stack context"""
+    stacks = _find_stacks(cb, [stack_name])
+
+    for s in stacks:
+        if s.mode == "pulumi":
+            s.execute(function, args, listall)
+        else:
+            logger.info("{} uses Cloudformation, no exec feature available.".format(s.stackname))
+
+
+@click.command()
+@click.argument("stack_name")
 @click.option(
-    "-r", "--remove-pending-operations",
+    "-r",
+    "--remove-pending-operations",
     is_flag=True,
     help="All pending stack operations are removed and the stack will be re-imported",
 )
@@ -408,6 +430,7 @@ cli.add_command(set_config)
 cli.add_command(get_config)
 cli.add_command(export)
 cli.add_command(assimilate)
+cli.add_command(execute)

 if __name__ == "__main__":
     cli(obj={})
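The new execute command delegates to Stack.execute() (see stack.py further down), which looks up attributes named _execute_<function> in the stack's Pulumi code and calls them with the resolved stack config, outputs and any extra positional args; --listall instead probes each function with docstring=True. A hedged sketch of a conforming function, where the name rotate_keys and the body are purely illustrative:

# Lives in the stack's Pulumi template module; discovered via the
# "_execute_" prefix. Name and body are hypothetical.
def _execute_rotate_keys(config=None, outputs=None, args=(), docstring=False):
    """rotate the access keys used by this stack"""
    if docstring:
        # 'cloudbender execute <stack> --listall' calls with docstring=True
        return _execute_rotate_keys.__doc__
    # a real implementation would act on the live stack here
    print("config keys:", sorted(config or {}))
    print("output keys:", sorted(outputs or {}))
    print("extra args:", list(args))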
@@ -1,8 +1,5 @@
 import os
 import sys
 import subprocess
-import tempfile
-import shutil
-from functools import wraps

 from .exceptions import InvalidHook
@@ -37,25 +34,6 @@ def exec_hooks(func):
     return decorated


-def pulumi_ws(func):
-    @wraps(func)
-    def decorated(self, *args, **kwargs):
-        # setup temp workspace
-        self.work_dir = tempfile.mkdtemp(
-            dir=tempfile.gettempdir(), prefix="cloudbender-"
-        )
-
-        response = func(self, *args, **kwargs)
-
-        # Cleanup temp workspace
-        if os.path.exists(self.work_dir):
-            shutil.rmtree(self.work_dir)
-
-        return response
-
-    return decorated
-
-
 # Various hooks
 def cmd(stack, arguments):
     """
@@ -2,179 +2,179 @@ import sys
 import os
 import re
 import shutil
+import tempfile
 import importlib
 import pkg_resources
 import pulumi
+from functools import wraps

 import logging

 logger = logging.getLogger(__name__)

+# Fail early if pulumi binaries are not available
+if not shutil.which("pulumi"):
+    raise FileNotFoundError(
+        "Cannot find pulumi binary, see https://www.pulumi.com/docs/get-started/install/"
+    )

-def pulumi_init(stack, create=False):
-
-    # Fail early if pulumi binaries are not available
-    if not shutil.which("pulumi"):
-        raise FileNotFoundError(
-            "Cannot find pulumi binary, see https://www.pulumi.com/docs/get-started/install/"
-        )
-
-    # add all artifact_paths/pulumi to the search path for easier imports in the pulumi code
-    for artifacts_path in stack.ctx["artifact_paths"]:
-        _path = "{}/pulumi".format(artifacts_path.resolve())
-        sys.path.append(_path)
-
-    # Try local implementation first, similar to Jinja2 mode
-    _found = False
-    try:
-        _stack = importlib.import_module(
-            "config.{}.{}".format(stack.rel_path, stack.template).replace("/", ".")
-        )
-        _found = True
-
-    except ImportError:
-        for artifacts_path in stack.ctx["artifact_paths"]:
-            try:
-                spec = importlib.util.spec_from_file_location(
-                    "_stack",
-                    "{}/pulumi/{}.py".format(artifacts_path.resolve(), stack.template),
-                )
-                _stack = importlib.util.module_from_spec(spec)
-                spec.loader.exec_module(_stack)
-                _found = True
-
-            except FileNotFoundError:
-                pass
-
-    if not _found:
-        raise FileNotFoundError(
-            "Cannot find Pulumi implementation for {}".format(stack.stackname)
-        )
-
-    # Store internal pulumi code reference
-    stack._pulumi_code = _stack
-    project_name = stack.parameters["Conglomerate"]
-
-    # Remove stacknameprefix if equals Conglomerate as Pulumi implicitly prefixes project_name
-    pulumi_stackname = re.sub(r"^" + project_name + "-?", "", stack.stackname)
-    try:
-        pulumi_backend = "{}/{}/{}".format(
-            stack.pulumi["backend"], project_name, stack.region
-        )
-
-    except KeyError:
-        raise KeyError("Missing pulumi.backend setting !")
-
-    account_id = stack.connection_manager.call(
-        "sts", "get_caller_identity", profile=stack.profile, region=stack.region
-    )["Account"]
-
-    # Ugly hack as Pulumi currently doesnt support MFA_TOKENs during role assumptions
-    # Do NOT set them via 'aws:secretKey' as they end up in the stack.json in plain text !!!
-    if (
-        stack.connection_manager._sessions[(stack.profile, stack.region)]
-        .get_credentials()
-        .token
-    ):
-        os.environ["AWS_SESSION_TOKEN"] = (
-            stack.connection_manager._sessions[(stack.profile, stack.region)]
-            .get_credentials()
-            .token
-        )
-
-    os.environ["AWS_ACCESS_KEY_ID"] = (
-        stack.connection_manager._sessions[(stack.profile, stack.region)]
-        .get_credentials()
-        .access_key
-    )
-    os.environ["AWS_SECRET_ACCESS_KEY"] = (
-        stack.connection_manager._sessions[(stack.profile, stack.region)]
-        .get_credentials()
-        .secret_key
-    )
-    os.environ["AWS_DEFAULT_REGION"] = stack.region
-
-    # Secrets provider
-    if "secretsProvider" in stack.pulumi:
-        secrets_provider = stack.pulumi["secretsProvider"]
-        if (
-            secrets_provider == "passphrase"
-            and "PULUMI_CONFIG_PASSPHRASE" not in os.environ
-        ):
-            raise ValueError("Missing PULUMI_CONFIG_PASSPHRASE environment variable!")
-
-    else:
-        try:
-            if stack._pulumi_code.IKNOWHATIDO:
-                logger.warning(
-                    "Missing pulumi.secretsProvider setting, IKNOWHATIDO enabled ... "
-                )
-                secrets_provider = None
-        except AttributeError:
-            raise ValueError("Missing pulumi.secretsProvider setting!")
-
-    # Set tag for stack file name and version
-    _tags = {}
-    try:
-        _version = stack._pulumi_code.VERSION
-    except AttributeError:
-        _version = "undefined"
-
-    # Tag all resources with our metadata, allowing "prune" eventually
-    _tags["zdt:cloudbender.source"] = "{}:{}".format(
-        os.path.basename(stack._pulumi_code.__file__), _version
-    )
-    _tags["zdt:cloudbender.owner"] = f"{project_name}.{pulumi_stackname}"
-
-    _config = {
-        "aws:region": stack.region,
-        "aws:defaultTags": {"tags": _tags},
-        "zdt:region": stack.region,
-        "zdt:awsAccountId": account_id,
-        "zdt:projectName": project_name,
-        "zdt:stackName": pulumi_stackname
-    }
-
-    # inject all parameters as config in the <Conglomerate> namespace
-    for p in stack.parameters:
-        _config["{}:{}".format(stack.parameters["Conglomerate"], p)] = stack.parameters[
-            p
-        ]
-
-    stack_settings = pulumi.automation.StackSettings(
-        config=_config,
-        secrets_provider=secrets_provider,
-        encryption_salt=stack.pulumi.get("encryptionsalt", None),
-        encrypted_key=stack.pulumi.get("encryptedkey", None),
-    )
-
-    project_settings = pulumi.automation.ProjectSettings(
-        name=project_name, runtime="python", backend={"url": pulumi_backend}
-    )
-
-    ws_opts = pulumi.automation.LocalWorkspaceOptions(
-        work_dir=stack.work_dir,
-        project_settings=project_settings,
-        stack_settings={pulumi_stackname: stack_settings},
-        secrets_provider=secrets_provider,
-    )
-
-    if create:
-        pulumi_stack = pulumi.automation.create_or_select_stack(
-            stack_name=pulumi_stackname,
-            project_name=project_name,
-            program=stack._pulumi_code.pulumi_program,
-            opts=ws_opts,
-        )
-        pulumi_stack.workspace.install_plugin(
-            "aws", pkg_resources.get_distribution("pulumi_aws").version
-        )
-
-    else:
-        pulumi_stack = pulumi.automation.select_stack(
-            stack_name=pulumi_stackname,
-            project_name=project_name,
-            program=stack._pulumi_code.pulumi_program,
-            opts=ws_opts,
-        )
-
-    return pulumi_stack
+def pulumi_ws(func):
+    @wraps(func)
+    def decorated(self, *args, **kwargs):
+        # setup temp workspace
+        if self.mode == "pulumi":
+            self.work_dir = tempfile.mkdtemp(
+                dir=tempfile.gettempdir(), prefix="cloudbender-"
+            )
+
+            # add all artifact_paths/pulumi to the search path for easier imports in the pulumi code
+            for artifacts_path in self.ctx["artifact_paths"]:
+                _path = "{}/pulumi".format(artifacts_path.resolve())
+                sys.path.append(_path)
+
+            # Try local implementation first, similar to Jinja2 mode
+            _found = False
+            try:
+                _stack = importlib.import_module(
+                    "config.{}.{}".format(self.rel_path, self.template).replace("/", ".")
+                )
+                _found = True
+
+            except ImportError:
+                for artifacts_path in self.ctx["artifact_paths"]:
+                    try:
+                        spec = importlib.util.spec_from_file_location(
+                            "_stack",
+                            "{}/pulumi/{}.py".format(artifacts_path.resolve(), self.template),
+                        )
+                        _stack = importlib.util.module_from_spec(spec)
+                        spec.loader.exec_module(_stack)
+                        _found = True
+
+                    except FileNotFoundError:
+                        pass
+
+            if not _found:
+                raise FileNotFoundError(
+                    "Cannot find Pulumi implementation for {}".format(self.stackname)
+                )
+
+            # Store internal pulumi code reference
+            self._pulumi_code = _stack
+
+            # Use legacy Conglomerate as Pulumi project_name
+            project_name = self.parameters["Conglomerate"]
+
+            # Remove stacknameprefix if equals Conglomerate as Pulumi implicitly prefixes project_name
+            self.pulumi_stackname = re.sub(r"^" + project_name + "-?", "", self.stackname)
+            try:
+                pulumi_backend = "{}/{}/{}".format(
+                    self.pulumi["backend"], project_name, self.region
+                )
+
+            except KeyError:
+                raise KeyError("Missing pulumi.backend setting !")
+
+            account_id = self.connection_manager.call(
+                "sts", "get_caller_identity", profile=self.profile, region=self.region
+            )["Account"]
+
+            # Ugly hack as Pulumi currently doesn't support MFA_TOKENs during role assumptions
+            # Do NOT set them via 'aws:secretKey' as they end up in the stack.json in plain text !!!
+            if (
+                self.connection_manager._sessions[(self.profile, self.region)]
+                .get_credentials()
+                .token
+            ):
+                os.environ["AWS_SESSION_TOKEN"] = (
+                    self.connection_manager._sessions[(self.profile, self.region)]
+                    .get_credentials()
+                    .token
+                )
+
+            os.environ["AWS_ACCESS_KEY_ID"] = (
+                self.connection_manager._sessions[(self.profile, self.region)]
+                .get_credentials()
+                .access_key
+            )
+            os.environ["AWS_SECRET_ACCESS_KEY"] = (
+                self.connection_manager._sessions[(self.profile, self.region)]
+                .get_credentials()
+                .secret_key
+            )
+            os.environ["AWS_DEFAULT_REGION"] = self.region
+
+            # Secrets provider
+            if "secretsProvider" in self.pulumi:
+                secrets_provider = self.pulumi["secretsProvider"]
+                if (
+                    secrets_provider == "passphrase"
+                    and "PULUMI_CONFIG_PASSPHRASE" not in os.environ
+                ):
+                    raise ValueError("Missing PULUMI_CONFIG_PASSPHRASE environment variable!")
+
+            else:
+                try:
+                    if self._pulumi_code.IKNOWHATIDO:
+                        logger.warning(
+                            "Missing pulumi.secretsProvider setting, IKNOWHATIDO enabled ... "
+                        )
+                        secrets_provider = None
+                except AttributeError:
+                    raise ValueError("Missing pulumi.secretsProvider setting!")
+
+            # Set tag for stack file name and version
+            _tags = {}
+            try:
+                _version = self._pulumi_code.VERSION
+            except AttributeError:
+                _version = "undefined"
+
+            # Tag all resources with our metadata, allowing "prune" eventually
+            _tags["zdt:cloudbender.source"] = "{}:{}".format(
+                os.path.basename(self._pulumi_code.__file__), _version
+            )
+            _tags["zdt:cloudbender.owner"] = f"{project_name}.{self.pulumi_stackname}"
+
+            self.pulumi_config.update({
+                "aws:region": self.region,
+                "aws:defaultTags": {"tags": _tags},
+                "zdt:region": self.region,
+                "zdt:awsAccountId": account_id,
+                "zdt:projectName": project_name,
+                "zdt:stackName": self.pulumi_stackname
+            })
+
+            # inject all parameters as config in the <Conglomerate> namespace
+            for p in self.parameters:
+                self.pulumi_config["{}:{}".format(self.parameters["Conglomerate"], p)] = self.parameters[
+                    p
+                ]
+
+            stack_settings = pulumi.automation.StackSettings(
+                config=self.pulumi_config,
+                secrets_provider=secrets_provider,
+                encryption_salt=self.pulumi.get("encryptionsalt", None),
+                encrypted_key=self.pulumi.get("encryptedkey", None),
+            )
+
+            project_settings = pulumi.automation.ProjectSettings(
+                name=project_name, runtime="python", backend={"url": pulumi_backend}
+            )
+
+            self.pulumi_ws_opts = pulumi.automation.LocalWorkspaceOptions(
+                work_dir=self.work_dir,
+                project_settings=project_settings,
+                stack_settings={self.pulumi_stackname: stack_settings},
+                secrets_provider=secrets_provider,
+            )
+
+        response = func(self, *args, **kwargs)
+
+        # Cleanup temp workspace
+        if self.work_dir and os.path.exists(self.work_dir):
+            shutil.rmtree(self.work_dir)
+
+        return response
+
+    return decorated
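With this move, pulumi_ws absorbs both the old hooks.pulumi_ws and most of pulumi_init: it prepares the temp workspace plus self.pulumi_stackname, self.pulumi_config and self.pulumi_ws_opts, runs the wrapped Stack method, then removes the workspace. A condensed sketch of that contract against a stub class; the try/finally cleanup is a hardening assumption, not literally what the decorator above does:

import os
import shutil
import tempfile
from functools import wraps

def pulumi_ws_sketch(func):
    @wraps(func)
    def decorated(self, *args, **kwargs):
        # mirror the decorator above: only Pulumi stacks get a workspace
        if self.mode == "pulumi":
            self.work_dir = tempfile.mkdtemp(prefix="cloudbender-")
        try:
            return func(self, *args, **kwargs)
        finally:
            if self.work_dir and os.path.exists(self.work_dir):
                shutil.rmtree(self.work_dir)
    return decorated

class StubStack:
    mode = "pulumi"
    work_dir = None

    @pulumi_ws_sketch
    def preview(self):
        # the wrapped method sees a ready-to-use workspace
        return os.path.isdir(self.work_dir)

assert StubStack().preview() is True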
@@ -7,6 +7,7 @@ import time
 import pathlib
 import pprint
 import pulumi
+import pkg_resources

 from datetime import datetime, timedelta
 from dateutil.tz import tzutc
@@ -18,14 +19,13 @@ from .connection import BotoConnection
 from .jinja import JinjaEnv, read_config_file
 from . import __version__
 from .exceptions import ParameterNotFound, ParameterIllegalValue, ChecksumError
-from .hooks import exec_hooks, pulumi_ws
-from .pulumi import pulumi_init
+from .hooks import exec_hooks
+from .pulumi import pulumi_ws

 import cfnlint.core
 import cfnlint.template
 import cfnlint.graph

 import importlib.resources as pkg_resources
 from . import templates

 import logging
@@ -81,9 +81,13 @@ class Stack(object):
         self.default_lock = None
         self.multi_delete = True
         self.template_bucket_url = None

         self.work_dir = None
         self.pulumi = {}
+        self._pulumi_stack = None
+        self.pulumi_stackname = ""
+        self.pulumi_config = {}
+        self.pulumi_ws_opts = None

     def dump_config(self):
         logger.debug("<Stack {}: {}>".format(self.id, pprint.pformat(vars(self))))
@@ -484,7 +488,7 @@ class Stack(object):
         """gets outputs of the stack"""

         if self.mode == "pulumi":
-            self.outputs = pulumi_init(self).outputs()
+            self.outputs = self._get_pulumi_stack().outputs()

         else:
             self.read_template_file()
@@ -724,7 +728,7 @@ class Stack(object):

         if self.mode == "pulumi":
             kwargs = self._set_pulumi_args()
-            pulumi_init(self, create=True).up(**kwargs)
+            self._get_pulumi_stack(create=True).up(**kwargs)

         else:
             # Prepare parameters
@@ -822,7 +826,7 @@ class Stack(object):
         logger.info("Deleting {0} {1}".format(self.region, self.stackname))

         if self.mode == "pulumi":
-            pulumi_stack = pulumi_init(self)
+            pulumi_stack = self._get_pulumi_stack()
             pulumi_stack.destroy(on_output=self._log_pulumi)
             pulumi_stack.workspace.remove_stack(pulumi_stack.name)
@@ -843,7 +847,7 @@ class Stack(object):
     def refresh(self):
         """Refreshes a Pulumi stack"""

-        pulumi_init(self).refresh(on_output=self._log_pulumi)
+        self._get_pulumi_stack().refresh(on_output=self._log_pulumi)

         return
@@ -852,15 +856,44 @@ class Stack(object):
         """Preview a Pulumi stack up operation"""

         kwargs = self._set_pulumi_args()
-        pulumi_init(self, create=True).preview(**kwargs)
+        self._get_pulumi_stack(create=True).preview(**kwargs)

         return

+    @pulumi_ws
+    def execute(self, function, args, listall=False):
+        """Executes custom Python function within a Pulumi stack"""
+
+        # call all available functions and output built-in help
+        if listall:
+            for k in vars(self._pulumi_code).keys():
+                if k.startswith("_execute_"):
+                    docstring = vars(self._pulumi_code)[k](docstring=True)
+                    print("{}: {}".format(k[len("_execute_"):], docstring))
+            return
+
+        else:
+            if not function:
+                logger.error("No function specified !")
+                return
+
+            exec_function = f"_execute_{function}"
+            if exec_function in vars(self._pulumi_code):
+                pulumi_stack = self._get_pulumi_stack()
+                vars(self._pulumi_code)[exec_function](
+                    config=pulumi_stack.get_all_config(), outputs=pulumi_stack.outputs(), args=args
+                )
+
+            else:
+                logger.error(
+                    "{} is not defined in {}".format(function, self._pulumi_code)
+                )
+
     @pulumi_ws
     def assimilate(self):
         """Import resources into Pulumi stack"""

-        pulumi_stack = pulumi_init(self, create=True)
+        pulumi_stack = self._get_pulumi_stack(create=True)

         # now let's import each defined resource
         for r in self._pulumi_code.RESOURCES:
@@ -881,7 +914,7 @@ class Stack(object):
     def export(self, remove_pending_operations):
         """Exports a Pulumi stack"""

-        pulumi_stack = pulumi_init(self)
+        pulumi_stack = self._get_pulumi_stack()
         deployment = pulumi_stack.export_stack()

         if remove_pending_operations:
@@ -897,7 +930,7 @@ class Stack(object):
     def set_config(self, key, value, secret):
         """Set a config or secret"""

-        pulumi_stack = pulumi_init(self, create=True)
+        pulumi_stack = self._get_pulumi_stack(create=True)
         pulumi_stack.set_config(key, pulumi.automation.ConfigValue(value, secret))

         # Store salt or key and encrypted value in CloudBender stack config
@@ -932,7 +965,7 @@ class Stack(object):
     def get_config(self, key):
         """Get a config or secret"""

-        print(pulumi_init(self).get_config(key).value)
+        print(self._get_pulumi_stack().get_config(key).value)

     def create_change_set(self, change_set_name):
         """Creates a Change Set with the name ``change_set_name``."""
@@ -1153,6 +1186,29 @@ class Stack(object):
         if text and not text.isspace():
             logger.info(" ".join([self.region, self.stackname, text]))

+    def _get_pulumi_stack(self, create=False):
+
+        if create:
+            pulumi_stack = pulumi.automation.create_or_select_stack(
+                stack_name=self.pulumi_stackname,
+                project_name=self.parameters["Conglomerate"],
+                program=self._pulumi_code.pulumi_program,
+                opts=self.pulumi_ws_opts,
+            )
+            pulumi_stack.workspace.install_plugin(
+                "aws", pkg_resources.get_distribution("pulumi_aws").version
+            )
+
+        else:
+            pulumi_stack = pulumi.automation.select_stack(
+                stack_name=self.pulumi_stackname,
+                project_name=self.parameters["Conglomerate"],
+                program=self._pulumi_code.pulumi_program,
+                opts=self.pulumi_ws_opts,
+            )
+
+        return pulumi_stack
+
     def _set_pulumi_args(self, kwargs={}):
         kwargs["on_output"] = self._log_pulumi
         kwargs["policy_packs"] = []
@@ -1163,7 +1219,9 @@ class Stack(object):
         for policy in self.pulumi["policies"]:
             found = False
             for artifacts_path in self.ctx["artifact_paths"]:
-                path = "{}/pulumi/policies/{}".format(artifacts_path.resolve(), policy)
+                path = "{}/pulumi/policies/{}".format(
+                    artifacts_path.resolve(), policy
+                )
                 if os.path.exists(path):
                     kwargs["policy_packs"].append(path)
                     found = True
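Every Pulumi code path in Stack now funnels through _get_pulumi_stack(), which selects (or, with create=True, creates) the automation-API stack from the state that @pulumi_ws prepared. A hedged sketch of the resulting call pattern; outputs_of is illustrative and assumes a Stack instance already initialized by the decorator:

from pulumi import automation

def outputs_of(stack):
    # equivalent to Stack.get_outputs() above for mode == "pulumi":
    # select the existing Pulumi stack, then read its outputs
    pulumi_stack = automation.select_stack(
        stack_name=stack.pulumi_stackname,
        project_name=stack.parameters["Conglomerate"],
        program=stack._pulumi_code.pulumi_program,
        opts=stack.pulumi_ws_opts,
    )
    return pulumi_stack.outputs()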
conf/containers.conf (new file, 12 lines)
@@ -0,0 +1,12 @@
+[containers]
+netns="host"
+userns="host"
+ipcns="host"
+utsns="host"
+cgroupns="host"
+cgroups="disabled"
+log_driver = "k8s-file"
+[engine]
+cgroup_manager = "cgroupfs"
+events_logger="file"
+runtime="crun"
conf/podman-containers.conf (new file, 4 lines)
@@ -0,0 +1,4 @@
+[containers]
+volumes = [
+  "/proc:/proc",
+]
conf/registries.conf (new file, 2 lines)
@@ -0,0 +1,2 @@
+# Note that changing the order here may break lazy devs' Dockerfiles
+unqualified-search-registries = [ "gcr.io", "quay.io", "docker.io", "registry.fedoraproject.org"]
conf/storage.conf (new file, 14 lines)
@@ -0,0 +1,14 @@
+[storage]
+driver = "overlay"
+runroot = "/run/containers/storage"
+graphroot = "/var/lib/containers/storage"
+
+[storage.options]
+additionalimagestores = [
+  "/var/lib/shared",
+]
+
+[storage.options.overlay]
+mount_program = "/usr/bin/fuse-overlayfs"
+mountopt = "nodev,fsync=0"
+[storage.options.thinpool]
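All four files are plain TOML, so they can be sanity-checked without running podman; a small sketch using the Python 3.11+ stdlib tomllib, with paths assumed relative to the repo root:

import tomllib  # Python 3.11+

for path in ("conf/containers.conf", "conf/podman-containers.conf",
             "conf/registries.conf", "conf/storage.conf"):
    with open(path, "rb") as f:
        data = tomllib.load(f)
    print(path, "->", sorted(data))

# e.g. storage.conf should wire the overlay driver to fuse-overlayfs:
with open("conf/storage.conf", "rb") as f:
    storage = tomllib.load(f)["storage"]
assert storage["driver"] == "overlay"
assert storage["options"]["overlay"]["mount_program"] == "/usr/bin/fuse-overlayfs"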