diff --git a/CONFIGURATION.md b/CONFIGURATION.md
index 791372d..3466ed2 100644
--- a/CONFIGURATION.md
+++ b/CONFIGURATION.md
@@ -302,3 +302,8 @@ access is granted, `true` or `false`/`null`.
 Determines where images should be published. The key is the region
 identifier (or `ALL`), and the value is whether or not to publish to that
 region, `true` or `false`/`null`.
+
+### `encrypted` string
+
+Determines whether the image will be encrypted when imported and published.
+Currently, only the **aws** cloud module supports this.
diff --git a/README.md b/README.md
index 595f8d2..54755a5 100644
--- a/README.md
+++ b/README.md
@@ -94,31 +94,39 @@ _We manage the credentials for publishing official Alpine images with an
 ### The `build` Script
 
 ```
-usage: build [-h] [--debug] [--clean] [--custom DIR [DIR ...]]
-             [--skip KEY [KEY ...]] [--only KEY [KEY ...]] [--revise] [--use-broker]
-             [--no-color] [--parallel N] [--vars FILE [FILE ...]]
-             {configs,state,local,import,publish}
+usage: build [-h] [--debug] [--clean] [--pad-uefi-bin-arch ARCH [ARCH ...]]
+             [--custom DIR [DIR ...]] [--skip KEY [KEY ...]] [--only KEY [KEY ...]]
+             [--revise] [--use-broker] [--no-color] [--parallel N]
+             [--vars FILE [FILE ...]]
+             {configs,state,rollback,local,upload,import,publish,release}
 
 positional arguments:  (build up to and including this step)
   configs       resolve image build configuration
   state         refresh current image build state
+  rollback      remove existing local/uploaded/imported images if un-published/released
   local         build images locally
-  import        import local images to cloud provider default region
-  publish       set image permissions and publish to cloud regions
+  upload        upload images and metadata to storage
+* import        import local images to cloud provider default region (*)
+* publish       set image permissions and publish to cloud regions (*)
+  release       mark images as being officially released
+
+(*) may not apply to or be implemented for all cloud providers
 
 optional arguments:
   -h, --help            show this help message and exit
   --debug               enable debug output
   --clean               start with a clean work environment
+  --pad-uefi-bin-arch ARCH [ARCH ...]
+                        pad out UEFI firmware to 64 MiB ('aarch64')
   --custom DIR [DIR ...]  overlay custom directory in work environment
   --skip KEY [KEY ...]  skip variants with dimension key(s)
   --only KEY [KEY ...]  only variants with dimension key(s)
-  --revise              remove existing local/imported image, or bump
-                        revision and rebuild if published
+  --revise              remove existing local/uploaded/imported images if
+                        un-published/released, or bump revision and rebuild
   --use-broker          use the identity broker to get credentials
   --no-color            turn off Packer color output
-  --parallel N          build N images in parallel (default: 1)
-  --vars FILE [FILE ...]  supply Packer with -vars-file(s)
+  --parallel N          build N images in parallel
+  --vars FILE [FILE ...]  supply Packer with -vars-file(s) (default: [])
 ```
 
 The `build` script will automatically create a `work/` directory containing a
@@ -145,21 +153,32 @@ it to `work/images.yaml`, if it does not already exist.
 The `state` step always checks the current state of the image builds,
 determines what actions need to be taken, and updates `work/images.yaml`. A
 subset of image builds can be targeted by using the `--skip` and `--only`
-arguments. The `--revise` argument indicates that any _unpublished_ local
-or imported images should be removed and rebuilt; as _published_ images can't
-be removed, `--revise` instead increments the _`revision`_ value to rebuild
-new images.
+arguments.
 
-`local`, `import`, and `publish` steps are orchestrated by Packer. By default,
-each image will be processed serially; providing the `--parallel` argument with
-a value greater than 1 will parallelize operations. The degree to which you
-can parallelze `local` image builds will depend on the local build hardware --
-as QEMU virtual machines are launched for each image being built. Image
-`import` and `publish` steps are much more lightweight, and can support higher
-parallelism.
+The `rollback` step, when used with the `--revise` argument, indicates that
+any _unpublished_ and _unreleased_ local, imported, or uploaded images should
+be removed and rebuilt.
+
+As _published_ and _released_ images can't be removed, `--revise` can be used
+with `configs` or `state` to increment the _`revision`_ value to rebuild newly
+revised images.
+
+`local`, `upload`, `import`, `publish`, and `release` steps are orchestrated by
+Packer. By default, each image will be processed serially; providing the
+`--parallel` argument with a value greater than 1 will parallelize operations.
+The degree to which you can parallelize `local` image builds will depend on the
+local build hardware -- as QEMU virtual machines are launched for each image
+being built. Image `upload`, `import`, `publish`, and `release` steps are much
+more lightweight, and can support higher parallelism.
 
 The `local` step builds local images with QEMU, for those that are not already
-built locally or have already been imported.
+built locally or have already been imported. Images are converted to formats
+amenable for import into the cloud provider (if necessary) and checksums are
+generated.
+
+The `upload` step uploads the local image, checksum, and metadata to the
+defined `storage_url`. The `import`, `publish`, and `release` steps will
+also upload updated image metadata.
 
 The `import` step imports the local images into the cloud providers' default
 regions, unless they've already been imported. At this point the images are
@@ -170,10 +189,16 @@ if they haven't already been copied there. This step will always update image
 permissions, descriptions, tags, and deprecation date (if applicable) in all
 regions where the image has been published.
 
+***NOTE:*** The `import` and `publish` steps are skipped for those cloud
+providers where this does not make sense (e.g. NoCloud) or for which it has
+not yet been implemented.
+
+The `release` step marks the images as being fully released.
+
 ### The `cloud_helper.py` Script
 
 This script is meant to be called only by Packer from its `post-processor`
-block for image `import` and `publish` steps.
+block.
 
 ----
 ## Build Configuration
diff --git a/alpine.pkr.hcl b/alpine.pkr.hcl
index e9b20b0..cc588fe 100644
--- a/alpine.pkr.hcl
+++ b/alpine.pkr.hcl
@@ -31,7 +31,7 @@ variable "qemu" {
 locals {
   # possible actions for the post-processor
   actions = [
-    "build", "upload", "import", "publish", "release"
+    "local", "upload", "import", "publish", "release"
   ]
   debug_arg = var.DEBUG == 0 ? 
"" : "--debug" @@ -85,7 +85,7 @@ build { # QEMU builder dynamic "source" { for_each = { for b, c in local.configs: - b => c if contains(c.actions, "build") + b => c if contains(c.actions, "local") } iterator = B labels = ["qemu.alpine"] # links us to the base source @@ -112,10 +112,10 @@ build { } } - # Null builder (don't build, but we might import and/or publish) + # Null builder (don't build, but we might do other actions) dynamic "source" { for_each = { for b, c in local.configs: - b => c if !contains(c.actions, "build") + b => c if !contains(c.actions, "local") } iterator = B labels = ["null.alpine"] @@ -129,7 +129,7 @@ build { # install setup files dynamic "provisioner" { for_each = { for b, c in local.configs: - b => c if contains(c.actions, "build") + b => c if contains(c.actions, "local") } iterator = B labels = ["file"] @@ -144,7 +144,7 @@ build { # run setup scripts dynamic "provisioner" { for_each = { for b, c in local.configs: - b => c if contains(c.actions, "build") + b => c if contains(c.actions, "local") } iterator = B labels = ["shell"] diff --git a/build b/build index 250bd96..3982db2 100755 --- a/build +++ b/build @@ -43,7 +43,7 @@ from urllib.request import urlopen import clouds from alpine import Alpine -from image_configs import ImageConfigManager +from image_config_manager import ImageConfigManager ### Constants & Variables @@ -267,8 +267,7 @@ if args.use_broker: ### Setup Configs latest = alpine.version_info() -log.info('Latest Alpine version %s and release %s', latest['version'], latest['release']) -log.info('Latest Alpine release notes: %s', latest['notes']) +log.info('Latest Alpine version %s, release %s, and notes: %s', latest['version'], latest['release'], latest['notes']) if args.clean: clean_work() @@ -340,6 +339,7 @@ if p.returncode != 0: log.info('Packer Completed') # update final state in work/images.yaml +# TODO: do we need to do all of this or just save all the image_configs? 
image_configs.refresh_state( step='final', only=args.only, diff --git a/cloud_helper.py b/cloud_helper.py index e74c577..94070e4 100755 --- a/cloud_helper.py +++ b/cloud_helper.py @@ -29,16 +29,15 @@ if os.path.join(os.getcwd(), venv_args[0]) != sys.executable: import argparse import logging -from pathlib import Path from ruamel.yaml import YAML import clouds -from image_configs import ImageConfigManager +from image_config_manager import ImageConfigManager ### Constants & Variables -ACTIONS = ['build', 'upload', 'import', 'publish', 'release'] +ACTIONS = ['local', 'upload', 'import', 'publish', 'release'] LOGFORMAT = '%(name)s - %(levelname)s - %(message)s' @@ -78,26 +77,22 @@ yaml.explicit_start = True for image_key in args.image_keys: image_config = configs.get(image_key) - if args.action == 'build': + if args.action == 'local': image_config.convert_image() elif args.action == 'upload': - # TODO: image_config.upload_image() - pass + if image_config.storage: + image_config.upload_image() elif args.action == 'import': clouds.import_image(image_config) elif args.action == 'publish': - # TODO: we should probably always ensure the directory exists - os.makedirs(image_config.local_dir, exist_ok=True) - # TODO: save artifacts to image_config itself - artifacts = clouds.publish_image(image_config) - yaml.dump(artifacts, image_config.artifacts_yaml) + clouds.publish_image(image_config) elif args.action == 'release': pass # TODO: image_config.release_image() - configurable steps to take on remote host # save per-image metadata - image_config.save_metadata(upload=(False if args.action =='build' else True)) + image_config.save_metadata(args.action) diff --git a/clouds/__init__.py b/clouds/__init__.py index c0d0649..5b859ed 100644 --- a/clouds/__init__.py +++ b/clouds/__init__.py @@ -1,6 +1,6 @@ # vim: ts=4 et: -from . import aws # , oci, gcp, azure +from . import aws, nocloud, azure, gcp, oci ADAPTERS = {} @@ -12,7 +12,13 @@ def register(*mods): ADAPTERS[cloud] = p -register(aws) # , oci, azure, gcp) +register( + aws, # well-tested and fully supported + nocloud, # beta, but supported + azure, # alpha, needs testing, lacks import and publish + gcp, # alpha, needs testing, lacks import and publish + oci, # alpha, needs testing, lacks import and publish +) # using a credential provider is optional, set across all adapters @@ -25,8 +31,9 @@ def set_credential_provider(debug=False): ### forward to the correct adapter -def latest_build_image(config): - return ADAPTERS[config.cloud].latest_build_image( +# TODO: latest_imported_tags(...) 
+def get_latest_imported_tags(config): + return ADAPTERS[config.cloud].get_latest_imported_tags( config.project, config.image_key ) diff --git a/clouds/aws.py b/clouds/aws.py index 5fad927..64e2a95 100644 --- a/clouds/aws.py +++ b/clouds/aws.py @@ -4,13 +4,13 @@ import logging import hashlib import os +import subprocess import time from datetime import datetime -from subprocess import run from .interfaces.adapter import CloudAdapterInterface -from image_configs import Tags, DictObj +from image_tags import DictObj, ImageTags class AWSCloudAdapter(CloudAdapterInterface): @@ -38,7 +38,7 @@ class AWSCloudAdapter(CloudAdapterInterface): try: import boto3 except ModuleNotFoundError: - run(['work/bin/pip', 'install', '-U', 'boto3']) + subprocess.run(['work/bin/pip', 'install', '-U', 'boto3']) import boto3 self._sdk = boto3 @@ -91,19 +91,20 @@ class AWSCloudAdapter(CloudAdapterInterface): ec2r.images.filter(**req), key=lambda k: k.creation_date, reverse=True) # necessary cloud-agnostic image info + # TODO: still necessary? maybe just incoroporate into new latest_imported_tags()? def _image_info(self, i): - tags = Tags(from_list=i.tags) + tags = ImageTags(from_list=i.tags) return DictObj({k: tags.get(k, None) for k in self.IMAGE_INFO}) - # get the latest imported image for a given build name - def latest_build_image(self, project, image_key): + # get the latest imported image's tags for a given build key + def get_latest_imported_tags(self, project, image_key): images = self._get_images_with_tags( project=project, image_key=image_key, ) if images: # first one is the latest - return self._image_info(images[0]) + return ImageTags(from_list=images[0].tags) return None @@ -228,7 +229,9 @@ class AWSCloudAdapter(CloudAdapterInterface): snapshot.delete() raise - return self._image_info(image) + # update ImageConfig with imported tag values, minus special AWS 'Name' + tags.pop('Name', None) + ic.__dict__ |= tags # delete an (unpublished) image def delete_image(self, image_id): @@ -245,7 +248,7 @@ class AWSCloudAdapter(CloudAdapterInterface): # publish an image def publish_image(self, ic): log = logging.getLogger('publish') - source_image = self.latest_build_image( + source_image = self.get_latest_imported_tags( ic.project, ic.image_key, ) @@ -330,7 +333,7 @@ class AWSCloudAdapter(CloudAdapterInterface): if image.state == 'available': # tag image log.info('%s: Adding tags to %s', r, image.id) - image_tags = Tags(from_list=image.tags) + image_tags = ImageTags(from_list=image.tags) fresh = False if 'published' not in image_tags: fresh = True @@ -387,7 +390,7 @@ class AWSCloudAdapter(CloudAdapterInterface): time.sleep(copy_wait) copy_wait = 30 - return artifacts + ic.artifacts = artifacts def register(cloud, cred_provider=None): diff --git a/clouds/azure.py b/clouds/azure.py new file mode 100644 index 0000000..4fda4b5 --- /dev/null +++ b/clouds/azure.py @@ -0,0 +1,22 @@ +from .interfaces.adapter import CloudAdapterInterface + +# NOTE: This stub allows images to be built locally and uploaded to storage, +# but code for automated importing and publishing of images for this cloud +# publisher has not yet been written. 
+
+class AzureCloudAdapter(CloudAdapterInterface):
+
+    def get_latest_imported_tags(self, project, image_key):
+        return None
+
+    def import_image(self, ic):
+        pass
+
+    def delete_image(self, config, image_id):
+        pass
+
+    def publish_image(self, ic):
+        pass
+
+def register(cloud, cred_provider=None):
+    return AzureCloudAdapter(cloud, cred_provider)
diff --git a/clouds/gcp.py b/clouds/gcp.py
new file mode 100644
index 0000000..d58a581
--- /dev/null
+++ b/clouds/gcp.py
@@ -0,0 +1,22 @@
+from .interfaces.adapter import CloudAdapterInterface
+
+# NOTE: This stub allows images to be built locally and uploaded to storage,
+# but code for automated importing and publishing of images for this cloud
+# publisher has not yet been written.
+
+class GCPCloudAdapter(CloudAdapterInterface):
+
+    def get_latest_imported_tags(self, project, image_key):
+        return None
+
+    def import_image(self, ic):
+        pass
+
+    def delete_image(self, config, image_id):
+        pass
+
+    def publish_image(self, ic):
+        pass
+
+def register(cloud, cred_provider=None):
+    return GCPCloudAdapter(cloud, cred_provider)
diff --git a/clouds/interfaces/adapter.py b/clouds/interfaces/adapter.py
index d271cae..0ef6c55 100644
--- a/clouds/interfaces/adapter.py
+++ b/clouds/interfaces/adapter.py
@@ -27,7 +27,7 @@ class CloudAdapterInterface:
     def session(self, region=None):
         raise NotImplementedError
 
-    def latest_build_image(self, project, image_key):
+    def get_latest_imported_tags(self, project, image_key):
         raise NotImplementedError
 
     def import_image(self, config):
diff --git a/clouds/nocloud.py b/clouds/nocloud.py
new file mode 100644
index 0000000..8079319
--- /dev/null
+++ b/clouds/nocloud.py
@@ -0,0 +1,21 @@
+from .interfaces.adapter import CloudAdapterInterface
+
+# NOTE: NoCloud images are never imported or published because there's
+# no actual cloud provider associated with them.
+
+class NoCloudAdapter(CloudAdapterInterface):
+
+    def get_latest_imported_tags(self, project, image_key):
+        return None
+
+    def import_image(self, ic):
+        pass
+
+    def delete_image(self, config, image_id):
+        pass
+
+    def publish_image(self, ic):
+        pass
+
+def register(cloud, cred_provider=None):
+    return NoCloudAdapter(cloud, cred_provider)
diff --git a/clouds/oci.py b/clouds/oci.py
new file mode 100644
index 0000000..78ffe0f
--- /dev/null
+++ b/clouds/oci.py
@@ -0,0 +1,22 @@
+from .interfaces.adapter import CloudAdapterInterface
+
+# NOTE: This stub allows images to be built locally and uploaded to storage,
+# but code for automated importing and publishing of images for this cloud
+# publisher has not yet been written. 
+ +class OCICloudAdapter(CloudAdapterInterface): + + def get_latest_imported_tags(self, project, image_key): + return None + + def import_image(self, ic): + pass + + def delete_image(self, config, image_id): + pass + + def publish_image(self, ic): + pass + +def register(cloud, cred_provider=None): + return OCICloudAdapter(cloud, cred_provider) diff --git a/configs/alpine.conf b/configs/alpine.conf index d8b30c4..65f533b 100644 --- a/configs/alpine.conf +++ b/configs/alpine.conf @@ -40,9 +40,10 @@ Default { image_format = qcow2 # these paths are subject to change, as image downloads are developed - storage_url = "ssh://tomalok@dev.alpinelinux.org/public_html/alpine-cloud-images/{v_version}/cloud/{cloud}" - download_url = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images/{v_version}/cloud/{cloud}" # development - #download_url = "https://dl-cdn.alpinelinux.org/alpine/{v_version}/cloud/{cloud}" + storage_url = "ssh://tomalok@dev.alpinelinux.org/public_html/alpine-cloud-images/{v_version}/cloud/{cloud}/{arch}" + #storage_url = "file://~jake/tmp/alpine-cloud-images/{v_version}/cloud/{cloud}/{arch}" + download_url = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images/{v_version}/cloud/{cloud}/{arch}" # development + #download_url = "https://dl-cdn.alpinelinux.org/alpine/{v_version}/cloud/{cloud}/{arch}" # image access access.PUBLIC = true @@ -73,7 +74,12 @@ Dimensions { cloudinit { include required("bootstrap/cloudinit.conf") } } cloud { - aws { include required("cloud/aws.conf") } + aws { include required("cloud/aws.conf") } + nocloud { include required("cloud/nocloud.conf") } + # these are considered "alpha" + azure { include required("cloud/azure.conf") } + gcp { include required("cloud/gcp.conf") } + oci { include required("cloud/oci.conf") } } } @@ -88,4 +94,12 @@ Mandatory { # final provisioning script scripts = [ cleanup ] + + # TODO: remove this after testing + #access.PUBLIC = false + #regions { + # ALL = false + # us-west-2 = true + # us-east-1 = true + #} } diff --git a/configs/bootstrap/tiny.conf b/configs/bootstrap/tiny.conf index 972f51b..59d96d8 100644 --- a/configs/bootstrap/tiny.conf +++ b/configs/bootstrap/tiny.conf @@ -25,9 +25,11 @@ WHEN { } } } - # azure.packages.tiny-cloud-azure = true - # gcp.packages.tiny-cloud-gcp = true - # oci.packages.tiny-cloud-oci = true + # other per-cloud packages + nocloud.packages.tiny-cloud-nocloud = true + azure.packages.tiny-cloud-azure = true + gcp.packages.tiny-cloud-gcp = true + oci.packages.tiny-cloud-oci = true } scripts = [ setup-tiny ] diff --git a/configs/cloud/aws.conf b/configs/cloud/aws.conf index 8127e04..234e1a7 100644 --- a/configs/cloud/aws.conf +++ b/configs/cloud/aws.conf @@ -14,6 +14,8 @@ initfs_features { nvme = true } +# TODO: what about IPv6-only networks? +# maybe we only set it for <= 3.17, and leave it to dhcpcd? 
ntp_server = 169.254.169.123 access.PUBLIC = true diff --git a/configs/cloud/azure.conf b/configs/cloud/azure.conf new file mode 100644 index 0000000..c83c286 --- /dev/null +++ b/configs/cloud/azure.conf @@ -0,0 +1,9 @@ +# vim: ts=2 et: +cloud_name = Microsoft Azure (alpha) +image_format = vhd + +# start with 3.18 +EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16", "3.17"] + +# TODO: https://learn.microsoft.com/en-us/azure/virtual-machines/linux/time-sync +ntp_server = "" diff --git a/configs/cloud/gcp.conf b/configs/cloud/gcp.conf new file mode 100644 index 0000000..053e545 --- /dev/null +++ b/configs/cloud/gcp.conf @@ -0,0 +1,15 @@ +# vim: ts=2 et: +cloud_name = Google Cloud Platform (alpha) +# TODO: https://cloud.google.com/compute/docs/import/importing-virtual-disks +# Mentions "VHD" but also mentions "..." if that also includes QCOW2, then +# we should use that instead. The "Manual Import" section on the sidebar +# has a "Manually import boot disks" subpage which also mentions importing +# compressed raw images... We would prefer to avoid that if possible. +image_format = vhd + +# start with 3.18 +EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16", "3.17"] + +# TODO: https://cloud.google.com/compute/docs/instances/configure-ntp +# (metadata.google.internal) +ntp_server = "" diff --git a/configs/cloud/nocloud.conf b/configs/cloud/nocloud.conf new file mode 100644 index 0000000..0ac44f4 --- /dev/null +++ b/configs/cloud/nocloud.conf @@ -0,0 +1,8 @@ +# vim: ts=2 et: +cloud_name = NoCloud +image_format = qcow2 + +# start with 3.18 +EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16", "3.17"] + +ntp_server = "" diff --git a/configs/cloud/oci.conf b/configs/cloud/oci.conf new file mode 100644 index 0000000..f9a6ad4 --- /dev/null +++ b/configs/cloud/oci.conf @@ -0,0 +1,8 @@ +# vim: ts=2 et: +cloud_name = Oracle Cloud Infrastructure (alpha) +image_format = qcow2 + +# start with 3.18 +EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16", "3.17"] + +ntp_server = "169.254.169.254" diff --git a/configs/version/base/5.conf b/configs/version/base/5.conf new file mode 100644 index 0000000..f13f817 --- /dev/null +++ b/configs/version/base/5.conf @@ -0,0 +1,8 @@ +# vim: ts=2 et: + +include required("4.conf") + +packages { + # start using dhcpcd for improved IPv6 experience + dhcpcd = true +} diff --git a/configs/version/edge.conf b/configs/version/edge.conf index 885a4bf..8289861 100644 --- a/configs/version/edge.conf +++ b/configs/version/edge.conf @@ -1,9 +1,9 @@ # vim: ts=2 et: -include required("base/4.conf") +include required("base/5.conf") motd { - sudo_removed = "NOTE: 'sudo' is no longer installed by default, please use 'doas' instead." + sudo_removed = "NOTE: 'sudo' is not installed by default, please use 'doas' instead." 
} # clear out inherited repos diff --git a/gen_mksite_releases.py b/gen_mksite_releases.py index ca71bff..b53016d 100755 --- a/gen_mksite_releases.py +++ b/gen_mksite_releases.py @@ -38,7 +38,7 @@ from collections import defaultdict from ruamel.yaml import YAML import clouds -from image_configs import ImageConfigManager +from image_config_manager import ImageConfigManager ### Constants & Variables @@ -162,7 +162,8 @@ for i_key, i_cfg in configs.get().items(): } versions[version]['images'][image_name]['downloads'][cloud] |= { 'cloud': cloud, - 'image_url': i_cfg.download_url, + 'image_format': i_cfg.image_format, + 'image_url': i_cfg.download_url + '/' + (i_cfg.image_name) } versions[version]['images'][image_name]['regions'][region] |= { 'cloud': cloud, diff --git a/image_configs.py b/image_config.py similarity index 52% rename from image_configs.py rename to image_config.py index 3336c61..69a5be7 100644 --- a/image_configs.py +++ b/image_config.py @@ -1,8 +1,6 @@ # vim: ts=4 et: import hashlib -import itertools -import logging import mergedeep import os import pyhocon @@ -11,177 +9,10 @@ import shutil from copy import deepcopy from datetime import datetime from pathlib import Path -from ruamel.yaml import YAML -from subprocess import Popen, PIPE -from urllib.parse import urlparse import clouds - - -class ImageConfigManager(): - - def __init__(self, conf_path, yaml_path, log=__name__, alpine=None): - self.conf_path = Path(conf_path) - self.yaml_path = Path(yaml_path) - self.log = logging.getLogger(log) - self.alpine = alpine - - self.now = datetime.utcnow() - self._configs = {} - - self.yaml = YAML() - self.yaml.register_class(ImageConfig) - self.yaml.explicit_start = True - # hide !ImageConfig tag from Packer - self.yaml.representer.org_represent_mapping = self.yaml.representer.represent_mapping - self.yaml.representer.represent_mapping = self._strip_yaml_tag_type - - # load resolved YAML, if exists - if self.yaml_path.exists(): - self._load_yaml() - else: - self._resolve() - - def get(self, key=None): - if not key: - return self._configs - - return self._configs[key] - - # load already-resolved YAML configs, restoring ImageConfig objects - def _load_yaml(self): - self.log.info('Loading existing %s', self.yaml_path) - for key, config in self.yaml.load(self.yaml_path).items(): - self._configs[key] = ImageConfig(key, config, log=self.log, yaml=self.yaml) - # TODO: also pull in additional per-image metatdata from the build process? 
- - # save resolved configs to YAML - def _save_yaml(self): - self.log.info('Saving %s', self.yaml_path) - self.yaml.dump(self._configs, self.yaml_path) - - # hide !ImageConfig tag from Packer - def _strip_yaml_tag_type(self, tag, mapping, flow_style=None): - if tag == '!ImageConfig': - tag = u'tag:yaml.org,2002:map' - - return self.yaml.representer.org_represent_mapping(tag, mapping, flow_style=flow_style) - - # resolve from HOCON configs - def _resolve(self): - self.log.info('Generating configs.yaml in work environment') - cfg = pyhocon.ConfigFactory.parse_file(self.conf_path) - # set version releases - for v, vcfg in cfg.Dimensions.version.items(): - # version keys are quoted to protect dots - self._set_version_release(v.strip('"'), vcfg) - - dimensions = list(cfg.Dimensions.keys()) - self.log.debug('dimensions: %s', dimensions) - - for dim_keys in (itertools.product(*cfg['Dimensions'].values())): - config_key = '-'.join(dim_keys).replace('"', '') - - # dict of dimension -> dimension_key - dim_map = dict(zip(dimensions, dim_keys)) - - # replace version with release, and make image_key from that - release = cfg.Dimensions.version[dim_map['version']].release - (rel_map := dim_map.copy())['version'] = release - image_key = '-'.join(rel_map.values()) - - image_config = ImageConfig( - config_key, - { - 'image_key': image_key, - 'release': release - } | dim_map, - log=self.log, - yaml=self.yaml - ) - - # merge in the Default config - image_config._merge(cfg.Default) - skip = False - # merge in each dimension key's configs - for dim, dim_key in dim_map.items(): - dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key]) - - image_config._merge(dim_cfg) - - # now that we're done with ConfigTree/dim_cfg, remove " from dim_keys - dim_keys = set(k.replace('"', '') for k in dim_keys) - - # WHEN blocks inside WHEN blocks are considered "and" operations - while (when := image_config._pop('WHEN', None)): - for when_keys, when_conf in when.items(): - # WHEN keys with spaces are considered "or" operations - if len(set(when_keys.split(' ')) & dim_keys) > 0: - image_config._merge(when_conf) - - exclude = image_config._pop('EXCLUDE', None) - if exclude and set(exclude) & set(dim_keys): - self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude) - skip = True - break - - if eol := image_config._get('end_of_life', None): - if self.now > datetime.fromisoformat(eol): - self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol) - skip = True - break - - if skip is True: - continue - - # merge in the Mandatory configs at the end - image_config._merge(cfg.Mandatory) - - # clean stuff up - image_config._normalize() - image_config.qemu['iso_url'] = self.alpine.virt_iso_url(arch=image_config.arch) - - # we've resolved everything, add tags attribute to config - self._configs[config_key] = image_config - - self._save_yaml() - - # set current version release - def _set_version_release(self, v, c): - info = self.alpine.version_info(v) - c.put('release', info['release']) - c.put('end_of_life', info['end_of_life']) - c.put('release_notes', info['notes']) - - # release is also appended to name & description arrays - c.put('name', [c.release]) - c.put('description', [c.release]) - - # update current config status - def refresh_state(self, step, only=[], skip=[], revise=False): - self.log.info('Refreshing State') - has_actions = False - for ic in self._configs.values(): - # clear away any previous actions - if hasattr(ic, 'actions'): - delattr(ic, 'actions') - - dim_keys = 
set(ic.config_key.split('-')) - if only and len(set(only) & dim_keys) != len(only): - self.log.debug("%s SKIPPED, doesn't match --only", ic.config_key) - continue - - if skip and len(set(skip) & dim_keys) > 0: - self.log.debug('%s SKIPPED, matches --skip', ic.config_key) - continue - - ic.refresh_state(step, revise) - if not has_actions and len(ic.actions): - has_actions = True - - # re-save with updated actions - self._save_yaml() - return has_actions +from image_storage import ImageStorage, run +from image_tags import ImageTags class ImageConfig(): @@ -194,10 +25,14 @@ class ImageConfig(): OPTIONAL_TAGS = [ 'built', 'uploaded', 'imported', 'import_id', 'import_region', 'published', 'released' ] + STEPS = [ + 'local', 'upload', 'import', 'publish', 'release' + ] def __init__(self, config_key, obj={}, log=None, yaml=None): self._log = log self._yaml = yaml + self._storage = None self.config_key = str(config_key) tags = obj.pop('tags', None) self.__dict__ |= self._deep_dict(obj) @@ -226,13 +61,9 @@ class ImageConfig(): return Path('work/images') / self.cloud / self.image_key @property - def local_path(self): + def local_image(self): return self.local_dir / ('image.qcow2') - @property - def artifacts_yaml(self): - return self.local_dir / 'artifacts.yaml' - @property def image_name(self): return self.name.format(**self.__dict__) @@ -250,13 +81,9 @@ class ImageConfig(): return self.local_dir / self.image_file @property - def image_metadata_file(self): + def metadata_file(self): return '.'.join([self.image_name, 'yaml']) - @property - def image_metadata_path(self): - return self.local_dir / self.image_metadata_file - def region_url(self, region, image_id): return self.cloud_region_url.format(region=region, image_id=image_id, **self.__dict__) @@ -285,7 +112,7 @@ class ImageConfig(): if self.__dict__.get(k, None): t[k] = self.__dict__[k] - return Tags(t) + return ImageTags(t) # recursively convert a ConfigTree object to a dict object def _deep_dict(self, layer): @@ -325,6 +152,7 @@ class ImageConfig(): self.name = '-'.join(self.name) self.description = ' '.join(self.description) self._resolve_motd() + self._resolve_urls() self._stringify_repos() self._stringify_packages() self._stringify_services() @@ -350,6 +178,13 @@ class ImageConfig(): self.motd = '\n\n'.join(motd.values()).format(**self.__dict__) + def _resolve_urls(self): + if 'storage_url' in self.__dict__: + self.storage_url = self.storage_url.format(v_version=self.v_version, **self.__dict__) + + if 'download_url' in self.__dict__: + self.download_url = self.download_url.format(v_version=self.v_version, **self.__dict__) + def _stringify_repos(self): # stringify repos map # : # @ enabled @@ -420,28 +255,60 @@ class ImageConfig(): for m, v in self.__dict__[d].items() ))) + def _is_step_or_earlier(self, s, step): + log = self._log + if step == 'state': + return True + + if step not in self.STEPS: + return False + + return self.STEPS.index(s) <= self.STEPS.index(step) + + # TODO: this needs to be sorted out for 'upload' and 'release' steps def refresh_state(self, step, revise=False): log = self._log actions = {} revision = 0 - remote_image = clouds.latest_build_image(self) - log.debug('\n%s', remote_image) step_state = step == 'state' + step_rollback = step == 'rollback' + undo = {} - # enable actions based on the specified step - if step in ['local', 'import', 'publish', 'state']: - actions['build'] = True + # enable initial set of possible actions based on specified step + for s in self.STEPS: + if self._is_step_or_earlier(s, step): + 
actions[s] = True - if step in ['import', 'publish', 'state']: - actions['import'] = True + # pick up any updated image metadata + self.load_metadata() - if step in ['publish', 'state']: - # we will resolve publish destinations (if any) later - actions['publish'] = True + # TODO: check storage and/or cloud - use this instead of remote_image + # latest_revision = self.get_latest_revision() + + if (step_rollback or revise) and self.local_image.exists(): + undo['local'] = True + + + + if step_rollback: + if self.local_image.exists(): + undo['local'] = True + + if not self.published or self.released: + if self.uploaded: + undo['upload'] = True + + if self.imported: + undo['import'] = True + + # TODO: rename to 'remote_tags'? + # if we load remote tags into state automatically, shouldn't that info already be in self? + remote_image = clouds.get_latest_imported_tags(self) + log.debug('\n%s', remote_image) if revise: - if self.local_path.exists(): + if self.local_image.exists(): # remove previously built local image artifacts log.warning('%s existing local image dir %s', 'Would remove' if step_state else 'Removing', @@ -449,13 +316,13 @@ class ImageConfig(): if not step_state: shutil.rmtree(self.local_dir) - if remote_image and remote_image.published: + if remote_image and remote_image.get('published', None): log.warning('%s image revision for %s', 'Would bump' if step_state else 'Bumping', self.image_key) revision = int(remote_image.revision) + 1 - elif remote_image and remote_image.imported: + elif remote_image and remote_image.get('imported', None): # remove existing imported (but unpublished) image log.warning('%s unpublished remote image %s', 'Would remove' if step_state else 'Removing', @@ -466,20 +333,24 @@ class ImageConfig(): remote_image = None elif remote_image: - if remote_image.imported: - # already imported, don't build/import again + if remote_image.get('imported', None): + # already imported, don't build/upload/import again log.debug('%s - already imported', self.image_key) - actions.pop('build', None) + actions.pop('local', None) + actions.pop('upload', None) actions.pop('import', None) - if remote_image.published: + if remote_image.get('published', None): # NOTE: re-publishing can update perms or push to new regions log.debug('%s - already published', self.image_key) - if self.local_path.exists(): + if self.local_image.exists(): # local image's already built, don't rebuild log.debug('%s - already locally built', self.image_key) - actions.pop('build', None) + actions.pop('local', None) + + else: + self.built = None # merge remote_image data into image state if remote_image: @@ -488,96 +359,106 @@ class ImageConfig(): else: self.__dict__ |= { 'revision': revision, + 'uploaded': None, 'imported': None, 'import_id': None, 'import_region': None, 'published': None, + 'artifacts': None, + 'released': None, } - # update artifacts, if we've got 'em - if self.artifacts_yaml.exists(): - self.artifacts = self.yaml.load(self.artifacts_yaml) - - else: - self.artifacts = None + # remove remaining actions not possible based on specified step + for s in self.STEPS: + if not self._is_step_or_earlier(s, step): + actions.pop(s, None) self.actions = list(actions) log.info('%s/%s = %s', self.cloud, self.image_name, self.actions) self.state_updated = datetime.utcnow().isoformat() - def _run(self, cmd, errmsg=None, errvals=[]): - log = self._log - p = Popen(cmd, stdout=PIPE, stdin=PIPE, encoding='utf8') - out, err = p.communicate() - if p.returncode: - if log: - if errmsg: - log.error(errmsg, *errvals) 
+ @property + def storage(self): + if self._storage is None: + self._storage = ImageStorage(self.local_dir, self.storage_url, log=self._log) - log.error('COMMAND: %s', ' '.join(cmd)) - log.error('EXIT: %d', p.returncode) - log.error('STDOUT:\n%s', out) - log.error('STDERR:\n%s', err) - - raise RuntimeError - - return out, err + return self._storage def _save_checksum(self, file): self._log.info("Calculating checksum for '%s'", file) sha256_hash = hashlib.sha256() + sha512_hash = hashlib.sha512() with open(file, 'rb') as f: for block in iter(lambda: f.read(4096), b''): sha256_hash.update(block) + sha512_hash.update(block) with open(str(file) + '.sha256', 'w') as f: print(sha256_hash.hexdigest(), file=f) + with open(str(file) + '.sha512', 'w') as f: + print(sha512_hash.hexdigest(), file=f) + # convert local QCOW2 to format appropriate for a cloud def convert_image(self): - self._log.info('Converting %s to %s', self.local_path, self.image_path) - self._run( - self.CONVERT_CMD[self.image_format] + [self.local_path, self.image_path], - errmsg='Unable to convert %s to %s', errvals=[self.local_path, self.image_path], + self._log.info('Converting %s to %s', self.local_image, self.image_path) + run( + self.CONVERT_CMD[self.image_format] + [self.local_image, self.image_path], + log=self._log, errmsg='Unable to convert %s to %s', + errvals=[self.local_image, self.image_path] ) self._save_checksum(self.image_path) self.built = datetime.utcnow().isoformat() - def save_metadata(self, upload=True): + def upload_image(self): + self.storage.store( + self.image_file, + self.image_file + '.sha256', + self.image_file + '.sha512' + ) + self.uploaded = datetime.utcnow().isoformat() + + def save_metadata(self, action): os.makedirs(self.local_dir, exist_ok=True) self._log.info('Saving image metadata') - self._yaml.dump(dict(self.tags), self.image_metadata_path) - self._save_checksum(self.image_metadata_path) + # TODO: save metadata updated timestamp as metadata? + # TODO: def self.metadata to return what we consider metadata? + metadata = dict(self.tags) + self.metadata_updated = datetime.utcnow().isoformat() + metadata |= { + 'artifacts': self._get('artifacts', None), + 'metadata_updated': self.metadata_updated + } + metadata_path = self.local_dir / self.metadata_file + self._yaml.dump(metadata, metadata_path) + self._save_checksum(metadata_path) + if action != 'local' and self.storage: + self.storage.store( + self.metadata_file, + self.metadata_file + '.sha256', + self.metadata_file + '.sha512' + ) + def load_metadata(self): + # TODO: what if we have fresh configs, but the image is already uploaded/imported? + # we'll need to get revision first somehow + if 'revision' not in self.__dict__: + return -class DictObj(dict): + # TODO: revision = '*' for now - or only if unknown? - def __getattr__(self, key): - return self[key] + # get a list of local matching -r*.yaml? + metadata_path = self.local_dir / self.metadata_file + if metadata_path.exists(): + self._log.info('Loading image metadata from %s', metadata_path) + self.__dict__ |= self._yaml.load(metadata_path).items() - def __setattr__(self, key, value): - self[key] = value + # get a list of storage matching -r*.yaml + #else: + # retrieve metadata (and image?) 
from storage_url + # else: + # retrieve metadata from imported image - def __delattr__(self, key): - del self[key] - - -class Tags(DictObj): - - def __init__(self, d={}, from_list=None, key_name='Key', value_name='Value'): - for key, value in d.items(): - self.__setattr__(key, value) - - if from_list: - self.from_list(from_list, key_name, value_name) - - def __setattr__(self, key, value): - self[key] = str(value) - - def as_list(self, key_name='Key', value_name='Value'): - return [{key_name: k, value_name: v} for k, v in self.items()] - - def from_list(self, list=[], key_name='Key', value_name='Value'): - for tag in list: - self.__setattr__(tag[key_name], tag[value_name]) + # if there's no stored metadata, we are in transition, + # get a list of imported images matching -r*.yaml diff --git a/image_config_manager.py b/image_config_manager.py new file mode 100644 index 0000000..704aaa2 --- /dev/null +++ b/image_config_manager.py @@ -0,0 +1,178 @@ +# vim: ts=4 et: + +import itertools +import logging +import pyhocon + +from copy import deepcopy +from datetime import datetime +from pathlib import Path +from ruamel.yaml import YAML + +from image_config import ImageConfig + + + +class ImageConfigManager(): + + def __init__(self, conf_path, yaml_path, log=__name__, alpine=None): + self.conf_path = Path(conf_path) + self.yaml_path = Path(yaml_path) + self.log = logging.getLogger(log) + self.alpine = alpine + + self.now = datetime.utcnow() + self._configs = {} + + self.yaml = YAML() + self.yaml.register_class(ImageConfig) + self.yaml.explicit_start = True + # hide !ImageConfig tag from Packer + self.yaml.representer.org_represent_mapping = self.yaml.representer.represent_mapping + self.yaml.representer.represent_mapping = self._strip_yaml_tag_type + + # load resolved YAML, if exists + if self.yaml_path.exists(): + self._load_yaml() + else: + self._resolve() + + def get(self, key=None): + if not key: + return self._configs + + return self._configs[key] + + # load already-resolved YAML configs, restoring ImageConfig objects + def _load_yaml(self): + self.log.info('Loading existing %s', self.yaml_path) + for key, config in self.yaml.load(self.yaml_path).items(): + self._configs[key] = ImageConfig(key, config, log=self.log, yaml=self.yaml) + + # save resolved configs to YAML + def _save_yaml(self): + self.log.info('Saving %s', self.yaml_path) + self.yaml.dump(self._configs, self.yaml_path) + + # hide !ImageConfig tag from Packer + def _strip_yaml_tag_type(self, tag, mapping, flow_style=None): + if tag == '!ImageConfig': + tag = u'tag:yaml.org,2002:map' + + return self.yaml.representer.org_represent_mapping(tag, mapping, flow_style=flow_style) + + # resolve from HOCON configs + def _resolve(self): + self.log.info('Generating configs.yaml in work environment') + cfg = pyhocon.ConfigFactory.parse_file(self.conf_path) + # set version releases + for v, vcfg in cfg.Dimensions.version.items(): + # version keys are quoted to protect dots + self._set_version_release(v.strip('"'), vcfg) + + dimensions = list(cfg.Dimensions.keys()) + self.log.debug('dimensions: %s', dimensions) + + for dim_keys in (itertools.product(*cfg['Dimensions'].values())): + config_key = '-'.join(dim_keys).replace('"', '') + + # dict of dimension -> dimension_key + dim_map = dict(zip(dimensions, dim_keys)) + + # replace version with release, and make image_key from that + release = cfg.Dimensions.version[dim_map['version']].release + (rel_map := dim_map.copy())['version'] = release + image_key = '-'.join(rel_map.values()) + + image_config = 
ImageConfig( + config_key, + { + 'image_key': image_key, + 'release': release + } | dim_map, + log=self.log, + yaml=self.yaml + ) + + # merge in the Default config + image_config._merge(cfg.Default) + skip = False + # merge in each dimension key's configs + for dim, dim_key in dim_map.items(): + dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key]) + + image_config._merge(dim_cfg) + + # now that we're done with ConfigTree/dim_cfg, remove " from dim_keys + dim_keys = set(k.replace('"', '') for k in dim_keys) + + # WHEN blocks inside WHEN blocks are considered "and" operations + while (when := image_config._pop('WHEN', None)): + for when_keys, when_conf in when.items(): + # WHEN keys with spaces are considered "or" operations + if len(set(when_keys.split(' ')) & dim_keys) > 0: + image_config._merge(when_conf) + + exclude = image_config._pop('EXCLUDE', None) + if exclude and set(exclude) & set(dim_keys): + self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude) + skip = True + break + + if eol := image_config._get('end_of_life', None): + if self.now > datetime.fromisoformat(eol): + self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol) + skip = True + break + + if skip is True: + continue + + # merge in the Mandatory configs at the end + image_config._merge(cfg.Mandatory) + + # clean stuff up + image_config._normalize() + image_config.qemu['iso_url'] = self.alpine.virt_iso_url(arch=image_config.arch) + + # we've resolved everything, add tags attribute to config + self._configs[config_key] = image_config + + self._save_yaml() + + # set current version release + def _set_version_release(self, v, c): + info = self.alpine.version_info(v) + c.put('release', info['release']) + c.put('end_of_life', info['end_of_life']) + c.put('release_notes', info['notes']) + + # release is also appended to name & description arrays + c.put('name', [c.release]) + c.put('description', [c.release]) + + # update current config status + def refresh_state(self, step, only=[], skip=[], revise=False): + self.log.info('Refreshing State') + has_actions = False + for ic in self._configs.values(): + # clear away any previous actions + if hasattr(ic, 'actions'): + delattr(ic, 'actions') + + dim_keys = set(ic.config_key.split('-')) + if only and len(set(only) & dim_keys) != len(only): + self.log.debug("%s SKIPPED, doesn't match --only", ic.config_key) + continue + + if skip and len(set(skip) & dim_keys) > 0: + self.log.debug('%s SKIPPED, matches --skip', ic.config_key) + continue + + ic.refresh_state(step, revise) + if not has_actions and len(ic.actions): + has_actions = True + + # re-save with updated actions + self._save_yaml() + return has_actions diff --git a/image_storage.py b/image_storage.py new file mode 100644 index 0000000..c4ea602 --- /dev/null +++ b/image_storage.py @@ -0,0 +1,183 @@ +# vim: ts=4 et: + +import shutil +import os + +from glob import glob +from pathlib import Path +from subprocess import Popen, PIPE +from urllib.parse import urlparse + +from image_tags import DictObj + + +def run(cmd, log, errmsg=None, errvals=[]): + # ensure command and error values are lists of strings + cmd = [str(c) for c in cmd] + errvals = [str(ev) for ev in errvals] + + log.debug('COMMAND: %s', ' '.join(cmd)) + p = Popen(cmd, stdout=PIPE, stdin=PIPE, encoding='utf8') + out, err = p.communicate() + if p.returncode: + if errmsg: + log.error(errmsg, *errvals) + + log.error('COMMAND: %s', ' '.join(cmd)) + log.error('EXIT: %d', p.returncode) + log.error('STDOUT:\n%s', out) + log.error('STDERR:\n%s', 
err) + raise RuntimeError + + return out, err + + +class ImageStorage(): + + def __init__(self, local, storage_url, log): + self.log = log + self.local = local + self.url = storage_url.removesuffix('/') + url = urlparse(self.url) + if url.scheme not in ['', 'file', 'ssh']: + self.log.error('Storage with "%s" scheme is unsupported', url.scheme) + raise RuntimeError + + if url.scheme in ['', 'file']: + self.scheme = 'file' + self.remote = Path(url.netloc + url.path).expanduser() + + else: + self.scheme = 'ssh' + self.host = url.hostname + self.remote = Path(url.path[1:]) # drop leading / -- use // for absolute path + self.ssh = DictObj({ + 'port': ['-p', url.port] if url.port else [], + 'user': ['-l', url.username] if url.username else [], + }) + self.scp = DictObj({ + 'port': ['-P', url.port] if url.port else [], + 'user': url.username + '@' if url.username else '', + }) + + def store(self, *files): + log = self.log + if not files: + log.debug('No files to store') + return + + src = self.local + dest = self.remote + if self.scheme == 'file': + dest.mkdir(parents=True, exist_ok=True) + for file in files: + log.info('Storing %s', dest / file) + shutil.copy2(src / file, dest / file) + + return + + url = self.url + host = self.host + ssh = self.ssh + scp = self.scp + run( + ['ssh'] + ssh.port + ssh.user + [host, 'mkdir', '-p', dest], + log=log, errmsg='Unable to ensure existence of %s', errvals=[url] + ) + src_files = [] + for file in files: + log.info('Storing %s', url + '/' + file) + src_files.append(src / file) + + run( + ['scp'] + scp.port + src_files + [scp.user + ':'.join([host, str(dest)])], + log=log, errmsg='Failed to store files' + ) + + def retrieve(self, *files): + log = self.log + if not files: + log.debug('No files to retrieve') + return + + src = self.remote + dest = self.local + dest.mkdir(parents=True, exist_ok=True) + if self.scheme == 'file': + for file in files: + log.info('Retrieving %s', src / file) + shutil.copy2(src / file, dest / file) + + return + + url = self.url + host = self.host + scp = self.scp + src_files = [] + for file in files: + log.info('Retrieving %s', url + '/' + file) + src_files.append(scp.user + ':'.join([host, str(src / file)])) + + run( + ['scp'] + scp.port + src_files + [dest], + log=log, errmsg='Failed to retrieve files' + ) + + # TODO: optional files=[]? 
+ def list(self, match=None): + log = self.log + path = self.remote + if not match: + match = '*' + + files = [] + if self.scheme == 'file': + path.mkdir(parents=True, exist_ok=True) + log.info('Listing of %s files in %s', match, path) + files = sorted(glob(str(path / match)), key=os.path.getmtime, reverse=True) + + else: + url = self.url + host = self.host + ssh = self.ssh + log.info('Listing %s files at %s', match, url) + run( + ['ssh'] + ssh.port + ssh.user + [host, 'mkdir', '-p', path], + log=log, errmsg='Unable to create path' + ) + out, _ = run( + ['ssh'] + ssh.port + ssh.user + [host, 'ls', '-1drt', path / match], + log=log, errmsg='Failed to list files' + ) + files = out.splitlines() + + return [os.path.basename(f) for f in files] + + def remove(self, files): + log = self.log + if not files: + log.debug('No files to remove') + return + + dest = self.remote + if self.scheme == 'file': + for file in files: + path = dest / file + log.info('Removing %s', path) + if path.exists(): + path.unlink() + + return + + url = self.url + host = self.host + ssh = self.ssh + dest_files = [] + for file in files: + log.info('Removing %s', url + '/' + file) + dest_files.append(dest / file) + + run( + ['ssh'] + ssh.port + ssh.user + [host, 'rm', '-f'] + dest_files, + log=log, errmsg='Failed to remove files' + ) diff --git a/image_tags.py b/image_tags.py new file mode 100644 index 0000000..3d7219a --- /dev/null +++ b/image_tags.py @@ -0,0 +1,32 @@ +# vim: ts=4 et: + +class DictObj(dict): + + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + self[key] = value + + def __delattr__(self, key): + del self[key] + + +class ImageTags(DictObj): + + def __init__(self, d={}, from_list=None, key_name='Key', value_name='Value'): + for key, value in d.items(): + self.__setattr__(key, value) + + if from_list: + self.from_list(from_list, key_name, value_name) + + def __setattr__(self, key, value): + self[key] = str(value) + + def as_list(self, key_name='Key', value_name='Value'): + return [{key_name: k, value_name: v} for k, v in self.items()] + + def from_list(self, list=[], key_name='Key', value_name='Value'): + for tag in list: + self.__setattr__(tag[key_name], tag[value_name]) diff --git a/scripts/setup-cloudinit b/scripts/setup-cloudinit index 9a01b0e..dd953c0 100755 --- a/scripts/setup-cloudinit +++ b/scripts/setup-cloudinit @@ -26,6 +26,18 @@ case "$CLOUD" in aws) DATASOURCE="Ec2" ;; + nocloud) + DATASOURCE="NoCloud" + ;; + azure) + DATASOURCE="Azure" + ;; + gcp) + DATASOURCE="GCE" + ;; + oci) + DATASOURCE="Oracle" + ;; *) echo "Unsupported Cloud '$CLOUD'" >&2 exit 1