Merge commit 'ba79bc7f95ac2f6c01c12534323579c9808c868a'
@@ -302,3 +302,8 @@ access is granted, `true` or `false`/`null`.
 Determines where images should be published.  The key is the region
 identifier (or `ALL`), and the value is whether or not to publish to that
 region, `true` or `false`/`null`.
+
+### `encrypted` string
+
+Determines whether the image will be encrypted when imported and published.
+Currently, only the **aws** cloud module supports this.

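For illustration only, the `regions` semantics documented above can be summarized in a short sketch; `regions_to_publish` is a hypothetical helper, not part of this repo.

```python
# Hedged sketch of the `regions` mapping described above: the key is a region
# identifier (or 'ALL'), and the value decides whether to publish there.
def regions_to_publish(regions, available):
    default = regions.get('ALL')            # fallback for unlisted regions
    return {r for r in available if regions.get(r, default)}

# publish everywhere except us-east-1
print(sorted(regions_to_publish(
    {'ALL': True, 'us-east-1': False},
    ['us-east-1', 'us-west-2', 'eu-west-1'],
)))  # ['eu-west-1', 'us-west-2']
```
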
@@ -94,31 +94,39 @@ _We manage the credentials for publishing official Alpine images with an
 ### The `build` Script
 
 ```
-usage: build [-h] [--debug] [--clean] [--custom DIR [DIR ...]]
-             [--skip KEY [KEY ...]] [--only KEY [KEY ...]] [--revise] [--use-broker]
-             [--no-color] [--parallel N] [--vars FILE [FILE ...]]
-             {configs,state,local,import,publish}
+usage: build [-h] [--debug] [--clean] [--pad-uefi-bin-arch ARCH [ARCH ...]]
+             [--custom DIR [DIR ...]] [--skip KEY [KEY ...]] [--only KEY [KEY ...]]
+             [--revise] [--use-broker] [--no-color] [--parallel N]
+             [--vars FILE [FILE ...]]
+             {configs,state,rollback,local,upload,import,publish,release}
 
 positional arguments: (build up to and including this step)
   configs      resolve image build configuration
   state        refresh current image build state
+  rollback     remove existing local/uploaded/imported images if un-published/released
   local        build images locally
-  import       import local images to cloud provider default region
-  publish      set image permissions and publish to cloud regions
+  upload       upload images and metadata to storage
+* import       import local images to cloud provider default region (*)
+* publish      set image permissions and publish to cloud regions (*)
+  release      mark images as being officially released
+
+(*) may not apply to or be implemented for all cloud providers
 
 optional arguments:
   -h, --help   show this help message and exit
   --debug      enable debug output
   --clean      start with a clean work environment
+  --pad-uefi-bin-arch ARCH [ARCH ...]
+               pad out UEFI firmware to 64 MiB ('aarch64')
   --custom DIR [DIR ...]   overlay custom directory in work environment
   --skip KEY [KEY ...]     skip variants with dimension key(s)
   --only KEY [KEY ...]     only variants with dimension key(s)
-  --revise     remove existing local/imported image, or bump
-               revision and rebuild if published
+  --revise     remove existing local/uploaded/imported images if
+               un-published/released, or bump revision and rebuild
  --use-broker  use the identity broker to get credentials
  --no-color    turn off Packer color output
-  --parallel N  build N images in parallel (default: 1)
-  --vars FILE [FILE ...]   supply Packer with -vars-file(s)
+  --parallel N  build N images in parallel
+  --vars FILE [FILE ...]   supply Packer with -vars-file(s) (default: [])
 ```
 
 The `build` script will automatically create a `work/` directory containing a

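The positional argument acts as a target: everything up to and including that step runs. A minimal sketch of that semantics (names assumed, mirroring the step list above):

```python
# Sketch of "build up to and including this step"; STEPS mirrors the new
# positional arguments (configs/state handling omitted for brevity).
STEPS = ['local', 'upload', 'import', 'publish', 'release']

def steps_to_run(target):
    return STEPS[:STEPS.index(target) + 1]

print(steps_to_run('import'))   # ['local', 'upload', 'import']
```
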
@@ -145,21 +153,32 @@ it to `work/images.yaml`, if it does not already exist.
 The `state` step always checks the current state of the image builds,
 determines what actions need to be taken, and updates `work/images.yaml`.  A
 subset of image builds can be targeted by using the `--skip` and `--only`
-arguments.  The `--revise` argument indicates that any _unpublished_ local
-or imported images should be removed and rebuilt; as _published_ images can't
-be removed, `--revise` instead increments the _`revision`_ value to rebuild
-new images.
+arguments.
 
-`local`, `import`, and `publish` steps are orchestrated by Packer.  By default,
-each image will be processed serially; providing the `--parallel` argument with
-a value greater than 1 will parallelize operations.  The degree to which you
-can parallelize `local` image builds will depend on the local build hardware --
-as QEMU virtual machines are launched for each image being built.  Image
-`import` and `publish` steps are much more lightweight, and can support higher
-parallelism.
+The `rollback` step, when used with the `--revise` argument, indicates that any
+_unpublished_ and _unreleased_ local, imported, or uploaded images should be
+removed and rebuilt.
+
+As _published_ and _released_ images can't be removed, `--revise` can be used
+with `configs` or `state` to increment the _`revision`_ value to rebuild newly
+revised images.
+
+`local`, `upload`, `import`, `publish`, and `release` steps are orchestrated by
+Packer.  By default, each image will be processed serially; providing the
+`--parallel` argument with a value greater than 1 will parallelize operations.
+The degree to which you can parallelize `local` image builds will depend on the
+local build hardware -- as QEMU virtual machines are launched for each image
+being built.  Image `upload`, `import`, `publish`, and `release` steps are much
+more lightweight, and can support higher parallelism.
 
 The `local` step builds local images with QEMU, for those that are not already
-built locally or have already been imported.
+built locally or have already been imported.  Images are converted to formats
+amenable for import into the cloud provider (if necessary) and checksums are
+generated.
+
+The `upload` step uploads the local image, checksum, and metadata to the
+defined `storage_url`.  The `import`, `publish`, and `release` steps will
+also upload updated image metadata.
 
 The `import` step imports the local images into the cloud providers' default
 regions, unless they've already been imported.  At this point the images are

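In rough pseudo-Python, the `--revise` behavior described above amounts to the following (hypothetical helper, for illustration only):

```python
# Hedged sketch of --revise: unpublished/unreleased artifacts are removed and
# rebuilt; published/released images can't be removed, so the revision is
# bumped and a new image is built instead.
def plan_revise(state):
    if state.get('published') or state.get('released'):
        state['revision'] += 1          # rebuild under the next revision
    else:
        state['rebuild'] = True         # safe to remove and rebuild in place
    return state

print(plan_revise({'published': None, 'released': None, 'revision': 0}))
```
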
@@ -170,10 +189,16 @@ if they haven't already been copied there.  This step will always update
 image permissions, descriptions, tags, and deprecation date (if applicable)
 in all regions where the image has been published.
 
+***NOTE:*** The `import` and `publish` steps are skipped for those cloud
+providers where this does not make sense (e.g. NoCloud) or for which
+it has not yet been coded.
+
+The `release` step marks the images as being fully released.
+
 ### The `cloud_helper.py` Script
 
 This script is meant to be called only by Packer from its `post-processor`
-block for image `import` and `publish` steps.
+block.
 
 ----
 ## Build Configuration

@@ -31,7 +31,7 @@ variable "qemu" {
 locals {
   # possible actions for the post-processor
   actions = [
-    "build", "upload", "import", "publish", "release"
+    "local", "upload", "import", "publish", "release"
   ]
 
   debug_arg = var.DEBUG == 0 ? "" : "--debug"

@@ -85,7 +85,7 @@ build {
   # QEMU builder
   dynamic "source" {
     for_each = { for b, c in local.configs:
-        b => c if contains(c.actions, "build")
+        b => c if contains(c.actions, "local")
     }
     iterator = B
     labels = ["qemu.alpine"]  # links us to the base source

@@ -112,10 +112,10 @@ build {
     }
   }
 
-  # Null builder (don't build, but we might import and/or publish)
+  # Null builder (don't build, but we might do other actions)
   dynamic "source" {
     for_each = { for b, c in local.configs:
-        b => c if !contains(c.actions, "build")
+        b => c if !contains(c.actions, "local")
     }
     iterator = B
     labels = ["null.alpine"]

@@ -129,7 +129,7 @@ build {
   # install setup files
   dynamic "provisioner" {
     for_each = { for b, c in local.configs:
-        b => c if contains(c.actions, "build")
+        b => c if contains(c.actions, "local")
     }
     iterator = B
     labels = ["file"]

@@ -144,7 +144,7 @@ build {
   # run setup scripts
   dynamic "provisioner" {
     for_each = { for b, c in local.configs:
-        b => c if contains(c.actions, "build")
+        b => c if contains(c.actions, "local")
     }
     iterator = B
     labels = ["shell"]

@@ -43,7 +43,7 @@ from urllib.request import urlopen
 
 import clouds
 from alpine import Alpine
-from image_configs import ImageConfigManager
+from image_config_manager import ImageConfigManager
 
 
 ### Constants & Variables

@@ -267,8 +267,7 @@ if args.use_broker:
 ### Setup Configs
 
 latest = alpine.version_info()
-log.info('Latest Alpine version %s and release %s', latest['version'], latest['release'])
-log.info('Latest Alpine release notes: %s', latest['notes'])
+log.info('Latest Alpine version %s, release %s, and notes: %s', latest['version'], latest['release'], latest['notes'])
 if args.clean:
     clean_work()

@@ -340,6 +339,7 @@ if p.returncode != 0:
 log.info('Packer Completed')
 
 # update final state in work/images.yaml
+# TODO: do we need to do all of this or just save all the image_configs?
 image_configs.refresh_state(
     step='final',
     only=args.only,

@@ -29,16 +29,15 @@ if os.path.join(os.getcwd(), venv_args[0]) != sys.executable:
 
 import argparse
 import logging
 from pathlib import Path
 from ruamel.yaml import YAML
 
 import clouds
-from image_configs import ImageConfigManager
+from image_config_manager import ImageConfigManager
 
 
 ### Constants & Variables
 
-ACTIONS = ['build', 'upload', 'import', 'publish', 'release']
+ACTIONS = ['local', 'upload', 'import', 'publish', 'release']
 LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'

@@ -78,26 +77,22 @@ yaml.explicit_start = True
 for image_key in args.image_keys:
     image_config = configs.get(image_key)
 
-    if args.action == 'build':
+    if args.action == 'local':
         image_config.convert_image()
 
     elif args.action == 'upload':
-        # TODO: image_config.upload_image()
-        pass
+        if image_config.storage:
+            image_config.upload_image()
 
     elif args.action == 'import':
         clouds.import_image(image_config)
 
     elif args.action == 'publish':
-        # TODO: we should probably always ensure the directory exists
-        os.makedirs(image_config.local_dir, exist_ok=True)
-        # TODO: save artifacts to image_config itself
-        artifacts = clouds.publish_image(image_config)
-        yaml.dump(artifacts, image_config.artifacts_yaml)
+        clouds.publish_image(image_config)
 
     elif args.action == 'release':
         pass
+        # TODO: image_config.release_image() - configurable steps to take on remote host
 
     # save per-image metadata
-    image_config.save_metadata(upload=(False if args.action =='build' else True))
+    image_config.save_metadata(args.action)

@@ -1,6 +1,6 @@
 # vim: ts=4 et:
 
-from . import aws  # , oci, gcp, azure
+from . import aws, nocloud, azure, gcp, oci
 
 ADAPTERS = {}

@@ -12,7 +12,13 @@ def register(*mods):
             ADAPTERS[cloud] = p
 
 
-register(aws)  # , oci, azure, gcp)
+register(
+    aws,        # well-tested and fully supported
+    nocloud,    # beta, but supported
+    azure,      # alpha, needs testing, lacks import and publish
+    gcp,        # alpha, needs testing, lacks import and publish
+    oci,        # alpha, needs testing, lacks import and publish
+)
 
 
 # using a credential provider is optional, set across all adapters

@@ -25,8 +31,9 @@ def set_credential_provider(debug=False):
 
 ### forward to the correct adapter
 
-def latest_build_image(config):
-    return ADAPTERS[config.cloud].latest_build_image(
+# TODO: latest_imported_tags(...)
+def get_latest_imported_tags(config):
+    return ADAPTERS[config.cloud].get_latest_imported_tags(
         config.project,
         config.image_key
     )

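The module-level functions simply forward to whichever adapter was registered for the config's cloud. A condensed, self-contained sketch of that dispatch pattern (wiring simplified, `EchoAdapter` invented for the example):

```python
# Condensed sketch of the registry + dispatch pattern above: each cloud module
# registers an adapter keyed by cloud name; module-level calls route on it.
class EchoAdapter:
    def get_latest_imported_tags(self, project, image_key):
        return f'{project}/{image_key}'    # stand-in for real adapter work

ADAPTERS = {'aws': EchoAdapter()}

def get_latest_imported_tags(config):
    return ADAPTERS[config['cloud']].get_latest_imported_tags(
        config['project'], config['image_key'])

print(get_latest_imported_tags(
    {'cloud': 'aws', 'project': 'p', 'image_key': 'k'}))  # p/k
```
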
@@ -4,13 +4,13 @@
 import logging
 import hashlib
 import os
+import subprocess
 import time
 
 from datetime import datetime
-from subprocess import run
 
 from .interfaces.adapter import CloudAdapterInterface
-from image_configs import Tags, DictObj
+from image_tags import DictObj, ImageTags
 
 
 class AWSCloudAdapter(CloudAdapterInterface):

@@ -38,7 +38,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
         try:
             import boto3
         except ModuleNotFoundError:
-            run(['work/bin/pip', 'install', '-U', 'boto3'])
+            subprocess.run(['work/bin/pip', 'install', '-U', 'boto3'])
             import boto3
 
         self._sdk = boto3

@@ -91,19 +91,20 @@ class AWSCloudAdapter(CloudAdapterInterface):
             ec2r.images.filter(**req), key=lambda k: k.creation_date, reverse=True)
 
-    # necessary cloud-agnostic image info
+    # TODO: still necessary?  maybe just incorporate into new latest_imported_tags()?
     def _image_info(self, i):
-        tags = Tags(from_list=i.tags)
+        tags = ImageTags(from_list=i.tags)
         return DictObj({k: tags.get(k, None) for k in self.IMAGE_INFO})
 
-    # get the latest imported image for a given build name
-    def latest_build_image(self, project, image_key):
+    # get the latest imported image's tags for a given build key
+    def get_latest_imported_tags(self, project, image_key):
         images = self._get_images_with_tags(
             project=project,
             image_key=image_key,
         )
         if images:
             # first one is the latest
-            return self._image_info(images[0])
+            return ImageTags(from_list=images[0].tags)
 
         return None

@@ -228,7 +229,9 @@ class AWSCloudAdapter(CloudAdapterInterface):
                 snapshot.delete()
                 raise
 
-        return self._image_info(image)
+        # update ImageConfig with imported tag values, minus special AWS 'Name'
+        tags.pop('Name', None)
+        ic.__dict__ |= tags
 
     # delete an (unpublished) image
     def delete_image(self, image_id):

@@ -245,7 +248,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
     # publish an image
     def publish_image(self, ic):
         log = logging.getLogger('publish')
-        source_image = self.latest_build_image(
+        source_image = self.get_latest_imported_tags(
             ic.project,
             ic.image_key,
         )

@@ -330,7 +333,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
             if image.state == 'available':
                 # tag image
                 log.info('%s: Adding tags to %s', r, image.id)
-                image_tags = Tags(from_list=image.tags)
+                image_tags = ImageTags(from_list=image.tags)
                 fresh = False
                 if 'published' not in image_tags:
                     fresh = True

@@ -387,7 +390,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
                 time.sleep(copy_wait)
                 copy_wait = 30
 
-        return artifacts
+        ic.artifacts = artifacts
 
 
 def register(cloud, cred_provider=None):

@@ -0,0 +1,22 @@
+from .interfaces.adapter import CloudAdapterInterface
+
+# NOTE: This stub allows images to be built locally and uploaded to storage,
+# but code for automated importing and publishing of images for this cloud
+# publisher has not yet been written.
+
+class AzureCloudAdapter(CloudAdapterInterface):
+
+    def get_latest_imported_tags(self, project, image_key):
+        return None
+
+    def import_image(self, ic):
+        pass
+
+    def delete_image(self, config, image_id):
+        pass
+
+    def publish_image(self, ic):
+        pass
+
+def register(cloud, cred_provider=None):
+    return AzureCloudAdapter(cloud, cred_provider)

@@ -0,0 +1,22 @@
+from .interfaces.adapter import CloudAdapterInterface
+
+# NOTE: This stub allows images to be built locally and uploaded to storage,
+# but code for automated importing and publishing of images for this cloud
+# publisher has not yet been written.
+
+class GCPCloudAdapter(CloudAdapterInterface):
+
+    def get_latest_imported_tags(self, project, image_key):
+        return None
+
+    def import_image(self, ic):
+        pass
+
+    def delete_image(self, config, image_id):
+        pass
+
+    def publish_image(self, ic):
+        pass
+
+def register(cloud, cred_provider=None):
+    return GCPCloudAdapter(cloud, cred_provider)

@@ -27,7 +27,7 @@ class CloudAdapterInterface:
     def session(self, region=None):
         raise NotImplementedError
 
-    def latest_build_image(self, project, image_key):
+    def get_latest_imported_tags(self, project, image_key):
         raise NotImplementedError
 
     def import_image(self, config):

@@ -0,0 +1,21 @@
+from .interfaces.adapter import CloudAdapterInterface
+
+# NOTE: NoCloud images are never imported or published because there's
+# no actual cloud provider associated with them.
+
+class NoCloudAdapter(CloudAdapterInterface):
+
+    def get_latest_imported_tags(self, project, image_key):
+        return None
+
+    def import_image(self, ic):
+        pass
+
+    def delete_image(self, config, image_id):
+        pass
+
+    def publish_image(self, ic):
+        pass
+
+def register(cloud, cred_provider=None):
+    return NoCloudAdapter(cloud, cred_provider)

@@ -0,0 +1,22 @@
+from .interfaces.adapter import CloudAdapterInterface
+
+# NOTE: This stub allows images to be built locally and uploaded to storage,
+# but code for automated importing and publishing of images for this cloud
+# publisher has not yet been written.
+
+class OCICloudAdapter(CloudAdapterInterface):
+
+    def get_latest_imported_tags(self, project, image_key):
+        return None
+
+    def import_image(self, ic):
+        pass
+
+    def delete_image(self, config, image_id):
+        pass
+
+    def publish_image(self, ic):
+        pass
+
+def register(cloud, cred_provider=None):
+    return OCICloudAdapter(cloud, cred_provider)

@@ -40,9 +40,10 @@ Default {
   image_format = qcow2
 
   # these paths are subject to change, as image downloads are developed
-  storage_url   = "ssh://tomalok@dev.alpinelinux.org/public_html/alpine-cloud-images/{v_version}/cloud/{cloud}"
-  download_url  = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images/{v_version}/cloud/{cloud}"  # development
-  #download_url = "https://dl-cdn.alpinelinux.org/alpine/{v_version}/cloud/{cloud}"
+  storage_url   = "ssh://tomalok@dev.alpinelinux.org/public_html/alpine-cloud-images/{v_version}/cloud/{cloud}/{arch}"
+  #storage_url  = "file://~jake/tmp/alpine-cloud-images/{v_version}/cloud/{cloud}/{arch}"
+  download_url  = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images/{v_version}/cloud/{cloud}/{arch}"  # development
+  #download_url = "https://dl-cdn.alpinelinux.org/alpine/{v_version}/cloud/{cloud}/{arch}"
 
   # image access
   access.PUBLIC = true

@@ -73,7 +74,12 @@ Dimensions {
     cloudinit { include required("bootstrap/cloudinit.conf") }
   }
   cloud {
-    aws { include required("cloud/aws.conf") }
+    aws     { include required("cloud/aws.conf") }
+    nocloud { include required("cloud/nocloud.conf") }
+    # these are considered "alpha"
+    azure   { include required("cloud/azure.conf") }
+    gcp     { include required("cloud/gcp.conf") }
+    oci     { include required("cloud/oci.conf") }
   }
 }

@@ -88,4 +94,12 @@ Mandatory {
 
   # final provisioning script
   scripts = [ cleanup ]
+
+  # TODO: remove this after testing
+  #access.PUBLIC = false
+  #regions {
+  #  ALL = false
+  #  us-west-2 = true
+  #  us-east-1 = true
+  #}
 }

@@ -25,9 +25,11 @@ WHEN {
       }
     }
   }
-  # azure.packages.tiny-cloud-azure = true
-  # gcp.packages.tiny-cloud-gcp = true
-  # oci.packages.tiny-cloud-oci = true
+  # other per-cloud packages
+  nocloud.packages.tiny-cloud-nocloud = true
+  azure.packages.tiny-cloud-azure = true
+  gcp.packages.tiny-cloud-gcp = true
+  oci.packages.tiny-cloud-oci = true
 }
 
 scripts = [ setup-tiny ]

@@ -14,6 +14,8 @@ initfs_features {
   nvme = true
 }
 
+# TODO: what about IPv6-only networks?
+# maybe we only set it for <= 3.17, and leave it to dhcpcd?
 ntp_server = 169.254.169.123
 
 access.PUBLIC = true

@@ -0,0 +1,9 @@
+# vim: ts=2 et:
+cloud_name = Microsoft Azure (alpha)
+image_format = vhd
+
+# start with 3.18
+EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16", "3.17"]
+
+# TODO: https://learn.microsoft.com/en-us/azure/virtual-machines/linux/time-sync
+ntp_server = ""

@@ -0,0 +1,15 @@
+# vim: ts=2 et:
+cloud_name = Google Cloud Platform (alpha)
+# TODO: https://cloud.google.com/compute/docs/import/importing-virtual-disks
+# Mentions "VHD" but also mentions "..." if that also includes QCOW2, then
+# we should use that instead.  The "Manual Import" section on the sidebar
+# has a "Manually import boot disks" subpage which also mentions importing
+# compressed raw images...  We would prefer to avoid that if possible.
+image_format = vhd
+
+# start with 3.18
+EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16", "3.17"]
+
+# TODO: https://cloud.google.com/compute/docs/instances/configure-ntp
+# (metadata.google.internal)
+ntp_server = ""

@@ -0,0 +1,8 @@
+# vim: ts=2 et:
+cloud_name = NoCloud
+image_format = qcow2
+
+# start with 3.18
+EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16", "3.17"]
+
+ntp_server = ""

@@ -0,0 +1,8 @@
+# vim: ts=2 et:
+cloud_name = Oracle Cloud Infrastructure (alpha)
+image_format = qcow2
+
+# start with 3.18
+EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16", "3.17"]
+
+ntp_server = "169.254.169.254"

@@ -0,0 +1,8 @@
+# vim: ts=2 et:
+
+include required("4.conf")
+
+packages {
+  # start using dhcpcd for improved IPv6 experience
+  dhcpcd = true
+}

@@ -1,9 +1,9 @@
 # vim: ts=2 et:
 
-include required("base/4.conf")
+include required("base/5.conf")
 
 motd {
-  sudo_removed = "NOTE: 'sudo' is no longer installed by default, please use 'doas' instead."
+  sudo_removed = "NOTE: 'sudo' is not installed by default, please use 'doas' instead."
 }
 
 # clear out inherited repos

@@ -38,7 +38,7 @@ from collections import defaultdict
 from ruamel.yaml import YAML
 
 import clouds
-from image_configs import ImageConfigManager
+from image_config_manager import ImageConfigManager
 
 
 ### Constants & Variables

@@ -162,7 +162,8 @@ for i_key, i_cfg in configs.get().items():
             }
             versions[version]['images'][image_name]['downloads'][cloud] |= {
                 'cloud': cloud,
-                'image_url': i_cfg.download_url,
+                'image_format': i_cfg.image_format,
+                'image_url': i_cfg.download_url + '/' + (i_cfg.image_name)
             }
             versions[version]['images'][image_name]['regions'][region] |= {
                 'cloud': cloud,

@@ -1,8 +1,6 @@
 # vim: ts=4 et:
 
 import hashlib
-import itertools
 import logging
 import mergedeep
 import os
-import pyhocon

@@ -11,177 +9,10 @@ import shutil
 from copy import deepcopy
 from datetime import datetime
 from pathlib import Path
-from ruamel.yaml import YAML
-from subprocess import Popen, PIPE
-from urllib.parse import urlparse
 
 import clouds
+from image_storage import ImageStorage, run
+from image_tags import ImageTags
 
 
-class ImageConfigManager():
-
-    def __init__(self, conf_path, yaml_path, log=__name__, alpine=None):
-        self.conf_path = Path(conf_path)
-        self.yaml_path = Path(yaml_path)
-        self.log = logging.getLogger(log)
-        self.alpine = alpine
-
-        self.now = datetime.utcnow()
-        self._configs = {}
-
-        self.yaml = YAML()
-        self.yaml.register_class(ImageConfig)
-        self.yaml.explicit_start = True
-        # hide !ImageConfig tag from Packer
-        self.yaml.representer.org_represent_mapping = self.yaml.representer.represent_mapping
-        self.yaml.representer.represent_mapping = self._strip_yaml_tag_type
-
-        # load resolved YAML, if exists
-        if self.yaml_path.exists():
-            self._load_yaml()
-        else:
-            self._resolve()
-
-    def get(self, key=None):
-        if not key:
-            return self._configs
-
-        return self._configs[key]
-
-    # load already-resolved YAML configs, restoring ImageConfig objects
-    def _load_yaml(self):
-        self.log.info('Loading existing %s', self.yaml_path)
-        for key, config in self.yaml.load(self.yaml_path).items():
-            self._configs[key] = ImageConfig(key, config, log=self.log, yaml=self.yaml)
-        # TODO: also pull in additional per-image metatdata from the build process?
-
-    # save resolved configs to YAML
-    def _save_yaml(self):
-        self.log.info('Saving %s', self.yaml_path)
-        self.yaml.dump(self._configs, self.yaml_path)
-
-    # hide !ImageConfig tag from Packer
-    def _strip_yaml_tag_type(self, tag, mapping, flow_style=None):
-        if tag == '!ImageConfig':
-            tag = u'tag:yaml.org,2002:map'
-
-        return self.yaml.representer.org_represent_mapping(tag, mapping, flow_style=flow_style)
-
-    # resolve from HOCON configs
-    def _resolve(self):
-        self.log.info('Generating configs.yaml in work environment')
-        cfg = pyhocon.ConfigFactory.parse_file(self.conf_path)
-        # set version releases
-        for v, vcfg in cfg.Dimensions.version.items():
-            # version keys are quoted to protect dots
-            self._set_version_release(v.strip('"'), vcfg)
-
-        dimensions = list(cfg.Dimensions.keys())
-        self.log.debug('dimensions: %s', dimensions)
-
-        for dim_keys in (itertools.product(*cfg['Dimensions'].values())):
-            config_key = '-'.join(dim_keys).replace('"', '')
-
-            # dict of dimension -> dimension_key
-            dim_map = dict(zip(dimensions, dim_keys))
-
-            # replace version with release, and make image_key from that
-            release = cfg.Dimensions.version[dim_map['version']].release
-            (rel_map := dim_map.copy())['version'] = release
-            image_key = '-'.join(rel_map.values())
-
-            image_config = ImageConfig(
-                config_key,
-                {
-                    'image_key': image_key,
-                    'release': release
-                } | dim_map,
-                log=self.log,
-                yaml=self.yaml
-            )
-
-            # merge in the Default config
-            image_config._merge(cfg.Default)
-            skip = False
-            # merge in each dimension key's configs
-            for dim, dim_key in dim_map.items():
-                dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key])
-
-                image_config._merge(dim_cfg)
-
-                # now that we're done with ConfigTree/dim_cfg, remove " from dim_keys
-                dim_keys = set(k.replace('"', '') for k in dim_keys)
-
-                # WHEN blocks inside WHEN blocks are considered "and" operations
-                while (when := image_config._pop('WHEN', None)):
-                    for when_keys, when_conf in when.items():
-                        # WHEN keys with spaces are considered "or" operations
-                        if len(set(when_keys.split(' ')) & dim_keys) > 0:
-                            image_config._merge(when_conf)
-
-                exclude = image_config._pop('EXCLUDE', None)
-                if exclude and set(exclude) & set(dim_keys):
-                    self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude)
-                    skip = True
-                    break
-
-                if eol := image_config._get('end_of_life', None):
-                    if self.now > datetime.fromisoformat(eol):
-                        self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol)
-                        skip = True
-                        break
-
-            if skip is True:
-                continue
-
-            # merge in the Mandatory configs at the end
-            image_config._merge(cfg.Mandatory)
-
-            # clean stuff up
-            image_config._normalize()
-            image_config.qemu['iso_url'] = self.alpine.virt_iso_url(arch=image_config.arch)
-
-            # we've resolved everything, add tags attribute to config
-            self._configs[config_key] = image_config
-
-        self._save_yaml()
-
-    # set current version release
-    def _set_version_release(self, v, c):
-        info = self.alpine.version_info(v)
-        c.put('release', info['release'])
-        c.put('end_of_life', info['end_of_life'])
-        c.put('release_notes', info['notes'])
-
-        # release is also appended to name & description arrays
-        c.put('name', [c.release])
-        c.put('description', [c.release])
-
-    # update current config status
-    def refresh_state(self, step, only=[], skip=[], revise=False):
-        self.log.info('Refreshing State')
-        has_actions = False
-        for ic in self._configs.values():
-            # clear away any previous actions
-            if hasattr(ic, 'actions'):
-                delattr(ic, 'actions')
-
-            dim_keys = set(ic.config_key.split('-'))
-            if only and len(set(only) & dim_keys) != len(only):
-                self.log.debug("%s SKIPPED, doesn't match --only", ic.config_key)
-                continue
-
-            if skip and len(set(skip) & dim_keys) > 0:
-                self.log.debug('%s SKIPPED, matches --skip', ic.config_key)
-                continue
-
-            ic.refresh_state(step, revise)
-            if not has_actions and len(ic.actions):
-                has_actions = True
-
-        # re-save with updated actions
-        self._save_yaml()
-        return has_actions
 
 
 class ImageConfig():

@@ -194,10 +25,14 @@ class ImageConfig():
     OPTIONAL_TAGS = [
         'built', 'uploaded', 'imported', 'import_id', 'import_region', 'published', 'released'
     ]
+    STEPS = [
+        'local', 'upload', 'import', 'publish', 'release'
+    ]
 
     def __init__(self, config_key, obj={}, log=None, yaml=None):
         self._log = log
         self._yaml = yaml
+        self._storage = None
         self.config_key = str(config_key)
        tags = obj.pop('tags', None)
        self.__dict__ |= self._deep_dict(obj)

@@ -226,13 +61,9 @@ class ImageConfig():
         return Path('work/images') / self.cloud / self.image_key
 
     @property
-    def local_path(self):
+    def local_image(self):
         return self.local_dir / ('image.qcow2')
 
     @property
-    def artifacts_yaml(self):
-        return self.local_dir / 'artifacts.yaml'
-
-    @property
     def image_name(self):
         return self.name.format(**self.__dict__)

@@ -250,13 +81,9 @@ class ImageConfig():
         return self.local_dir / self.image_file
 
     @property
-    def image_metadata_file(self):
+    def metadata_file(self):
         return '.'.join([self.image_name, 'yaml'])
 
-    @property
-    def image_metadata_path(self):
-        return self.local_dir / self.image_metadata_file
-
     def region_url(self, region, image_id):
         return self.cloud_region_url.format(region=region, image_id=image_id, **self.__dict__)

@@ -285,7 +112,7 @@ class ImageConfig():
             if self.__dict__.get(k, None):
                 t[k] = self.__dict__[k]
 
-        return Tags(t)
+        return ImageTags(t)
 
     # recursively convert a ConfigTree object to a dict object
     def _deep_dict(self, layer):

@@ -325,6 +152,7 @@ class ImageConfig():
         self.name = '-'.join(self.name)
         self.description = ' '.join(self.description)
         self._resolve_motd()
+        self._resolve_urls()
         self._stringify_repos()
         self._stringify_packages()
         self._stringify_services()

@@ -350,6 +178,13 @@ class ImageConfig():
 
         self.motd = '\n\n'.join(motd.values()).format(**self.__dict__)
 
+    def _resolve_urls(self):
+        if 'storage_url' in self.__dict__:
+            self.storage_url = self.storage_url.format(v_version=self.v_version, **self.__dict__)
+
+        if 'download_url' in self.__dict__:
+            self.download_url = self.download_url.format(v_version=self.v_version, **self.__dict__)
+
     def _stringify_repos(self):
         # stringify repos map
         #   <repo>: <tag>   # @<tag> <repo> enabled

@@ -420,28 +255,60 @@ class ImageConfig():
                 for m, v in self.__dict__[d].items()
             )))
 
+    def _is_step_or_earlier(self, s, step):
+        log = self._log
+        if step == 'state':
+            return True
+
+        if step not in self.STEPS:
+            return False
+
+        return self.STEPS.index(s) <= self.STEPS.index(step)
+
+    # TODO: this needs to be sorted out for 'upload' and 'release' steps
     def refresh_state(self, step, revise=False):
         log = self._log
         actions = {}
         revision = 0
-        remote_image = clouds.latest_build_image(self)
-        log.debug('\n%s', remote_image)
         step_state = step == 'state'
+        step_rollback = step == 'rollback'
+        undo = {}
 
-        # enable actions based on the specified step
-        if step in ['local', 'import', 'publish', 'state']:
-            actions['build'] = True
+        # enable initial set of possible actions based on specified step
+        for s in self.STEPS:
+            if self._is_step_or_earlier(s, step):
+                actions[s] = True
 
-        if step in ['import', 'publish', 'state']:
-            actions['import'] = True
+        # pick up any updated image metadata
+        self.load_metadata()
 
-        if step in ['publish', 'state']:
-            # we will resolve publish destinations (if any) later
-            actions['publish'] = True
+        # TODO: check storage and/or cloud - use this instead of remote_image
+        # latest_revision = self.get_latest_revision()
+
+        if (step_rollback or revise) and self.local_image.exists():
+            undo['local'] = True
+
+        if step_rollback:
+            if self.local_image.exists():
+                undo['local'] = True
+
+            if not self.published or self.released:
+                if self.uploaded:
+                    undo['upload'] = True
+
+                if self.imported:
+                    undo['import'] = True
+
+        # TODO: rename to 'remote_tags'?
+        # if we load remote tags into state automatically, shouldn't that info already be in self?
+        remote_image = clouds.get_latest_imported_tags(self)
+        log.debug('\n%s', remote_image)
 
         if revise:
-            if self.local_path.exists():
+            if self.local_image.exists():
                 # remove previously built local image artifacts
                 log.warning('%s existing local image dir %s',
                     'Would remove' if step_state else 'Removing',

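Worked examples for the `_is_step_or_earlier()` helper added above, using the `STEPS` list from this diff (the standalone function here is a copy for illustration):

```python
# 'state' enables everything, unknown steps enable nothing, and otherwise a
# step is enabled iff it does not come after the requested target step.
STEPS = ['local', 'upload', 'import', 'publish', 'release']

def is_step_or_earlier(s, step):
    if step == 'state':
        return True
    if step not in STEPS:
        return False
    return STEPS.index(s) <= STEPS.index(step)

assert is_step_or_earlier('local', 'import')        # earlier step: enabled
assert not is_step_or_earlier('publish', 'import')  # later step: disabled
assert is_step_or_earlier('release', 'state')       # 'state' enables all
assert not is_step_or_earlier('local', 'bogus')     # unknown step: disabled
```
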
@@ -449,13 +316,13 @@ class ImageConfig():
                 if not step_state:
                     shutil.rmtree(self.local_dir)
 
-            if remote_image and remote_image.published:
+            if remote_image and remote_image.get('published', None):
                 log.warning('%s image revision for %s',
                     'Would bump' if step_state else 'Bumping',
                     self.image_key)
                 revision = int(remote_image.revision) + 1
 
-            elif remote_image and remote_image.imported:
+            elif remote_image and remote_image.get('imported', None):
                 # remove existing imported (but unpublished) image
                 log.warning('%s unpublished remote image %s',
                     'Would remove' if step_state else 'Removing',

@@ -466,20 +333,24 @@ class ImageConfig():
                 remote_image = None
 
         elif remote_image:
-            if remote_image.imported:
-                # already imported, don't build/import again
+            if remote_image.get('imported', None):
+                # already imported, don't build/upload/import again
                 log.debug('%s - already imported', self.image_key)
-                actions.pop('build', None)
+                actions.pop('local', None)
+                actions.pop('upload', None)
                 actions.pop('import', None)
 
-            if remote_image.published:
+            if remote_image.get('published', None):
                 # NOTE: re-publishing can update perms or push to new regions
                 log.debug('%s - already published', self.image_key)
 
-        if self.local_path.exists():
+        if self.local_image.exists():
             # local image's already built, don't rebuild
             log.debug('%s - already locally built', self.image_key)
-            actions.pop('build', None)
+            actions.pop('local', None)
 
         else:
             self.built = None
 
         # merge remote_image data into image state
         if remote_image:

@@ -488,96 +359,106 @@ class ImageConfig():
         else:
             self.__dict__ |= {
                 'revision': revision,
+                'uploaded': None,
                 'imported': None,
                 'import_id': None,
                 'import_region': None,
                 'published': None,
+                'artifacts': None,
+                'released': None,
             }
 
-        # update artifacts, if we've got 'em
-        if self.artifacts_yaml.exists():
-            self.artifacts = self.yaml.load(self.artifacts_yaml)
-
-        else:
-            self.artifacts = None
+        # remove remaining actions not possible based on specified step
+        for s in self.STEPS:
+            if not self._is_step_or_earlier(s, step):
+                actions.pop(s, None)
 
         self.actions = list(actions)
         log.info('%s/%s = %s', self.cloud, self.image_name, self.actions)
 
         self.state_updated = datetime.utcnow().isoformat()
 
-    def _run(self, cmd, errmsg=None, errvals=[]):
-        log = self._log
-        p = Popen(cmd, stdout=PIPE, stdin=PIPE, encoding='utf8')
-        out, err = p.communicate()
-        if p.returncode:
-            if log:
-                if errmsg:
-                    log.error(errmsg, *errvals)
-
-                log.error('COMMAND: %s', ' '.join(cmd))
-                log.error('EXIT: %d', p.returncode)
-                log.error('STDOUT:\n%s', out)
-                log.error('STDERR:\n%s', err)
-
-            raise RuntimeError
-
-        return out, err
+    @property
+    def storage(self):
+        if self._storage is None:
+            self._storage = ImageStorage(self.local_dir, self.storage_url, log=self._log)
+
+        return self._storage
 
     def _save_checksum(self, file):
         self._log.info("Calculating checksum for '%s'", file)
         sha256_hash = hashlib.sha256()
         sha512_hash = hashlib.sha512()
         with open(file, 'rb') as f:
             for block in iter(lambda: f.read(4096), b''):
                 sha256_hash.update(block)
                 sha512_hash.update(block)
 
         with open(str(file) + '.sha256', 'w') as f:
             print(sha256_hash.hexdigest(), file=f)
 
         with open(str(file) + '.sha512', 'w') as f:
             print(sha512_hash.hexdigest(), file=f)
 
     # convert local QCOW2 to format appropriate for a cloud
     def convert_image(self):
-        self._log.info('Converting %s to %s', self.local_path, self.image_path)
-        self._run(
-            self.CONVERT_CMD[self.image_format] + [self.local_path, self.image_path],
-            errmsg='Unable to convert %s to %s', errvals=[self.local_path, self.image_path],
+        self._log.info('Converting %s to %s', self.local_image, self.image_path)
+        run(
+            self.CONVERT_CMD[self.image_format] + [self.local_image, self.image_path],
+            log=self._log, errmsg='Unable to convert %s to %s',
+            errvals=[self.local_image, self.image_path]
         )
         self._save_checksum(self.image_path)
         self.built = datetime.utcnow().isoformat()
 
-    def save_metadata(self, upload=True):
+    def upload_image(self):
+        self.storage.store(
+            self.image_file,
+            self.image_file + '.sha256',
+            self.image_file + '.sha512'
+        )
+        self.uploaded = datetime.utcnow().isoformat()
+
+    def save_metadata(self, action):
         os.makedirs(self.local_dir, exist_ok=True)
         self._log.info('Saving image metadata')
-        self._yaml.dump(dict(self.tags), self.image_metadata_path)
-        self._save_checksum(self.image_metadata_path)
+        # TODO: save metadata updated timestamp as metadata?
+        # TODO: def self.metadata to return what we consider metadata?
+        metadata = dict(self.tags)
+        self.metadata_updated = datetime.utcnow().isoformat()
+        metadata |= {
+            'artifacts': self._get('artifacts', None),
+            'metadata_updated': self.metadata_updated
+        }
+        metadata_path = self.local_dir / self.metadata_file
+        self._yaml.dump(metadata, metadata_path)
+        self._save_checksum(metadata_path)
+        if action != 'local' and self.storage:
+            self.storage.store(
+                self.metadata_file,
+                self.metadata_file + '.sha256',
+                self.metadata_file + '.sha512'
+            )
+
+    def load_metadata(self):
+        # TODO: what if we have fresh configs, but the image is already uploaded/imported?
+        #       we'll need to get revision first somehow
+        if 'revision' not in self.__dict__:
+            return
+
+        # TODO: revision = '*' for now - or only if unknown?
+
+        # get a list of local matching <name>-r*.yaml?
+        metadata_path = self.local_dir / self.metadata_file
+        if metadata_path.exists():
+            self._log.info('Loading image metadata from %s', metadata_path)
+            self.__dict__ |= self._yaml.load(metadata_path).items()
+
+        # get a list of storage matching <name>-r*.yaml
+        #else:
+        #    retrieve metadata (and image?) from storage_url
+        # else:
+        #    retrieve metadata from imported image
+
+        # if there's no stored metadata, we are in transition,
+        # get a list of imported images matching <name>-r*.yaml
-
-
-class DictObj(dict):
-
-    def __getattr__(self, key):
-        return self[key]
-
-    def __setattr__(self, key, value):
-        self[key] = value
-
-    def __delattr__(self, key):
-        del self[key]
-
-
-class Tags(DictObj):
-
-    def __init__(self, d={}, from_list=None, key_name='Key', value_name='Value'):
-        for key, value in d.items():
-            self.__setattr__(key, value)
-
-        if from_list:
-            self.from_list(from_list, key_name, value_name)
-
-    def __setattr__(self, key, value):
-        self[key] = str(value)
-
-    def as_list(self, key_name='Key', value_name='Value'):
-        return [{key_name: k, value_name: v} for k, v in self.items()]
-
-    def from_list(self, list=[], key_name='Key', value_name='Value'):
-        for tag in list:
-            self.__setattr__(tag[key_name], tag[value_name])

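An illustrative round-trip for the `save_metadata()`/`load_metadata()` pair above: metadata is the image's tags plus `artifacts` and a `metadata_updated` timestamp, dumped as YAML beside the image and merged back into the config on load. The path and values below are made up for the example.

```python
from pathlib import Path
from ruamel.yaml import YAML

yaml = YAML()
path = Path('work/images/aws/example/example-r0.yaml')
path.parent.mkdir(parents=True, exist_ok=True)
# roughly what save_metadata() writes...
yaml.dump({'revision': '0', 'artifacts': None,
           'metadata_updated': '2023-01-01T00:00:00'}, path)
# ...and what load_metadata() merges back into ImageConfig.__dict__
loaded = dict(yaml.load(path))
assert loaded['revision'] == '0'
```
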
@@ -0,0 +1,178 @@
+# vim: ts=4 et:
+
+import itertools
+import logging
+import pyhocon
+
+from copy import deepcopy
+from datetime import datetime
+from pathlib import Path
+from ruamel.yaml import YAML
+
+from image_config import ImageConfig
+
+
+class ImageConfigManager():
+
+    def __init__(self, conf_path, yaml_path, log=__name__, alpine=None):
+        self.conf_path = Path(conf_path)
+        self.yaml_path = Path(yaml_path)
+        self.log = logging.getLogger(log)
+        self.alpine = alpine
+
+        self.now = datetime.utcnow()
+        self._configs = {}
+
+        self.yaml = YAML()
+        self.yaml.register_class(ImageConfig)
+        self.yaml.explicit_start = True
+        # hide !ImageConfig tag from Packer
+        self.yaml.representer.org_represent_mapping = self.yaml.representer.represent_mapping
+        self.yaml.representer.represent_mapping = self._strip_yaml_tag_type
+
+        # load resolved YAML, if exists
+        if self.yaml_path.exists():
+            self._load_yaml()
+        else:
+            self._resolve()
+
+    def get(self, key=None):
+        if not key:
+            return self._configs
+
+        return self._configs[key]
+
+    # load already-resolved YAML configs, restoring ImageConfig objects
+    def _load_yaml(self):
+        self.log.info('Loading existing %s', self.yaml_path)
+        for key, config in self.yaml.load(self.yaml_path).items():
+            self._configs[key] = ImageConfig(key, config, log=self.log, yaml=self.yaml)
+
+    # save resolved configs to YAML
+    def _save_yaml(self):
+        self.log.info('Saving %s', self.yaml_path)
+        self.yaml.dump(self._configs, self.yaml_path)
+
+    # hide !ImageConfig tag from Packer
+    def _strip_yaml_tag_type(self, tag, mapping, flow_style=None):
+        if tag == '!ImageConfig':
+            tag = u'tag:yaml.org,2002:map'
+
+        return self.yaml.representer.org_represent_mapping(tag, mapping, flow_style=flow_style)
+
+    # resolve from HOCON configs
+    def _resolve(self):
+        self.log.info('Generating configs.yaml in work environment')
+        cfg = pyhocon.ConfigFactory.parse_file(self.conf_path)
+        # set version releases
+        for v, vcfg in cfg.Dimensions.version.items():
+            # version keys are quoted to protect dots
+            self._set_version_release(v.strip('"'), vcfg)
+
+        dimensions = list(cfg.Dimensions.keys())
+        self.log.debug('dimensions: %s', dimensions)
+
+        for dim_keys in (itertools.product(*cfg['Dimensions'].values())):
+            config_key = '-'.join(dim_keys).replace('"', '')
+
+            # dict of dimension -> dimension_key
+            dim_map = dict(zip(dimensions, dim_keys))
+
+            # replace version with release, and make image_key from that
+            release = cfg.Dimensions.version[dim_map['version']].release
+            (rel_map := dim_map.copy())['version'] = release
+            image_key = '-'.join(rel_map.values())
+
+            image_config = ImageConfig(
+                config_key,
+                {
+                    'image_key': image_key,
+                    'release': release
+                } | dim_map,
+                log=self.log,
+                yaml=self.yaml
+            )
+
+            # merge in the Default config
+            image_config._merge(cfg.Default)
+            skip = False
+            # merge in each dimension key's configs
+            for dim, dim_key in dim_map.items():
+                dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key])
+
+                image_config._merge(dim_cfg)
+
+                # now that we're done with ConfigTree/dim_cfg, remove " from dim_keys
+                dim_keys = set(k.replace('"', '') for k in dim_keys)
+
+                # WHEN blocks inside WHEN blocks are considered "and" operations
+                while (when := image_config._pop('WHEN', None)):
+                    for when_keys, when_conf in when.items():
+                        # WHEN keys with spaces are considered "or" operations
+                        if len(set(when_keys.split(' ')) & dim_keys) > 0:
+                            image_config._merge(when_conf)
+
+                exclude = image_config._pop('EXCLUDE', None)
+                if exclude and set(exclude) & set(dim_keys):
+                    self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude)
+                    skip = True
+                    break
+
+                if eol := image_config._get('end_of_life', None):
+                    if self.now > datetime.fromisoformat(eol):
+                        self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol)
+                        skip = True
+                        break
+
+            if skip is True:
+                continue
+
+            # merge in the Mandatory configs at the end
+            image_config._merge(cfg.Mandatory)
+
+            # clean stuff up
+            image_config._normalize()
+            image_config.qemu['iso_url'] = self.alpine.virt_iso_url(arch=image_config.arch)
+
+            # we've resolved everything, add tags attribute to config
+            self._configs[config_key] = image_config
+
+        self._save_yaml()
+
+    # set current version release
+    def _set_version_release(self, v, c):
+        info = self.alpine.version_info(v)
+        c.put('release', info['release'])
+        c.put('end_of_life', info['end_of_life'])
+        c.put('release_notes', info['notes'])
+
+        # release is also appended to name & description arrays
+        c.put('name', [c.release])
+        c.put('description', [c.release])
+
+    # update current config status
+    def refresh_state(self, step, only=[], skip=[], revise=False):
+        self.log.info('Refreshing State')
+        has_actions = False
+        for ic in self._configs.values():
+            # clear away any previous actions
+            if hasattr(ic, 'actions'):
+                delattr(ic, 'actions')
+
+            dim_keys = set(ic.config_key.split('-'))
+            if only and len(set(only) & dim_keys) != len(only):
+                self.log.debug("%s SKIPPED, doesn't match --only", ic.config_key)
+                continue
+
+            if skip and len(set(skip) & dim_keys) > 0:
+                self.log.debug('%s SKIPPED, matches --skip', ic.config_key)
+                continue
+
+            ic.refresh_state(step, revise)
+            if not has_actions and len(ic.actions):
+                has_actions = True
+
+        # re-save with updated actions
+        self._save_yaml()
+        return has_actions

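A toy illustration of the dimension resolution in `_resolve()` above (dimension values invented): every combination of dimension keys becomes one config; `WHEN` keys act as "or" matches against the combination, and `EXCLUDE` drops it entirely.

```python
import itertools

dimensions = {'version': ['3.18'],
              'arch': ['x86_64', 'aarch64'],
              'cloud': ['aws', 'nocloud']}

for combo in itertools.product(*dimensions.values()):
    if {'aarch64', 'nocloud'} <= set(combo):   # stand-in for an EXCLUDE rule
        continue
    print('-'.join(combo))   # e.g. 3.18-x86_64-aws
```
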
@@ -0,0 +1,183 @@
+# vim: ts=4 et:
+
+import shutil
+import os
+
+from glob import glob
+from pathlib import Path
+from subprocess import Popen, PIPE
+from urllib.parse import urlparse
+
+from image_tags import DictObj
+
+
+def run(cmd, log, errmsg=None, errvals=[]):
+    # ensure command and error values are lists of strings
+    cmd = [str(c) for c in cmd]
+    errvals = [str(ev) for ev in errvals]
+
+    log.debug('COMMAND: %s', ' '.join(cmd))
+    p = Popen(cmd, stdout=PIPE, stdin=PIPE, encoding='utf8')
+    out, err = p.communicate()
+    if p.returncode:
+        if errmsg:
+            log.error(errmsg, *errvals)
+
+        log.error('COMMAND: %s', ' '.join(cmd))
+        log.error('EXIT: %d', p.returncode)
+        log.error('STDOUT:\n%s', out)
+        log.error('STDERR:\n%s', err)
+        raise RuntimeError
+
+    return out, err
+
+
+class ImageStorage():
+
+    def __init__(self, local, storage_url, log):
+        self.log = log
+        self.local = local
+        self.url = storage_url.removesuffix('/')
+        url = urlparse(self.url)
+        if url.scheme not in ['', 'file', 'ssh']:
+            self.log.error('Storage with "%s" scheme is unsupported', url.scheme)
+            raise RuntimeError
+
+        if url.scheme in ['', 'file']:
+            self.scheme = 'file'
+            self.remote = Path(url.netloc + url.path).expanduser()
+
+        else:
+            self.scheme = 'ssh'
+            self.host = url.hostname
+            self.remote = Path(url.path[1:])    # drop leading / -- use // for absolute path
+            self.ssh = DictObj({
+                'port': ['-p', url.port] if url.port else [],
+                'user': ['-l', url.username] if url.username else [],
+            })
+            self.scp = DictObj({
+                'port': ['-P', url.port] if url.port else [],
+                'user': url.username + '@' if url.username else '',
+            })
+
+    def store(self, *files):
+        log = self.log
+        if not files:
+            log.debug('No files to store')
+            return
+
+        src = self.local
+        dest = self.remote
+        if self.scheme == 'file':
+            dest.mkdir(parents=True, exist_ok=True)
+            for file in files:
+                log.info('Storing %s', dest / file)
+                shutil.copy2(src / file, dest / file)
+
+            return
+
+        url = self.url
+        host = self.host
+        ssh = self.ssh
+        scp = self.scp
+        run(
+            ['ssh'] + ssh.port + ssh.user + [host, 'mkdir', '-p', dest],
+            log=log, errmsg='Unable to ensure existence of %s', errvals=[url]
+        )
+        src_files = []
+        for file in files:
+            log.info('Storing %s', url + '/' + file)
+            src_files.append(src / file)
+
+        run(
+            ['scp'] + scp.port + src_files + [scp.user + ':'.join([host, str(dest)])],
+            log=log, errmsg='Failed to store files'
+        )
+
+    def retrieve(self, *files):
+        log = self.log
+        if not files:
+            log.debug('No files to retrieve')
+            return
+
+        src = self.remote
+        dest = self.local
+        dest.mkdir(parents=True, exist_ok=True)
+        if self.scheme == 'file':
+            for file in files:
+                log.info('Retrieving %s', src / file)
+                shutil.copy2(src / file, dest / file)
+
+            return
+
+        url = self.url
+        host = self.host
+        scp = self.scp
+        src_files = []
+        for file in files:
+            log.info('Retrieving %s', url + '/' + file)
+            src_files.append(scp.user + ':'.join([host, str(src / file)]))
+
+        run(
+            ['scp'] + scp.port + src_files + [dest],
+            log=log, errmsg='Failed to retrieve files'
+        )
+
+    # TODO: optional files=[]?
+    def list(self, match=None):
+        log = self.log
+        path = self.remote
+        if not match:
+            match = '*'
+
+        files = []
+        if self.scheme == 'file':
+            path.mkdir(parents=True, exist_ok=True)
+            log.info('Listing of %s files in %s', match, path)
+            files = sorted(glob(str(path / match)), key=os.path.getmtime, reverse=True)
+
+        else:
+            url = self.url
+            host = self.host
+            ssh = self.ssh
+            log.info('Listing %s files at %s', match, url)
+            run(
+                ['ssh'] + ssh.port + ssh.user + [host, 'mkdir', '-p', path],
+                log=log, errmsg='Unable to create path'
+            )
+            out, _ = run(
+                ['ssh'] + ssh.port + ssh.user + [host, 'ls', '-1drt', path / match],
+                log=log, errmsg='Failed to list files'
+            )
+            files = out.splitlines()
+
+        return [os.path.basename(f) for f in files]
+
+    def remove(self, files):
+        log = self.log
+        if not files:
+            log.debug('No files to remove')
+            return
+
+        dest = self.remote
+        if self.scheme == 'file':
+            for file in files:
+                path = dest / file
+                log.info('Removing %s', path)
+                if path.exists():
+                    path.unlink()
+
+            return
+
+        url = self.url
+        host = self.host
+        ssh = self.ssh
+        dest_files = []
+        for file in files:
+            log.info('Removing %s', url + '/' + file)
+            dest_files.append(dest / file)
+
+        run(
+            ['ssh'] + ssh.port + ssh.user + [host, 'rm', '-f'] + dest_files,
+            log=log, errmsg='Failed to remove files'
+        )

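An illustrative use of the new `ImageStorage` class above; the paths and file names here are assumptions. `file://` URLs copy with `shutil`, while `ssh://` URLs shell out to `scp`/`ssh`.

```python
import logging
from pathlib import Path

logging.basicConfig()
local = Path('work/images/nocloud/example')
local.mkdir(parents=True, exist_ok=True)
(local / 'image.qcow2').touch()           # stand-in for a built image

storage = ImageStorage(
    local=local,
    storage_url='file://~/tmp/alpine-cloud-images/3.18/cloud/nocloud/x86_64',
    log=logging.getLogger('storage'),
)
storage.store('image.qcow2')              # copies into the storage tree
print(storage.list('*.qcow2'))            # ['image.qcow2']
```
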
@@ -0,0 +1,32 @@
+# vim: ts=4 et:
+
+class DictObj(dict):
+
+    def __getattr__(self, key):
+        return self[key]
+
+    def __setattr__(self, key, value):
+        self[key] = value
+
+    def __delattr__(self, key):
+        del self[key]
+
+
+class ImageTags(DictObj):
+
+    def __init__(self, d={}, from_list=None, key_name='Key', value_name='Value'):
+        for key, value in d.items():
+            self.__setattr__(key, value)
+
+        if from_list:
+            self.from_list(from_list, key_name, value_name)
+
+    def __setattr__(self, key, value):
+        self[key] = str(value)
+
+    def as_list(self, key_name='Key', value_name='Value'):
+        return [{key_name: k, value_name: v} for k, v in self.items()]
+
+    def from_list(self, list=[], key_name='Key', value_name='Value'):
+        for tag in list:
+            self.__setattr__(tag[key_name], tag[value_name])

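Example usage of the `DictObj`/`ImageTags` helpers defined above (tag values are illustrative): AWS-style `[{'Key': ..., 'Value': ...}]` lists round-trip through attribute-style access, and all values are stored as strings.

```python
tags = ImageTags(from_list=[{'Key': 'release', 'Value': '3.18.0'},
                            {'Key': 'revision', 'Value': 0}])
assert tags.release == '3.18.0'                             # attribute access
assert tags['revision'] == '0'                              # str()-ified on set
assert {'Key': 'release', 'Value': '3.18.0'} in tags.as_list()
```
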
@@ -26,6 +26,18 @@ case "$CLOUD" in
     aws)
         DATASOURCE="Ec2"
         ;;
+    nocloud)
+        DATASOURCE="NoCloud"
+        ;;
+    azure)
+        DATASOURCE="Azure"
+        ;;
+    gcp)
+        DATASOURCE="GCE"
+        ;;
+    oci)
+        DATASOURCE="Oracle"
+        ;;
     *)
         echo "Unsupported Cloud '$CLOUD'" >&2
         exit 1