alpine-cloud-images, part two
This commit is contained in:
parent
9d882bff7a
commit
6674286b46
|
@ -0,0 +1,32 @@
|
|||
# vim: ts=4 et:
|
||||
|
||||
import re
|
||||
from urllib.request import urlopen
|
||||
|
||||
|
||||
# constants and vars
|
||||
CDN_URL = 'https://dl-cdn.alpinelinux.org/alpine'
|
||||
|
||||
|
||||
def get_version_release(alpine_version):
    """Resolve an Alpine version alias (e.g. 'latest-stable') to a
    {'version': 'X.Y', 'release': 'X.Y.Z'} dict, using the version of
    the alpine-base package as the authority."""
    base_apk = get_apk_version(alpine_version, 'main', 'x86_64', 'alpine-base')
    # release is everything before the APK's '-<pkgrel>' suffix
    release, _, _ = base_apk.partition('-')
    # version is just the major.minor prefix of the release
    major_minor = release.split('.')[:2]
    return {'version': '.'.join(major_minor), 'release': release}
|
||||
|
||||
|
||||
# TODO? maybe download and parse APKINDEX instead?
def get_apk_version(alpine_version, repo, arch, apk):
    """Scrape the CDN repo directory listing for the version of an APK.

    Args:
        alpine_version: branch or alias, e.g. 'v3.15' or 'latest-stable'
        repo:           repository, e.g. 'main' or 'community'
        arch:           architecture, e.g. 'x86_64' or 'aarch64'
        apk:            package name, e.g. 'alpine-base'

    Returns:
        The '<version>-<pkgrel>' portion of the first matching
        '<apk>-<version>-<pkgrel>.apk' link in the listing.

    Raises:
        RuntimeError: if no matching package link is found.
    """
    repo_url = f"{CDN_URL}/{alpine_version}/{repo}/{arch}"
    # re.escape() the package name -- names may contain regex
    # metacharacters (e.g. 'gtk+3.0')
    apks_re = re.compile(f'"{re.escape(apk)}-(\\d.*)\\.apk"')
    # context-manage the response so the connection is always closed
    with urlopen(repo_url) as res:
        for line in map(lambda x: x.decode('utf8'), res):
            if not line.startswith('<a href="'):
                continue

            match = apks_re.search(line)
            if match:
                return match.group(1)

    # didn't find it?
    raise RuntimeError(f"Unable to find {apk} APK via {repo_url}")
|
|
@ -0,0 +1,310 @@
|
|||
#!/usr/bin/env python3
|
||||
# vim: ts=4 et:
|
||||
|
||||
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys
import subprocess

sys.pycache_prefix = 'work/__pycache__'

# Create the work environment if it doesn't exist.
if not os.path.exists('work'):
    import venv

    # third-party deps installed into the work venv (plus pip itself)
    PIP_LIBS = [
        'pip',
        'mergedeep',
        'pyhocon',
        'python-dateutil',
        'ruamel.yaml',
    ]
    print('Work environment does not exist, creating...', file=sys.stderr)
    venv.create('work', with_pip=True)
    subprocess.run(['work/bin/pip', 'install', '-U', *PIP_LIBS])

# Re-execute using the right virtual environment, if necessary.
venv_args = [os.path.join('work', 'bin', 'python3')] + sys.argv
# if the interpreter running us isn't the venv's python3, exec into it
if os.path.join(os.getcwd(), venv_args[0]) != sys.executable:
    print("Re-executing with work environment's Python...\n", file=sys.stderr)
    os.execv(venv_args[0], venv_args)

# We're now in the right Python environment...
|
||||
|
||||
import argparse
|
||||
import io
|
||||
import logging
|
||||
import shutil
|
||||
import time
|
||||
|
||||
from subprocess import Popen, PIPE
|
||||
from urllib.request import urlopen
|
||||
|
||||
import alpine
|
||||
import clouds
|
||||
from image_configs import ImageConfigManager
|
||||
|
||||
|
||||
### Constants & Variables

# build progresses through these steps, in this order
STEPS = ['configs', 'actions', 'local', 'import', 'publish']
LOGFORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# work/ entries that belong to the venv itself and survive --clean
WORK_CLEAN = {'bin', 'include', 'lib', 'pyvenv.cfg', '__pycache__'}
WORK_OVERLAYS = ['configs', 'scripts']
# per-arch path of the UEFI firmware blob inside the ovmf APK
OVMF_FIRMWARE = {
    'aarch64': 'usr/share/OVMF/QEMU_EFI.fd',
    'x86_64': 'usr/share/OVMF/OVMF.fd'
}
# {version}/{release} are filled in below; {{arch}} is escaped so it
# survives .format() for later substitution
ISO_URL_FORMAT = f"{alpine.CDN_URL}/" \
    'v{version}/releases/{{arch}}/alpine-virt-{release}-{{arch}}.iso'
|
||||
|
||||
|
||||
### Functions
|
||||
|
||||
# ensure list has unique values, preserving order
def unique_list(x):
    """Return a list of x's elements with duplicates removed,
    preserving first-seen order."""
    # dict.fromkeys is the idiomatic order-preserving dedupe
    # (dicts preserve insertion order)
    return list(dict.fromkeys(x))
|
||||
|
||||
|
||||
def remove_dupe_args():
    """Build an argparse.Action class that stores nargs='+' values with
    duplicates removed (first occurrence wins)."""
    class RemoveDupeArgs(argparse.Action):

        def __call__(self, parser, args, values, option_string=None):
            deduped = unique_list(values)
            setattr(args, self.dest, deduped)

    return RemoveDupeArgs
|
||||
|
||||
|
||||
def are_args_valid(checker):
    """Build an argparse.Action class that dedupes nargs='+' values and
    rejects (via parser.error) any value failing checker(value)."""
    class AreArgsValid(argparse.Action):

        def __call__(self, parser, args, values, option_string=None):
            # remove duplicates first, then validate what's left
            deduped = unique_list(values)
            bad = [v for v in deduped if not checker(v)]
            if bad:
                # parser.error() exits; reporting the first failure
                # matches the original behavior
                x = bad[0]
                parser.error(f"{option_string} value is not a {self.metavar}: {x}")

            setattr(args, self.dest, deduped)

    return AreArgsValid
|
||||
|
||||
|
||||
def clean_work():
    """Remove everything from work/ except the virtual env itself
    (the entries listed in WORK_CLEAN)."""
    log.info('Cleaning work environment')

    for x in (set(os.listdir('work')) - WORK_CLEAN):
        x = os.path.join('work', x)
        log.debug('removing %s', x)
        if os.path.isdir(x):
            shutil.rmtree(x)
        else:
            # covers plain files and symlinks
            os.unlink(x)
|
||||
|
||||
|
||||
def is_same_dir_symlink(x):
    """True if x is a symlink whose target is a bare filename in the
    same directory (i.e. the target has no directory component)."""
    if not os.path.islink(x):
        return False

    target = os.path.normpath(os.readlink(x))
    # a same-dir target normalizes to just its own basename
    return os.path.basename(target) == target
|
||||
|
||||
|
||||
def install_overlay(overlay):
    """Populate work/<overlay> with symlinks back to the overlay's
    source files, from '.' first and then each --custom dir (later
    sources may override earlier same-dir symlinks)."""
    log.info("Installing '%s' overlay in work environment", overlay)
    dest_dir = os.path.join('work', overlay)
    os.makedirs(dest_dir, exist_ok=True)
    for src in unique_list(['.'] + args.custom):
        src_dir = os.path.join(src, overlay)
        for x in os.listdir(src_dir):
            src_x = os.path.join(src_dir, x)
            dest_x = os.path.join(dest_dir, x)
            if is_same_dir_symlink(src_x):
                # preserve intra-overlay symlinks verbatim
                rel_x = os.readlink(src_x)
            else:
                # link from work/<overlay> back to the source file
                rel_x = os.path.relpath(src_x, dest_dir)

            # TODO: only images.conf symlink can be overridden
            if os.path.islink(dest_x):
                # only same-dir symlinks can be overridden
                if not is_same_dir_symlink(dest_x):
                    log.warning("Can't override %s with %s", dest_x, src_x)
                    continue

                log.debug('overriding %s with %s', dest_x, src_x)
                os.unlink(dest_x)

            elif os.path.exists(dest_x):
                # we expect only symlinks in the overlay destination!
                log.critical('Config overlay non-symlink detected: %s', dest_x)
                sys.exit(1)

            log.debug('ln -sf %s %s', rel_x, dest_x)
            os.symlink(rel_x, dest_x)
|
||||
|
||||
|
||||
def install_overlays():
    """Install each missing overlay into work/; leave existing ones alone."""
    for overlay in WORK_OVERLAYS:
        work_dir = os.path.join('work', overlay)
        if os.path.isdir(work_dir):
            log.info("Using existing '%s' in work environment", overlay)
        else:
            install_overlay(overlay)
|
||||
|
||||
|
||||
def install_qemu_firmware():
    """Download per-arch OVMF UEFI firmware APKs from the Alpine CDN
    and install their firmware blobs under work/firmware, each with a
    uefi-<arch>.bin symlink for QEMU to use."""
    firm_dir = 'work/firmware'
    if os.path.isdir(firm_dir):
        log.info('Using existing UEFI firmware in work environment')
        return

    log.info('Installing UEFI firmware in work environment')

    os.makedirs(firm_dir)
    for arch, bin in OVMF_FIRMWARE.items():
        v = alpine.get_apk_version('latest-stable', 'community', arch, 'ovmf')
        ovmf_url = f"{alpine.CDN_URL}/latest-stable/community/{arch}/ovmf-{v}.apk"
        data = urlopen(ovmf_url).read()

        # Python tarfile library can't extract from APKs
        tar_cmd = ['tar', '-zxf', '-', '-C', firm_dir, bin]
        p = Popen(tar_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate(input=data)
        if p.returncode:
            log.critical('Unable to untar %s to get %s', ovmf_url, bin)
            log.error('%s = %s', p.returncode, ' '.join(tar_cmd))
            log.error('STDOUT:\n%s', out.decode('utf8'))
            log.error('STDERR:\n%s', err.decode('utf8'))
            sys.exit(1)

        # stable per-arch name pointing at the extracted blob
        os.symlink(bin, os.path.join(firm_dir, f"uefi-{arch}.bin"))
|
||||
|
||||
|
||||
### Command Line & Logging

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# general options
parser.add_argument(
    '--debug', action='store_true', help='enable debug output')
parser.add_argument(
    '--clean', action='store_true', help='start with a clean work environment')
parser.add_argument(
    '--revise', action='store_true',
    help='bump revisions if images already published')
# positional argument
parser.add_argument(
    'step', choices=STEPS, help='build up to and including this step')
# config options
parser.add_argument(
    '--custom', metavar='DIR', nargs='+', action=are_args_valid(os.path.isdir),
    default=[], help='overlay custom directory in work environment')
parser.add_argument(
    '--skip', metavar='KEY', nargs='+', action=remove_dupe_args(),
    default=[], help='skip variants with dimension key(s)')
parser.add_argument(
    '--only', metavar='KEY', nargs='+', action=remove_dupe_args(),
    default=[], help='only variants with dimension key(s)')
# packer options
parser.add_argument(
    '--no-color', action='store_true', help='turn off Packer color output')
parser.add_argument(
    '--parallel', metavar='N', type=int, default=1,
    help='build N images in parallel')
parser.add_argument(
    '--vars', metavar='FILE', nargs='+', action=are_args_valid(os.path.isfile),
    default=[], help='supply Packer with -vars-file(s)')
parser.add_argument(
    '--use-broker', action='store_true',
    help='use the identity broker to get credentials')
# perhaps others?
args = parser.parse_args()

log = logging.getLogger('build')
log.setLevel(logging.DEBUG if args.debug else logging.INFO)
console = logging.StreamHandler()
# timestamps rendered in UTC via the gmtime converter
logfmt = logging.Formatter(LOGFORMAT, datefmt='%FT%TZ')
logfmt.converter = time.gmtime
console.setFormatter(logfmt)
log.addHandler(console)
log.debug(args)

# set up credential provider, if we're going to use it
if args.use_broker:
    clouds.set_credential_provider()
|
||||
|
||||
### Setup Configs

latest = alpine.get_version_release('latest-stable')
log.info('Latest Alpine version %s and release %s', latest['version'], latest['release'])

if args.clean:
    clean_work()

# install overlay(s) if missing
install_overlays()

image_configs = ImageConfigManager(
    conf_path='work/configs/images.conf',
    yaml_path='work/images.yaml',
    log='build',
    # fills {version}/{release}; {arch} is left for Packer to resolve
    iso_url_format=ISO_URL_FORMAT.format(**latest)
)

log.info('Configuration Complete')
if args.step == 'configs':
    sys.exit(0)

### What needs doing?

if not image_configs.determine_actions(
        step=args.step, only=args.only, skip=args.skip, revise=args.revise):
    log.info('No pending actions to take at this time.')
    sys.exit(0)

if args.step == 'actions':
    sys.exit(0)

# install firmware if missing
install_qemu_firmware()

### Build/Import/Publish with Packer

env = os.environ | {
    'TZ': 'UTC',
    'PACKER_CACHE_DIR': 'work/packer_cache'
}

packer_cmd = [
    'packer', 'build', '-timestamp-ui',
    '-parallel-builds', str(args.parallel)
]
if args.no_color:
    packer_cmd.append('-color=false')

if args.use_broker:
    packer_cmd += ['-var', 'USE_BROKER=1']

if args.debug:
    # do not add '-debug', it will pause between steps
    packer_cmd += ['-var', 'DEBUG=1']

for var_file in args.vars:
    packer_cmd.append(f"-var-file={var_file}")

packer_cmd += ['.']
log.info('Executing Packer...')
log.debug(packer_cmd)
# tee Packer's output: echo to our stdout AND capture a copy
out = io.StringIO()
p = Popen(packer_cmd, stdout=PIPE, encoding='utf8', env=env)
while p.poll() is None:
    text = p.stdout.readline()
    out.write(text)
    print(text, end="")

if p.returncode != 0:
    log.critical('Packer Failure')
    sys.exit(p.returncode)

log.info('Packer Completed')

# TODO? collect artifacts?
|
|
@ -0,0 +1,107 @@
|
|||
#!/usr/bin/env python3
|
||||
# vim: ts=4 et:
|
||||
|
||||
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys
import textwrap

NOTE = textwrap.dedent("""
This script is meant to be run as a Packer post-processor, and Packer is only
meant to be executed from the main 'build' script, which is responsible for
setting up the work environment.
""")

sys.pycache_prefix = 'work/__pycache__'

# unlike the 'build' script, we never create the venv ourselves --
# bail out if 'build' hasn't set it up
if not os.path.exists('work'):
    print('FATAL: Work directory does not exist.', file=sys.stderr)
    print(NOTE, file=sys.stderr)
    exit(1)

# Re-execute using the right virtual environment, if necessary.
venv_args = [os.path.join('work', 'bin', 'python3')] + sys.argv
if os.path.join(os.getcwd(), venv_args[0]) != sys.executable:
    print("Re-executing with work environment's Python...\n", file=sys.stderr)
    os.execv(venv_args[0], venv_args)

# We're now in the right Python environment
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from ruamel.yaml import YAML
|
||||
|
||||
import clouds
|
||||
from image_configs import ImageConfigManager
|
||||
|
||||
|
||||
### Constants & Variables

# subcommands this script handles (as a Packer post-processor)
ACTIONS = ['import', 'publish']
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
|
||||
|
||||
|
||||
### Functions
|
||||
|
||||
# TODO? be more specific with args?
# import image config's local image to cloud
def import_image(ic):
    """Import ic's local image via the cloud adapter and record the
    returned metadata as imported.yaml in the image's work directory."""
    imported = clouds.import_image(ic)
    # write imported metadata
    metadata_path = Path(ic.local_dir) / 'imported.yaml'
    yaml.dump(imported, metadata_path)
|
||||
|
||||
|
||||
# TODO? be more specific with args?
# publish image config's imported image to target regions with expected permissions
def publish_image(ic):
    """Publish ic's imported image via the cloud adapter and record the
    returned metadata as published.yaml in the image's work directory."""
    published = clouds.publish_image(ic)
    # ensure image work directory exists
    os.makedirs(ic.local_dir, exist_ok=True)
    # write published metadata
    metadata_path = Path(ic.local_dir) / 'published.yaml'
    yaml.dump(published, metadata_path)
|
||||
|
||||
|
||||
### Command Line & Logging

parser = argparse.ArgumentParser(description=NOTE)
parser.add_argument('--debug', action='store_true', help='enable debug output')
parser.add_argument(
    '--use-broker', action='store_true',
    help='use the identity broker to get credentials')
parser.add_argument('action', choices=ACTIONS)
parser.add_argument('image_keys', metavar='IMAGE_KEY', nargs='+')
args = parser.parse_args()

log = logging.getLogger(args.action)
log.setLevel(logging.DEBUG if args.debug else logging.INFO)
# log to STDOUT so that it's not all red when executed by packer
console = logging.StreamHandler(sys.stdout)
console.setFormatter(logging.Formatter(LOGFORMAT))
log.addHandler(console)
log.debug(args)

# set up credential provider, if we're going to use it
if args.use_broker:
    clouds.set_credential_provider()

# load build configs
configs = ImageConfigManager(
    conf_path='work/configs/images.conf',
    yaml_path='work/images.yaml',
    log=args.action
)

# module-level YAML serializer shared by import_image/publish_image
yaml = YAML()
yaml.default_flow_style = False

# dispatch each requested image to the chosen action
for image_key in args.image_keys:
    image_config = configs.get(image_key)

    if args.action == 'import':
        import_image(image_config)

    elif args.action == 'publish':
        publish_image(image_config)
|
|
@ -0,0 +1,41 @@
|
|||
# vim: ts=4 et:
|
||||
|
||||
from . import aws # , oci, gcp, azure
|
||||
|
||||
ADAPTERS = {}
|
||||
|
||||
|
||||
def register(*mods):
    """Register each cloud adapter module: ADAPTERS maps the module's
    short cloud name to whatever its register() returns (if truthy)."""
    for mod in mods:
        # cloud name is the module's last path component,
        # e.g. 'clouds.aws' -> 'aws'
        cloud_name = mod.__name__.split('.')[-1]
        adapter = mod.register(cloud_name)
        if adapter:
            ADAPTERS[cloud_name] = adapter
|
||||
|
||||
|
||||
# register all currently-implemented cloud adapters at import time
register(aws)  # , oci, azure, gcp)
|
||||
|
||||
|
||||
# using a credential provider is optional, set across all adapters
def set_credential_provider():
    """Create one IdentityBrokerClient and share it with every
    registered adapter."""
    # deferred import: the broker client is only needed when opted in
    from .identity_broker_client import IdentityBrokerClient
    broker = IdentityBrokerClient()
    for adapter in ADAPTERS.values():
        adapter.cred_provider = broker
|
||||
|
||||
|
||||
### forward to the correct adapter

# newest imported image for config's build name, via its cloud adapter
def latest_build_image(config):
    return ADAPTERS[config.cloud].latest_build_image(config.name)
|
||||
|
||||
|
||||
# import config's local image, via its cloud adapter
def import_image(config):
    return ADAPTERS[config.cloud].import_image(config)
|
||||
|
||||
|
||||
# remove config's remote image (by id), via its cloud adapter
def remove_image(config):
    return ADAPTERS[config.cloud].remove_image(config.remote_image['id'])
|
||||
|
||||
|
||||
# publish config's imported image, via its cloud adapter
def publish_image(config):
    return ADAPTERS[config.cloud].publish_image(config)
|
|
@ -0,0 +1,417 @@
|
|||
# NOTE: not meant to be executed directly
|
||||
# vim: ts=4 et:
|
||||
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import string
|
||||
import sys
|
||||
import time
|
||||
|
||||
from datetime import datetime
|
||||
from subprocess import Popen, PIPE, run
|
||||
|
||||
from .interfaces.adapter import CloudAdapterInterface
|
||||
from image_configs import Tags
|
||||
|
||||
|
||||
class AWSCloudAdapter(CloudAdapterInterface):
    # identity-broker credential keys -> boto3 session kwargs
    CRED_MAP = {
        'access_key': 'aws_access_key_id',
        'secret_key': 'aws_secret_access_key',
        'session_token': 'aws_session_token',
    }
    # qemu-img args converting QCOW2 to fixed-size VHD (for EC2 import)
    CONVERT_CMD = (
        'qemu-img', 'convert', '-f', 'qcow2', '-O', 'vpc', '-o', 'force_size=on'
    )
    # Alpine arch -> EC2 architecture name
    ARCH = {
        'aarch64': 'arm64',
        'x86_64': 'x86_64',
    }
    # image config firmware -> EC2 boot mode
    BOOT_MODE = {
        'bios': 'legacy-bios',
        'uefi': 'uefi',
    }
|
||||
|
||||
    @property
    def sdk(self):
        # delayed import/install of SDK until we want to use it
        if not self._sdk:
            try:
                import boto3
            except ModuleNotFoundError:
                # install into the work venv, then retry the import
                run(['work/bin/pip', 'install', '-U', 'boto3'])
                import boto3

            self._sdk = boto3

        return self._sdk
|
||||
|
||||
    def session(self, region=None):
        """Return a cached boto3 Session for region (None = default)."""
        if region not in self._sessions:
            # merge region with broker-supplied credentials (if any)
            creds = {'region_name': region} | self.credentials(region)
            self._sessions[region] = self.sdk.session.Session(**creds)

        return self._sessions[region]
|
||||
|
||||
    # TODO: property?
    def regions(self):
        """Map of region name -> usable?, from the credential broker
        when configured, otherwise from EC2 describe_regions."""
        if self.cred_provider:
            return self.cred_provider.get_regions(self.cloud)

        # list of all subscribed regions
        return {r['RegionName']: True for r in self.session().client('ec2').describe_regions()['Regions']}
|
||||
|
||||
# TODO: property?
|
||||
def default_region(self):
|
||||
if self.cred_provider:
|
||||
return self.cred_provider.get_default_region(self.cloud)
|
||||
|
||||
# rely on our env or ~/.aws config for the default
|
||||
return None
|
||||
|
||||
def credentials(self, region=None):
|
||||
if not self.cred_provider:
|
||||
# use the cloud SDK's default credential discovery
|
||||
return {}
|
||||
|
||||
creds = self.cred_provider.get_credentials(self.cloud, region)
|
||||
# return dict suitable to use for session()
|
||||
return {self.CRED_MAP[k]: v for k, v in creds.items() if k in self.CRED_MAP}
|
||||
|
||||
def _get_images_with_tags(self, tags={}, region=None):
|
||||
ec2r = self.session(region).resource('ec2')
|
||||
req = {'Owners': ['self'], 'Filters': []}
|
||||
for k, v in tags.items():
|
||||
req['Filters'].append({'Name': f"tag:{k}", 'Values': [str(v)]})
|
||||
|
||||
return sorted(
|
||||
ec2r.images.filter(**req), key=lambda k: k.creation_date, reverse=True)
|
||||
|
||||
def _aws_tags(self, b_tags):
|
||||
# convert dict to [{'Key': k, 'Value': v}, ...]
|
||||
a_tags = []
|
||||
for k, v in b_tags.items():
|
||||
# add extra Name tag
|
||||
if k == 'name':
|
||||
a_tags += [{'Key': 'Name', 'Value': str(v)}]
|
||||
|
||||
a_tags += [{'Key': k, 'Value': str(v)}]
|
||||
|
||||
return a_tags
|
||||
|
||||
    # cloud-agnostic necessary info about an ec2.Image
    def _image_info(self, i):
        """Distill an ec2.Image into {'id', 'region', 'tags'}."""
        tags = Tags(from_list=i.tags)
        # drop the AWS-only 'Name' duplicate tag
        del tags.Name
        # TODO? realm/partition?
        return {
            'id': i.image_id,
            'region': i.meta.client.meta.region_name,
            'tags': dict(tags)
            # TODO? narrow down to these?
            # imported = i.tags.imported
            # published = i.tags.published
            # revision = i.tags.build_revision
            # source_id = i.image_id,
            # source_region = i.meta.client.meta.region_name,
        }
|
||||
|
||||
# get the latest imported image for a given build name
|
||||
def latest_build_image(self, build_name):
|
||||
images = self._get_images_with_tags(tags={'build_name': build_name})
|
||||
if images:
|
||||
# first one is the latest
|
||||
return self._image_info(images[0])
|
||||
|
||||
return None
|
||||
|
||||
## TODO: rework these next two as a Tags class
|
||||
|
||||
# import an image
|
||||
# NOTE: requires 'vmimport' role with read/write of <s3_bucket>.* and its objects
|
||||
def import_image(self, ic):
|
||||
log = logging.getLogger('import')
|
||||
image_path = ic.local_path
|
||||
image_aws = image_path.replace(ic.local_format, 'vhd')
|
||||
description = ic.image_description
|
||||
|
||||
session = self.session()
|
||||
s3r = session.resource('s3')
|
||||
ec2c = session.client('ec2')
|
||||
ec2r = session.resource('ec2')
|
||||
|
||||
# convert QCOW2 to VHD
|
||||
log.info('Converting %s to VHD format', image_path)
|
||||
p = Popen(self.CONVERT_CMD + (image_path, image_aws), stdout=PIPE, stdin=PIPE, encoding='utf8')
|
||||
out, err = p.communicate()
|
||||
if p.returncode:
|
||||
log.error('Unable to convert %s to VHD format (%s)', image_path, p.returncode)
|
||||
log.error('STDOUT:\n%s', out)
|
||||
log.error('STDERR:\n%s', err)
|
||||
sys.exit(p.returncode)
|
||||
|
||||
bucket_name = 'alpine-cloud-images.' + ''.join(
|
||||
random.SystemRandom().choice(string.ascii_lowercase + string.digits)
|
||||
for _ in range(40))
|
||||
s3_key = os.path.basename(image_aws)
|
||||
|
||||
bucket = s3r.Bucket(bucket_name)
|
||||
log.info('Creating S3 bucket %s', bucket.name)
|
||||
bucket.create(
|
||||
CreateBucketConfiguration={'LocationConstraint': ec2c.meta.region_name}
|
||||
)
|
||||
bucket.wait_until_exists()
|
||||
s3_url = f"s3://{bucket.name}/{s3_key}"
|
||||
|
||||
try:
|
||||
log.info('Uploading %s to %s', image_aws, s3_url)
|
||||
bucket.upload_file(image_aws, s3_key)
|
||||
|
||||
# import snapshot from S3
|
||||
log.info('Importing EC2 snapshot from %s', s3_url)
|
||||
ss_import = ec2c.import_snapshot(
|
||||
DiskContainer={
|
||||
'Description': description, # https://github.com/boto/boto3/issues/2286
|
||||
'Format': 'VHD',
|
||||
'Url': s3_url
|
||||
}
|
||||
# NOTE: TagSpecifications -- doesn't work with ResourceType: snapshot?
|
||||
)
|
||||
ss_task_id = ss_import['ImportTaskId']
|
||||
while True:
|
||||
ss_task = ec2c.describe_import_snapshot_tasks(
|
||||
ImportTaskIds=[ss_task_id]
|
||||
)
|
||||
task_detail = ss_task['ImportSnapshotTasks'][0]['SnapshotTaskDetail']
|
||||
if task_detail['Status'] not in ['pending', 'active', 'completed']:
|
||||
msg = f"Bad EC2 snapshot import: {task_detail['Status']} - {task_detail['StatusMessage']}"
|
||||
log.error(msg)
|
||||
raise(RuntimeError, msg)
|
||||
|
||||
if task_detail['Status'] == 'completed':
|
||||
snapshot_id = task_detail['SnapshotId']
|
||||
break
|
||||
|
||||
time.sleep(15)
|
||||
except Exception:
|
||||
log.error('Unable to import snapshot from S3:', exc_info=True)
|
||||
raise
|
||||
finally:
|
||||
# always cleanup S3, even if there was an exception raised
|
||||
log.info('Cleaning up %s', s3_url)
|
||||
bucket.Object(s3_key).delete()
|
||||
bucket.delete()
|
||||
|
||||
# tag snapshot
|
||||
snapshot = ec2r.Snapshot(snapshot_id)
|
||||
try:
|
||||
log.info('Tagging EC2 snapshot %s', snapshot_id)
|
||||
tags = ic.tags
|
||||
tags.Name = tags.name # because AWS is special
|
||||
snapshot.create_tags(Tags=tags.as_list())
|
||||
except Exception:
|
||||
log.error('Unable to tag snapshot:', exc_info=True)
|
||||
log.info('Removing snapshot')
|
||||
snapshot.delete()
|
||||
raise
|
||||
|
||||
# register AMI
|
||||
try:
|
||||
log.info('Registering EC2 AMI from snapshot %s', snapshot_id)
|
||||
img = ec2c.register_image(
|
||||
Architecture=self.ARCH[ic.arch],
|
||||
BlockDeviceMappings=[{
|
||||
'DeviceName': 'xvda',
|
||||
'Ebs': {'SnapshotId': snapshot_id}
|
||||
}],
|
||||
Description=description,
|
||||
EnaSupport=True,
|
||||
Name=ic.image_name,
|
||||
RootDeviceName='xvda',
|
||||
SriovNetSupport='simple',
|
||||
VirtualizationType='hvm',
|
||||
BootMode=self.BOOT_MODE[ic.firmware],
|
||||
)
|
||||
except Exception:
|
||||
log.error('Unable to register image:', exc_info=True)
|
||||
log.info('Removing snapshot')
|
||||
snapshot.delete()
|
||||
raise
|
||||
|
||||
image_id = img['ImageId']
|
||||
image = ec2r.Image(image_id)
|
||||
|
||||
try:
|
||||
# tag image (adds imported tag)
|
||||
log.info('Tagging EC2 AMI %s', image_id)
|
||||
tags.imported = datetime.utcnow().isoformat()
|
||||
tags.source_id = image_id
|
||||
tags.source_region = ec2c.meta.region_name
|
||||
image.create_tags(Tags=tags.as_list())
|
||||
except Exception:
|
||||
log.error('Unable to tag image:', exc_info=True)
|
||||
log.info('Removing image and snapshot')
|
||||
image.delete()
|
||||
snapshot.delete()
|
||||
raise
|
||||
|
||||
return self._image_info(image)
|
||||
|
||||
    # remove an (unpublished) image
    def remove_image(self, image_id):
        """Deregister the AMI and delete its backing EBS snapshot."""
        log = logging.getLogger('build')
        ec2r = self.session().resource('ec2')
        image = ec2r.Image(image_id)
        # TODO? protect against removing a published image?
        # resolve the snapshot before deregistering, while the
        # block-device mapping is still available
        snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
        snapshot = ec2r.Snapshot(snapshot_id)
        log.info('Deregistering %s', image_id)
        image.deregister()
        log.info('Deleting %s', snapshot_id)
        snapshot.delete()
|
||||
|
||||
# TODO: this should be standardized and work with cred_provider
|
||||
def _get_all_regions(self):
|
||||
ec2c = self.session().client('ec2')
|
||||
res = ec2c.describe_regions(AllRegions=True)
|
||||
return {
|
||||
r['RegionName']: r['OptInStatus'] != 'not-opted-in'
|
||||
for r in res['Regions']
|
||||
}
|
||||
|
||||
    # publish an image
    def publish_image(self, ic):
        """Copy ic's latest build AMI into its target regions, then
        tag each copy (and its snapshot), apply launch permissions,
        and set EOL deprecation.

        Returns {region: image_info}, with None for failed copies.
        """
        log = logging.getLogger('publish')
        source_image = self.latest_build_image(ic.name)
        if not source_image:
            log.error('No source image for %s', ic.name)
            sys.exit(1)

        source_id = source_image['id']
        log.info('Publishing source: %s, %s', source_image['region'], source_id)
        source = self.session().resource('ec2').Image(source_id)
        source_tags = Tags(from_list=source.tags)
        publish = ic.publish

        # sort out published image access permissions
        perms = {'groups': [], 'users': []}
        if 'PUBLIC' in publish['access'] and publish['access']['PUBLIC']:
            # 'all' group == world-readable AMI
            perms['groups'] = ['all']
        else:
            # otherwise, grant each truthy account id individually
            for k, v in publish['access'].items():
                if v:
                    log.debug('users: %s', k)
                    perms['users'].append(str(k))

        log.debug('perms: %s', perms)

        # resolve destination regions
        regions = self.regions()
        if 'ALL' in publish['regions'] and publish['regions']['ALL']:
            log.info('Publishing to ALL available regions')
        else:
            # clear ALL out of the way if it's still there
            publish['regions'].pop('ALL', None)
            # TODO: politely warn/skip unknown regions in b.aws.regions
            regions = {r: regions[r] for r in publish['regions']}

        # kick off a copy into each target region (or reuse an
        # existing image with the same build name/revision)
        publishing = {}
        for r in regions.keys():
            if not regions[r]:
                log.warning('Skipping unsubscribed AWS region %s', r)
                continue

            images = self._get_images_with_tags(
                region=r,
                tags={
                    'build_name': ic.name,
                    'build_revision': ic.revision
                }
            )
            if images:
                image = images[0]
                log.info('%s: Already exists as %s', r, image.id)
            else:
                ec2c = self.session(r).client('ec2')
                try:
                    res = ec2c.copy_image(
                        Description=source.description,
                        Name=source.name,
                        SourceImageId=source.id,
                        SourceRegion=source_image['region'],
                    )
                except Exception:
                    log.warning('Skipping %s, unable to copy image:', r, exc_info=True)
                    continue

                image_id = res['ImageId']
                log.info('%s: Publishing to %s', r, image_id)
                image = self.session(r).resource('ec2').Image(image_id)

            publishing[r] = image

        # poll the pending copies until every one is finalized
        published = {}
        copy_wait = 180
        while len(published) < len(publishing):
            for r, image in publishing.items():
                if r not in published:
                    image.reload()
                    if image.state == 'available':
                        # tag image
                        log.info('%s: Adding tags to %s', r, image.id)
                        tags = Tags(from_list=image.tags)
                        fresh = False
                        if 'published' not in tags:
                            fresh = True

                        if not tags:
                            # fallback to source image's tags
                            tags = Tags(source_tags)

                        if fresh:
                            tags.published = datetime.utcnow().isoformat()

                        image.create_tags(Tags=tags.as_list())

                        # tag image's snapshot, too
                        snapshot = self.session(r).resource('ec2').Snapshot(
                            image.block_device_mappings[0]['Ebs']['SnapshotId']
                        )
                        snapshot.create_tags(Tags=image.tags)

                        # apply launch perms
                        log.info('%s: Applying launch perms to %s', r, image.id)
                        image.reset_attribute(Attribute='launchPermission')
                        image.modify_attribute(
                            Attribute='launchPermission',
                            OperationType='add',
                            UserGroups=perms['groups'],
                            UserIds=perms['users'],
                        )

                        # set up AMI deprecation
                        ec2c = image.meta.client
                        log.info('%s: Setting EOL deprecation time on %s', r, image.id)
                        ec2c.enable_image_deprecation(
                            ImageId=image.id,
                            DeprecateAt=f"{source_image['tags']['end_of_life']}T23:59:59Z"
                        )

                        published[r] = self._image_info(image)

                    if image.state == 'failed':
                        log.error('%s: %s - %s - %s', r, image.id, image.state, image.state_reason)
                        published[r] = None

            remaining = len(publishing) - len(published)
            if remaining > 0:
                log.info('Waiting %ds for %d images to complete', copy_wait, remaining)
                time.sleep(copy_wait)
                # first wait is long; subsequent polls are quicker
                copy_wait = 30

        return published
|
||||
|
||||
|
||||
# module entry point called by clouds.register()
def register(cloud, cred_provider=None):
    return AWSCloudAdapter(cloud, cred_provider)
|
|
@ -0,0 +1,131 @@
|
|||
# vim: ts=4 et:
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import urllib.error
|
||||
|
||||
from datetime import datetime
|
||||
from email.utils import parsedate
|
||||
from urllib.request import Request, urlopen
|
||||
|
||||
|
||||
class IdentityBrokerClient:
|
||||
"""Client for identity broker
|
||||
|
||||
Export IDENTITY_BROKER_ENDPOINT to override the default broker endpoint.
|
||||
Export IDENTITY_BROKER_API_KEY to specify an API key for the broker.
|
||||
|
||||
See README_BROKER.md for more information and a spec.
|
||||
"""
|
||||
|
||||
_DEFAULT_ENDPOINT = 'https://aws-access.crute.us/api/account'
|
||||
_DEFAULT_ACCOUNT = 'alpine-amis-user'
|
||||
_LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
|
||||
|
||||
    def __init__(self, endpoint=None, key=None, account=None, debug=False):
        """Set up logging, resolve endpoint/key/account (environment
        variables win over arguments), and initialize response caches.

        Raises Exception when no API key can be found.
        """
        # log to STDOUT so that it's not all red when executed by Packer
        self._logger = logging.getLogger('identity-broker')
        self._logger.setLevel(logging.DEBUG if debug else logging.INFO)
        console = logging.StreamHandler(sys.stdout)
        console.setFormatter(logging.Formatter(self._LOGFORMAT))
        self._logger.addHandler(console)

        self._endpoint = os.environ.get('IDENTITY_BROKER_ENDPOINT') or endpoint \
            or self._DEFAULT_ENDPOINT
        self._key = os.environ.get('IDENTITY_BROKER_API_KEY') or key
        self._account = account or self._DEFAULT_ACCOUNT
        if not self._key:
            raise Exception('No identity broker key found')

        self._headers = {
            'Accept': 'application/vnd.broker.v2+json',
            'X-API-Key': self._key
        }
        # path -> parsed JSON response
        self._cache = {}
        # path -> expiry datetime (falsy = never expires)
        self._expires = {}
        self._default_region = {}
|
||||
|
||||
def _is_cache_valid(self, path):
|
||||
if path not in self._cache:
|
||||
return False
|
||||
|
||||
# path is subject to expiry AND its time has passed
|
||||
if self._expires[path] and self._expires[path] < datetime.utcnow():
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _get(self, path):
|
||||
if not self._is_cache_valid(path):
|
||||
while True: # to handle rate limits
|
||||
try:
|
||||
res = urlopen(Request(path, headers=self._headers))
|
||||
except urllib.error.HTTPError as ex:
|
||||
if ex.status == 401:
|
||||
raise Exception('Expired or invalid identity broker token')
|
||||
|
||||
if ex.status == 406:
|
||||
raise Exception('Invalid or malformed identity broker token')
|
||||
|
||||
# TODO: will this be entirely handled by the 401 above?
|
||||
if ex.headers.get('Location') == '/logout':
|
||||
raise Exception('Identity broker token is expired')
|
||||
|
||||
if ex.status == 429:
|
||||
self._logger.warning(
|
||||
'Rate-limited by identity broker, sleeping 30 seconds')
|
||||
time.sleep(30)
|
||||
continue
|
||||
|
||||
raise ex
|
||||
|
||||
if res.status not in {200, 429}:
|
||||
raise Exception(res.reason)
|
||||
|
||||
# never expires without valid RFC 1123 Expires header
|
||||
if expires := res.getheader('Expires'):
|
||||
expires = parsedate(expires)
|
||||
# convert RFC 1123 to datetime, if parsed successfully
|
||||
expires = datetime(*expires[:6])
|
||||
|
||||
self._expires[path] = expires
|
||||
self._cache[path] = json.load(res)
|
||||
break
|
||||
|
||||
return self._cache[path]
|
||||
|
||||
def get_credentials_url(self, vendor):
|
||||
accounts = self._get(self._endpoint)
|
||||
if vendor not in accounts:
|
||||
raise Exception(f'No {vendor} credentials found')
|
||||
|
||||
for account in accounts[vendor]:
|
||||
if account['short_name'] == self._account:
|
||||
return account['credentials_url']
|
||||
|
||||
raise Exception('No account credentials found')
|
||||
|
||||
def get_regions(self, vendor):
|
||||
out = {}
|
||||
|
||||
for region in self._get(self.get_credentials_url(vendor)):
|
||||
if region['enabled']:
|
||||
out[region['name']] = region['credentials_url']
|
||||
|
||||
if region['default']:
|
||||
self._default_region[vendor] = region['name']
|
||||
out[None] = region['credentials_url']
|
||||
|
||||
return out
|
||||
|
||||
def get_default_region(self, vendor):
|
||||
if vendor not in self._default_region:
|
||||
self.get_regions(vendor)
|
||||
|
||||
return self._default_region.get(vendor)
|
||||
|
||||
def get_credentials(self, vendor, region=None):
|
||||
return self._get(self.get_regions(vendor)[region])
|
|
@ -0,0 +1,40 @@
|
|||
# vim: ts=4 et:
|
||||
|
||||
class CloudAdapterInterface:
    """Abstract base class for per-cloud adapters.

    Concrete adapters override every method/property below; this base only
    establishes the common constructor state and the expected API surface.
    """

    def __init__(self, cloud, cred_provider=None):
        """Record the cloud name and optional credential provider."""
        self.cloud = cloud
        self.cred_provider = cred_provider
        # lazily populated by concrete subclasses
        self._sdk = None
        self._sessions = {}
        self._default_region = None

    @property
    def sdk(self):
        """The cloud's SDK handle -- subclass responsibility."""
        raise NotImplementedError

    def regions(self):
        """Available regions -- subclass responsibility."""
        raise NotImplementedError

    def default_region(self):
        """The default region -- subclass responsibility."""
        raise NotImplementedError

    def credentials(self, region=None):
        """Credentials for a region -- subclass responsibility."""
        raise NotImplementedError

    def session(self, region=None):
        """An SDK session for a region -- subclass responsibility."""
        raise NotImplementedError

    def latest_build_image(self, build_name):
        """Most recent image for a build -- subclass responsibility."""
        raise NotImplementedError

    # TODO: be more specific about what gets passed into these

    def import_image(self, config):
        """Import a locally-built image -- subclass responsibility."""
        raise NotImplementedError

    def remove_image(self, config, image_id):
        """Remove a remote image -- subclass responsibility."""
        raise NotImplementedError

    def publish_image(self, config):
        """Publish an imported image -- subclass responsibility."""
        raise NotImplementedError
|
|
@ -0,0 +1,420 @@
|
|||
# vim: ts=4 et:
|
||||
|
||||
import itertools
|
||||
import logging
|
||||
import mergedeep
|
||||
import os
|
||||
import pyhocon
|
||||
import shutil
|
||||
|
||||
from copy import deepcopy
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from ruamel.yaml import YAML
|
||||
|
||||
from alpine import get_version_release
|
||||
import clouds
|
||||
|
||||
|
||||
class ImageConfigManager():
    """Resolves HOCON image configuration into per-image ImageConfig objects.

    On construction, either loads a previously-resolved YAML file
    (yaml_path) or resolves the HOCON config (conf_path) from scratch and
    saves the result to yaml_path.
    """

    def __init__(self, conf_path, yaml_path, log=__name__, iso_url_format=None):
        self.conf_path = Path(conf_path)
        self.yaml_path = Path(yaml_path)
        self.log = logging.getLogger(log)
        self.iso_url_format = iso_url_format

        # NOTE(review): utcnow() is naive — fine for date strings, but
        # confirm no timezone-aware datetimes are ever compared with these
        self.now = datetime.utcnow()
        self.tomorrow = self.now + timedelta(days=1)
        # image_key -> ImageConfig
        self._configs = {}

        self.yaml = YAML()
        self.yaml.register_class(ImageConfig)
        self.yaml.default_flow_style = False
        self.yaml.explicit_start = True
        # hide !ImageConfig tag from Packer
        # (monkey-patch the representer, keeping the original for delegation)
        self.yaml.representer.org_represent_mapping = self.yaml.representer.represent_mapping
        self.yaml.representer.represent_mapping = self._strip_yaml_tag_type

        # load resolved YAML, if exists
        if self.yaml_path.exists():
            self._load_yaml()
        else:
            self._resolve()

    def get(self, key=None):
        """Return one resolved ImageConfig by key, or all of them when
        key is falsy."""
        if not key:
            return self._configs

        return self._configs[key]

    # load already-resolved YAML configs, restoring ImageConfig objects
    def _load_yaml(self):
        # TODO: no warning if we're being called from cloud_helper.py
        self.log.warning('Loading existing %s', self.yaml_path)
        for key, config in self.yaml.load(self.yaml_path).items():
            self._configs[key] = ImageConfig(key, config)

    # save resolved configs to YAML
    def _save_yaml(self):
        self.log.info('Saving %s', self.yaml_path)
        self.yaml.dump(self._configs, self.yaml_path)

    # hide !ImageConfig tag from Packer
    def _strip_yaml_tag_type(self, tag, mapping, flow_style=None):
        # rewrite our custom tag to a plain YAML map, then delegate to the
        # original representer saved in __init__
        if tag == '!ImageConfig':
            tag = u'tag:yaml.org,2002:map'

        return self.yaml.representer.org_represent_mapping(tag, mapping, flow_style=flow_style)

    # resolve from HOCON configs
    def _resolve(self):
        """Expand the cartesian product of all config Dimensions into
        individual ImageConfigs, applying Default, per-dimension, WHEN,
        and Mandatory merges (in that order)."""
        self.log.info('Generating configs.yaml in work environment')
        cfg = pyhocon.ConfigFactory.parse_file(self.conf_path)
        # set version releases
        for v, vcfg in cfg.Dimensions.version.items():
            # version keys are quoted to protect dots
            self.set_version_release(v.strip('"'), vcfg)

        dimensions = list(cfg.Dimensions.keys())
        self.log.debug('dimensions: %s', dimensions)

        # one image per combination of dimension keys
        for dim_keys in (itertools.product(*cfg['Dimensions'].values())):
            image_key = '-'.join(dim_keys).replace('"', '')

            # dict of dimension -> dimension_key
            dim_map = dict(zip(dimensions, dim_keys))
            release = cfg.Dimensions.version[dim_map['version']].release
            image_config = ImageConfig(image_key, {'release': release} | dim_map)

            # merge in the Default config
            image_config._merge(cfg.Default)
            skip = False
            # merge in each dimension key's configs
            for dim, dim_key in dim_map.items():
                dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key])
                # a dimension key's EXCLUDE list vetoes combinations
                exclude = dim_cfg.pop('EXCLUDE', None)
                if exclude and set(exclude) & set(dim_keys):
                    self.log.debug('%s SKIPPED, %s excludes %s', image_key, dim_key, exclude)
                    skip = True
                    break

                image_config._merge(dim_cfg)

                # now that we're done with ConfigTree/dim_cfg, remove " from dim_keys
                dim_keys = set(k.replace('"', '') for k in dim_keys)

                # WHEN blocks inside WHEN blocks are considered "and" operations
                while (when := image_config._pop('WHEN', None)):
                    for when_keys, when_conf in when.items():
                        # WHEN keys with spaces are considered "or" operations
                        if len(set(when_keys.split(' ')) & dim_keys) > 0:
                            image_config._merge(when_conf)

            if skip is True:
                continue

            # merge in the Mandatory configs at the end
            image_config._merge(cfg.Mandatory)

            # clean stuff up
            image_config._normalize()
            # NOTE(review): raises AttributeError if iso_url_format is None
            # (the constructor default) — confirm callers always pass it
            image_config.qemu['iso_url'] = self.iso_url_format.format(arch=image_config.arch)

            # we've resolved everything, add tags attribute to config
            self._configs[image_key] = image_config

        self._save_yaml()

    # set current version release
    def set_version_release(self, v, c):
        """Populate a version ConfigTree *c* with its release value; 'edge'
        gets today's date as release and tomorrow as end_of_life."""
        if v == 'edge':
            c.put('release', self.now.strftime('%Y%m%d'))
            c.put('end_of_life', self.tomorrow.strftime('%F'))
        else:
            # resolve the actual release from the Alpine CDN
            c.put('release', get_version_release(f"v{v}")['release'])

        # release is also appended to build name array
        c.put('name', [c.release])

    # update current config status
    def determine_actions(self, step, only, skip, revise):
        """Ask each (non-filtered) config to determine its build actions.

        *only*/*skip* filter configs by their dimension keys; *step* and
        *revise* are passed through to ImageConfig.determine_actions().
        Returns True when at least one config has actions to perform.
        """
        self.log.info('Determining Actions')
        has_actions = False
        for ic in self._configs.values():
            # clear away any previous actions
            if hasattr(ic, 'actions'):
                delattr(ic, 'actions')

            dim_keys = set(ic.image_key.split('-'))
            # --only requires ALL of its keys to match
            if only and len(set(only) & dim_keys) != len(only):
                self.log.debug("%s SKIPPED, doesn't match --only", ic.image_key)
                continue

            # --skip rejects on ANY matching key
            if skip and len(set(skip) & dim_keys) > 0:
                self.log.debug('%s SKIPPED, matches --skip', ic.image_key)
                continue

            ic.determine_actions(step, revise)
            if not has_actions and len(ic.actions):
                has_actions = True

        # re-save with updated actions
        self._save_yaml()
        return has_actions
|
||||
|
||||
|
||||
class ImageConfig():
    """A single image's fully-resolved configuration.

    Attributes are populated dynamically from merged HOCON config trees
    (see ImageConfigManager._resolve), so most are not declared here.
    """

    def __init__(self, image_key, obj={}):
        self.image_key = str(image_key)
        tags = obj.pop('tags', None)
        # adopt all (deep-dict-converted) config values as attributes
        self.__dict__ |= self._deep_dict(obj)
        # ensure tag values are str() when loading
        # NOTE(review): 'tags' is a read-only @property below — this
        # assignment looks like it would raise AttributeError when a
        # 'tags' key is present; confirm intent.
        if tags:
            self.tags = tags

    @property
    def local_dir(self):
        # directory holding locally-built artifacts for this image
        return os.path.join('work/images', self.name)

    @property
    def local_path(self):
        # e.g. work/images/<name>/image.qcow2 (extension from local_format)
        return os.path.join(self.local_dir, 'image.' + self.local_format)

    @property
    def image_name(self):
        # build name plus revision suffix, e.g. "<name>-r0"
        return '-r'.join([self.name, str(self.revision)])

    @property
    def image_description(self):
        # description template interpolated with this config's attributes
        return self.description.format(**self.__dict__)

    @property
    def tags(self):
        """Build a Tags mapping describing this image (values stringified)."""
        # stuff that really ought to be there
        t = {
            'arch': self.arch,
            'bootstrap': self.bootstrap,
            'build_name': self.name,
            'build_revision': self.revision,
            'cloud': self.cloud,
            'description': self.image_description,
            'end_of_life': self.end_of_life,
            'firmware': self.firmware,
            'name': self.image_name,
            'release': self.release,
            'version': self.version
        }
        # stuff that might not be there yet
        for k in ['imported', 'published', 'source_id', 'source_region']:
            if k in self.__dict__:
                t[k] = self.__dict__[k]
        return Tags(t)

    # recursively convert a ConfigTree object to a dict object
    def _deep_dict(self, layer):
        obj = deepcopy(layer)
        if isinstance(layer, pyhocon.ConfigTree):
            obj = dict(obj)

        # recursion terminates via AttributeError when *layer* is a
        # scalar (no .items())
        try:
            for key, value in layer.items():
                # some HOCON keys are quoted to preserve dots
                if '"' in key:
                    obj.pop(key)
                    key = key.strip('"')

                # version values were HOCON keys at one point, too
                if key == 'version' and '"' in value:
                    value = value.strip('"')

                obj[key] = self._deep_dict(value)
        except AttributeError:
            pass

        return obj

    def _merge(self, obj={}):
        # additive merge: lists are concatenated, dicts merged recursively
        mergedeep.merge(self.__dict__, self._deep_dict(obj), strategy=mergedeep.Strategy.ADDITIVE)

    def _pop(self, attr, default=None):
        # remove and return an attribute (used to consume WHEN blocks)
        return self.__dict__.pop(attr, default)

    # make data ready for Packer ingestion
    def _normalize(self):
        # stringify arrays
        self.name = '-'.join(self.name)
        self.description = ' '.join(self.description)
        self._stringify_repos()
        self._stringify_packages()
        self._stringify_services()
        self._stringify_dict_keys('kernel_modules', ',')
        self._stringify_dict_keys('kernel_options', ' ')
        self._stringify_dict_keys('initfs_features', ' ')

    def _stringify_repos(self):
        # stringify repos map
        #   <repo>: <tag>   # @<tag> <repo> enabled
        #   <repo>: false   # <repo> disabled (commented out)
        #   <repo>: true    # <repo> enabled
        #   <repo>: null    # skip <repo> entirely
        # ...and interpolate {version}
        self.repos = "\n".join(filter(None, (
            f"@{v} {r}" if isinstance(v, str) else
            f"#{r}" if v is False else
            r if v is True else None
            for r, v in self.repos.items()
        ))).format(version=self.version)

    def _stringify_packages(self):
        # resolve/stringify packages map
        #   <pkg>: true                 # add <pkg>
        #   <pkg>: <tag>                # add <pkg>@<tag>
        #   <pkg>: --no-scripts         # add --no-scripts <pkg>
        #   <pkg>: --no-scripts <tag>   # add --no-scripts <pkg>@<tag>
        #   <pkg>: false                # del <pkg>
        #   <pkg>: null                 # skip explicit add/del <pkg>
        pkgs = {'add': '', 'del': '', 'noscripts': ''}
        for p, v in self.packages.items():
            k = 'add'
            if isinstance(v, str):
                if '--no-scripts' in v:
                    k = 'noscripts'
                    v = v.replace('--no-scripts', '')
                v = v.strip()
                if len(v):
                    # remaining string is a repo tag for the package
                    p += f"@{v}"
            elif v is False:
                k = 'del'
            elif v is None:
                continue

            # append to the proper space-separated bucket
            pkgs[k] = p if len(pkgs[k]) == 0 else pkgs[k] + ' ' + p

        self.packages = pkgs

    def _stringify_services(self):
        # stringify services map
        #   <level>:
        #     <svc>: true     # enable <svc> at <level>
        #     <svc>: false    # disable <svc> at <level>
        #     <svc>: null     # skip explicit en/disable <svc> at <level>
        # result: {'enable': 'lvl=a,b ...', 'disable': 'lvl=c ...'},
        # dropping levels with no matching services (the trailing '=' filter)
        self.services = {
            'enable': ' '.join(filter(lambda x: not x.endswith('='), (
                '{}={}'.format(lvl, ','.join(filter(None, (
                    s if v is True else None
                    for s, v in svcs.items()
                ))))
                for lvl, svcs in self.services.items()
            ))),
            'disable': ' '.join(filter(lambda x: not x.endswith('='), (
                '{}={}'.format(lvl, ','.join(filter(None, (
                    s if v is False else None
                    for s, v in svcs.items()
                ))))
                for lvl, svcs in self.services.items()
            )))
        }

    def _stringify_dict_keys(self, d, sep):
        # join the keys whose value is exactly True with *sep*
        self.__dict__[d] = sep.join(filter(None, (
            m if v is True else None
            for m, v in self.__dict__[d].items()
        )))

    # TODO? determine_current_state()
    def determine_actions(self, step, revise):
        """Decide which of build/import/publish to perform for this image,
        consulting the cloud for the latest remote image and optionally
        revising (removing/rebumping) prior artifacts."""
        log = logging.getLogger('build')
        self.revision = 0
        # TODO: be more specific about our parameters
        self.remote_image = clouds.latest_build_image(self)
        actions = {}

        # enable actions based on the specified step
        if step in ['local', 'import', 'publish']:
            actions['build'] = True

        if step in ['import', 'publish']:
            actions['import'] = True

        if step == 'publish':
            # we will resolve publish destinations (if any) later
            actions['publish'] = True

        if revise:
            if os.path.exists(self.local_path):
                # remove previously built local image artifacts
                log.warning('Removing existing local image dir %s', self.local_dir)
                shutil.rmtree(self.local_dir)

            if self.remote_image and 'published' in self.remote_image['tags']:
                # published images are immutable; bump to a new revision
                log.warning('Bumping build revision for %s', self.name)
                self.revision = int(self.remote_image['tags']['build_revision']) + 1

            elif self.remote_image and 'imported' in self.remote_image['tags']:
                # remove existing imported (but unpublished) image
                log.warning('Removing unpublished remote image %s', self.remote_image['id'])
                # TODO: be more specific?
                clouds.remove_image(self)

            self.remote_image = None

        elif self.remote_image and 'imported' in self.remote_image['tags']:
            # already imported, don't build/import again
            log.warning('Already imported, skipping build/import')
            actions.pop('build', None)
            actions.pop('import', None)

        if os.path.exists(self.local_path):
            log.warning('Already built, skipping build')
            # local image's already built, don't rebuild
            actions.pop('build', None)

        # set at time of import, carries forward when published
        if self.remote_image:
            self.end_of_life = self.remote_image['tags']['end_of_life']
            self.revision = self.remote_image['tags']['build_revision']

        else:
            # default to tomorrow's date if unset
            if 'end_of_life' not in self.__dict__:
                tomorrow = datetime.utcnow() + timedelta(days=1)
                self.end_of_life = tomorrow.strftime('%F')

        self.actions = list(actions)
        log.info('%s/%s-r%s = %s', self.cloud, self.name, self.revision, self.actions)
|
||||
|
||||
|
||||
class Tags(dict):
    """A dict of tags whose values are always stored as strings.

    Tags are readable/writable as attributes (t.foo == t['foo']), and can
    be built from or rendered to the [{Key: ..., Value: ...}, ...] list
    form used by cloud tagging APIs.
    """

    def __init__(self, d=None, from_list=None, key_name='Key', value_name='Value'):
        """Populate from mapping *d* and/or tag-list *from_list*.

        FIX: the mutable default arguments (d={}, from_list-style list=[])
        are replaced with None sentinels.
        """
        for key, value in (d or {}).items():
            # route through __setattr__ so values are stringified
            self.__setattr__(key, value)

        if from_list:
            self.from_list(from_list, key_name, value_name)

    def __getattr__(self, key):
        # FIX: translate KeyError to AttributeError so hasattr()/getattr()
        # (and anything else relying on the attribute protocol) behave
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key) from None

    def __setattr__(self, key, value):
        # all tag values are stringified on the way in
        self[key] = str(value)

    def __delattr__(self, key):
        del self[key]

    def pop(self, key, default=None):
        """Remove *key* and return its value, or *default* when absent.

        FIX: delegates to dict.pop instead of reimplementing it, and makes
        *default* optional (backward-compatible: callers passing it still work).
        """
        return super().pop(key, default)

    def as_list(self, key_name='Key', value_name='Value'):
        """Render as a list of {key_name: k, value_name: v} dicts."""
        return [{key_name: k, value_name: v} for k, v in self.items()]

    def from_list(self, list=None, key_name='Key', value_name='Value'):
        """Load tags from a list of {key_name: ..., value_name: ...} dicts.

        NOTE: the parameter name 'list' shadows the builtin, but is kept
        for backward compatibility with keyword callers.
        """
        for tag in (list or []):
            self.__setattr__(tag[key_name], tag[value_name])
|
|
@ -1,7 +1,7 @@
|
|||
#!/bin/sh -eu
|
||||
# vim: ts=4 et:
|
||||
|
||||
[ -z "$DEBUG" ] || set +x
|
||||
[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x
|
||||
|
||||
export \
|
||||
TARGET=/mnt
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/bin/sh -eu
|
||||
# vim: ts=4 et:
|
||||
|
||||
[ -z "$DEBUG" -o "$DEBUG" = 0 ] || set -x
|
||||
[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x
|
||||
|
||||
export \
|
||||
DEVICE=/dev/vda \
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
# It is our intention to eventually incorporate this in a tiny-cloud-bootstrap
|
||||
# package (and per-cloud subpackages).
|
||||
|
||||
[ -z "$DEBUG" -o "$DEBUG" = 0 ] || set -x
|
||||
[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x
|
||||
|
||||
TARGET=/mnt
|
||||
SETUP=/tmp/setup-tiny.d
|
||||
|
|
Loading…
Reference in New Issue