Merge branch 'initial/part_three' into 'main'

alpine-cloud-images, part three

See merge request tomalok/alpine-cloud-images!128
Jake Buchholz Göktürk 2021-11-28 23:04:28 +00:00
commit 31b84a9dd1
31 changed files with 534 additions and 290 deletions


@@ -28,15 +28,9 @@ locals {
   # randomly generated password
   password = uuidv4()
 
-  # all build configs
-  all_configs = yamldecode(file("work/configs.yaml"))
-
-  # load the build actions to be taken
-  actions = yamldecode(file("work/actions.yaml"))
-
   # resolve actionable build configs
-  configs = { for b, acfg in local.actions:
-    b => merge(local.all_configs[b], acfg) if length(acfg.actions) > 0
+  configs = { for b, cfg in yamldecode(file("work/images.yaml")):
+    b => cfg if contains(keys(cfg), "actions")
   }
 }

@@ -99,10 +93,10 @@ build {
     boot_wait = var.qemu.boot_wait[B.value.arch]
 
     # results
-    output_directory = B.value.image.dir
-    disk_size = B.value.image.size
-    format = B.value.image.format
-    vm_name = B.value.image.file
+    output_directory = "work/images/${B.value.cloud}/${B.value.image_key}"
+    disk_size = B.value.size
+    format = B.value.local_format
+    vm_name = "image.${B.value.local_format}"
   }
 }

@@ -150,16 +144,18 @@ build {
     environment_vars = [
       "DEBUG=${var.DEBUG}",
       "ARCH=${B.value.arch}",
+      "BOOTLOADER=${B.value.bootloader}",
       "BOOTSTRAP=${B.value.bootstrap}",
       "BUILD_NAME=${B.value.name}",
       "BUILD_REVISION=${B.value.revision}",
       "CLOUD=${B.value.cloud}",
       "END_OF_LIFE=${B.value.end_of_life}",
       "FIRMWARE=${B.value.firmware}",
-      "IMAGE_LOGIN=${B.value.image.login}",
+      "IMAGE_LOGIN=${B.value.login}",
       "INITFS_FEATURES=${B.value.initfs_features}",
       "KERNEL_MODULES=${B.value.kernel_modules}",
       "KERNEL_OPTIONS=${B.value.kernel_options}",
+      "MOTD=${B.value.motd}",
       "PACKAGES_ADD=${B.value.packages.add}",
       "PACKAGES_DEL=${B.value.packages.del}",
       "PACKAGES_NOSCRIPTS=${B.value.packages.noscripts}",


@@ -8,6 +8,8 @@ from urllib.request import urlopen
 
 CDN_URL = 'https://dl-cdn.alpinelinux.org/alpine'
 
+# TODO: also get EOL from authoritative source
 
 def get_version_release(alpine_version):
     apk_ver = get_apk_version(alpine_version, 'main', 'x86_64', 'alpine-base')
     release = apk_ver.split('-')[0]

@@ -16,6 +18,7 @@ def get_version_release(alpine_version):
 
 # TODO? maybe download and parse APKINDEX instead?
+# also check out https://dl-cdn.alpinelinux.org/alpine/v3.15/releases/x86_64/latest-releases.yaml
 def get_apk_version(alpine_version, repo, arch, apk):
     repo_url = f"{CDN_URL}/{alpine_version}/{repo}/{arch}"
     apks_re = re.compile(f'"{apk}-(\\d.*)\\.apk"')
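For orientation: repo_url points at the CDN's plain directory index and apks_re captures the version out of the listed .apk filenames. The rest of get_apk_version() is not shown in this hunk; a minimal sketch of what it presumably does with these two values (the function body here is an assumption, not quoted from the source):

    from urllib.request import urlopen   # already imported at the top of alpine.py

    def sketch_get_apk_version(repo_url, apks_re):
        # fetch the CDN directory listing (plain HTML) and pull out the
        # version portion of a matching apk filename, e.g. '3.15.0-r0'
        with urlopen(repo_url) as res:
            index = res.read().decode()
        versions = apks_re.findall(index)
        return versions[0] if versions else None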

build

@@ -13,7 +13,6 @@ if not os.path.exists('work'):
     import venv
 
     PIP_LIBS = [
-        'pip',
         'mergedeep',
         'pyhocon',
         'python-dateutil',

@@ -21,6 +20,7 @@ if not os.path.exists('work'):
     ]
     print('Work environment does not exist, creating...', file=sys.stderr)
     venv.create('work', with_pip=True)
+    subprocess.run(['work/bin/pip', 'install', '-U', 'pip', 'wheel'])
    subprocess.run(['work/bin/pip', 'install', '-U', *PIP_LIBS])
 
 # Re-execute using the right virtual environment, if necessary.

@@ -109,6 +109,7 @@ def is_same_dir_symlink(x):
     return x_link == os.path.basename(x_link)
 
+# TODO: revisit this to improve --custom overlay implementation
 def install_overlay(overlay):
     log.info("Installing '%s' overlay in work environment", overlay)
     dest_dir = os.path.join('work', overlay)

@@ -123,7 +124,7 @@ def install_overlay(overlay):
         else:
             rel_x = os.path.relpath(src_x, dest_dir)
-            # TODO: only images.conf symlink can be overridden
+            # TODO: only images.conf symlink can be overridden, in reality
             if os.path.islink(dest_x):
                 # only same-dir symlinks can be overridden
                 if not is_same_dir_symlink(dest_x):

@@ -230,7 +231,7 @@ log.debug(args)
 
 # set up credential provider, if we're going to use it
 if args.use_broker:
-    clouds.set_credential_provider()
+    clouds.set_credential_provider(debug=args.debug)
 
 ### Setup Configs

@@ -256,7 +257,7 @@ if args.step == 'configs':
 
 ### What needs doing?
 
-if not image_configs.determine_actions(
+if not image_configs.refresh_state(
         step=args.step, only=args.only, skip=args.skip, revise=args.revise):
     log.info('No pending actions to take at this time.')
     sys.exit(0)

@@ -307,4 +308,11 @@ if p.returncode != 0:
 
 log.info('Packer Completed')
 
-# TODO? collect artifacts?
+# update final state in work/images.yaml
+image_configs.refresh_state(
+    step='final',
+    only=args.only,
+    skip=args.skip
+)
+
+log.info('Build Finished')


@@ -42,28 +42,6 @@ ACTIONS = ['import', 'publish']
 LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
 
-### Functions
-
-# TODO? be more specific with args?
-# import image config's local image to cloud
-def import_image(ic):
-    imported = clouds.import_image(ic)
-    # write imported metadata
-    imported_yaml = Path(os.path.join(ic.local_dir, 'imported.yaml'))
-    yaml.dump(imported, imported_yaml)
-
-# TODO? be more specific with args?
-# publish image config's imported image to target regions with expected permissions
-def publish_image(ic):
-    published = clouds.publish_image(ic)
-    # ensure image work directory exists
-    os.makedirs(ic.local_dir, exist_ok=True)
-    # write published metadata
-    published_yaml = Path(os.path.join(ic.local_dir, 'published.yaml'))
-    yaml.dump(published, published_yaml)
-
 ### Command Line & Logging
 
 parser = argparse.ArgumentParser(description=NOTE)

@@ -85,7 +63,7 @@ log.debug(args)
 
 # set up credential provider, if we're going to use it
 if args.use_broker:
-    clouds.set_credential_provider()
+    clouds.set_credential_provider(debug=args.debug)
 
 # load build configs
 configs = ImageConfigManager(

@@ -95,13 +73,15 @@ configs = ImageConfigManager(
 )
 
 yaml = YAML()
-yaml.default_flow_style = False
+yaml.explicit_start = True
 
 for image_key in args.image_keys:
     image_config = configs.get(image_key)
 
     if args.action == 'import':
-        import_image(image_config)
+        clouds.import_image(image_config)
     elif args.action == 'publish':
-        publish_image(image_config)
+        os.makedirs(image_config.local_dir, exist_ok=True)
+        artifacts = clouds.publish_image(image_config)
+        yaml.dump(artifacts, Path(image_config.local_dir) / 'artifacts.yaml')


@@ -16,9 +16,9 @@ register(aws) # , oci, azure, gcp)
 
 # using a credential provider is optional, set across all adapters
-def set_credential_provider():
+def set_credential_provider(debug=False):
     from .identity_broker_client import IdentityBrokerClient
-    cred_provider = IdentityBrokerClient()
+    cred_provider = IdentityBrokerClient(debug=debug)
     for adapter in ADAPTERS.values():
         adapter.cred_provider = cred_provider
 
@@ -26,15 +26,18 @@ def set_credential_provider():
 
 ### forward to the correct adapter
 
 def latest_build_image(config):
-    return ADAPTERS[config.cloud].latest_build_image(config.name)
+    return ADAPTERS[config.cloud].latest_build_image(
+        config.project,
+        config.image_key
+    )
 
 def import_image(config):
     return ADAPTERS[config.cloud].import_image(config)
 
-def remove_image(config):
-    return ADAPTERS[config.cloud].remove_image(config.remote_image['id'])
+def remove_image(config, image_id):
+    return ADAPTERS[config.cloud].remove_image(image_id)
 
 def publish_image(config):
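These helpers dispatch on config.cloud through a module-level registry that register(aws) populates at import time. The registry itself is not part of this hunk; a minimal sketch of the assumed pattern (the ADAPTERS layout and the per-module register() call are inferred from the calls shown, not quoted from the project):

    ADAPTERS = {}   # cloud name -> adapter instance, e.g. {'aws': <AWSCloudAdapter>}

    def register(*mods):
        for mod in mods:
            # each cloud module (clouds/aws.py, etc.) exposes register(cloud, cred_provider=None);
            # it is assumed to return an adapter that knows its own cloud name
            adapter = mod.register(mod.__name__.split('.')[-1])
            ADAPTERS[adapter.cloud] = adapter

    def import_image(config):
        # every helper follows the same dispatch-on-config.cloud shape
        return ADAPTERS[config.cloud].import_image(config)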


@@ -2,20 +2,22 @@
 # vim: ts=4 et:
 
 import logging
+import hashlib
 import os
-import random
-import string
-import sys
 import time
 
 from datetime import datetime
 from subprocess import Popen, PIPE, run
 
 from .interfaces.adapter import CloudAdapterInterface
-from image_configs import Tags
+from image_configs import Tags, DictObj
 
 
 class AWSCloudAdapter(CloudAdapterInterface):
+    IMAGE_INFO = [
+        'revision', 'imported', 'import_id', 'import_region', 'published',
+        'end_of_life',
+    ]
     CRED_MAP = {
         'access_key': 'aws_access_key_id',
         'secret_key': 'aws_secret_access_key',

@@ -54,7 +56,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
 
         return self._sessions[region]
 
-    # TODO: property?
+    @property
     def regions(self):
         if self.cred_provider:
             return self.cred_provider.get_regions(self.cloud)

@@ -62,7 +64,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
         # list of all subscribed regions
         return {r['RegionName']: True for r in self.session().client('ec2').describe_regions()['Regions']}
 
-    # TODO: property?
+    @property
     def default_region(self):
         if self.cred_provider:
             return self.cred_provider.get_default_region(self.cloud)

@@ -79,82 +81,63 @@ class AWSCloudAdapter(CloudAdapterInterface):
         # return dict suitable to use for session()
         return {self.CRED_MAP[k]: v for k, v in creds.items() if k in self.CRED_MAP}
 
-    def _get_images_with_tags(self, tags={}, region=None):
+    def _get_images_with_tags(self, project, image_key, tags={}, region=None):
         ec2r = self.session(region).resource('ec2')
         req = {'Owners': ['self'], 'Filters': []}
+        tags |= {
+            'project': project,
+            'image_key': image_key,
+        }
         for k, v in tags.items():
             req['Filters'].append({'Name': f"tag:{k}", 'Values': [str(v)]})
         return sorted(
             ec2r.images.filter(**req), key=lambda k: k.creation_date, reverse=True)
 
-    def _aws_tags(self, b_tags):
-        # convert dict to [{'Key': k, 'Value': v}, ...]
-        a_tags = []
-        for k, v in b_tags.items():
-            # add extra Name tag
-            if k == 'name':
-                a_tags += [{'Key': 'Name', 'Value': str(v)}]
-            a_tags += [{'Key': k, 'Value': str(v)}]
-        return a_tags
-
-    # cloud-agnostic necessary info about an ec2.Image
+    # necessary cloud-agnostic image info
     def _image_info(self, i):
         tags = Tags(from_list=i.tags)
-        del tags.Name
-        # TODO? realm/partition?
-        return {
-            'id': i.image_id,
-            'region': i.meta.client.meta.region_name,
-            'tags': dict(tags)
-            # TODO? narrow down to these?
-            # imported = i.tags.imported
-            # published = i.tags.published
-            # revision = i.tags.build_revision
-            # source_id = i.image_id,
-            # source_region = i.meta.client.meta.region_name,
-        }
+        return DictObj({k: tags.get(k, None) for k in self.IMAGE_INFO})
 
     # get the latest imported image for a given build name
-    def latest_build_image(self, build_name):
-        images = self._get_images_with_tags(tags={'build_name': build_name})
+    def latest_build_image(self, project, image_key):
+        images = self._get_images_with_tags(
+            project=project,
+            image_key=image_key,
+        )
        if images:
             # first one is the latest
             return self._image_info(images[0])
 
         return None
 
-    ## TODO: rework these next two as a Tags class
-
     # import an image
     # NOTE: requires 'vmimport' role with read/write of <s3_bucket>.* and its objects
     def import_image(self, ic):
         log = logging.getLogger('import')
         image_path = ic.local_path
-        image_aws = image_path.replace(ic.local_format, 'vhd')
+        image_aws = ic.local_dir / 'image.vhd'
+        name = ic.image_name
         description = ic.image_description
 
-        session = self.session()
-        s3r = session.resource('s3')
-        ec2c = session.client('ec2')
-        ec2r = session.resource('ec2')
-
         # convert QCOW2 to VHD
         log.info('Converting %s to VHD format', image_path)
         p = Popen(self.CONVERT_CMD + (image_path, image_aws), stdout=PIPE, stdin=PIPE, encoding='utf8')
         out, err = p.communicate()
         if p.returncode:
             log.error('Unable to convert %s to VHD format (%s)', image_path, p.returncode)
+            log.error('EXIT: %d', p.returncode)
             log.error('STDOUT:\n%s', out)
             log.error('STDERR:\n%s', err)
-            sys.exit(p.returncode)
+            raise RuntimeError
 
-        bucket_name = 'alpine-cloud-images.' + ''.join(
-            random.SystemRandom().choice(string.ascii_lowercase + string.digits)
-            for _ in range(40))
-        s3_key = os.path.basename(image_aws)
+        session = self.session()
+        s3r = session.resource('s3')
+        ec2c = session.client('ec2')
+        ec2r = session.resource('ec2')
+
+        bucket_name = 'alpine-cloud-images.' + hashlib.sha1(os.urandom(40)).hexdigest()
+        s3_key = name + '.vhd'
 
         bucket = s3r.Bucket(bucket_name)
         log.info('Creating S3 bucket %s', bucket.name)

@@ -166,7 +149,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
 
         try:
             log.info('Uploading %s to %s', image_aws, s3_url)
-            bucket.upload_file(image_aws, s3_key)
+            bucket.upload_file(str(image_aws), s3_key)
 
             # import snapshot from S3
             log.info('Importing EC2 snapshot from %s', s3_url)

@@ -187,7 +170,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
                 if task_detail['Status'] not in ['pending', 'active', 'completed']:
                     msg = f"Bad EC2 snapshot import: {task_detail['Status']} - {task_detail['StatusMessage']}"
                     log.error(msg)
-                    raise(RuntimeError, msg)
+                    raise RuntimeError(msg)
 
                 if task_detail['Status'] == 'completed':
                     snapshot_id = task_detail['SnapshotId']

@@ -246,8 +229,8 @@ class AWSCloudAdapter(CloudAdapterInterface):
             # tag image (adds imported tag)
             log.info('Tagging EC2 AMI %s', image_id)
             tags.imported = datetime.utcnow().isoformat()
-            tags.source_id = image_id
-            tags.source_region = ec2c.meta.region_name
+            tags.import_id = image_id
+            tags.import_region = ec2c.meta.region_name
             image.create_tags(Tags=tags.as_list())
         except Exception:
             log.error('Unable to tag image:', exc_info=True)

@@ -263,7 +246,6 @@ class AWSCloudAdapter(CloudAdapterInterface):
         log = logging.getLogger('build')
         ec2r = self.session().resource('ec2')
         image = ec2r.Image(image_id)
-        # TODO? protect against removing a published image?
         snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
         snapshot = ec2r.Snapshot(snapshot_id)
         log.info('Deregistering %s', image_id)

@@ -271,35 +253,29 @@ class AWSCloudAdapter(CloudAdapterInterface):
         log.info('Deleting %s', snapshot_id)
         snapshot.delete()
 
-    # TODO: this should be standardized and work with cred_provider
-    def _get_all_regions(self):
-        ec2c = self.session().client('ec2')
-        res = ec2c.describe_regions(AllRegions=True)
-        return {
-            r['RegionName']: r['OptInStatus'] != 'not-opted-in'
-            for r in res['Regions']
-        }
-
     # publish an image
     def publish_image(self, ic):
         log = logging.getLogger('publish')
-        source_image = self.latest_build_image(ic.name)
+        source_image = self.latest_build_image(
+            ic.project,
+            ic.image_key,
+        )
         if not source_image:
-            log.error('No source image for %s', ic.name)
-            sys.exit(1)
+            log.error('No source image for %s', ic.image_key)
+            raise RuntimeError('Missing source imamge')
 
-        source_id = source_image['id']
-        log.info('Publishing source: %s, %s', source_image['region'], source_id)
+        source_id = source_image.import_id
+        source_region = source_image.import_region
+        log.info('Publishing source: %s/%s', source_region, source_id)
         source = self.session().resource('ec2').Image(source_id)
         source_tags = Tags(from_list=source.tags)
-        publish = ic.publish
 
         # sort out published image access permissions
         perms = {'groups': [], 'users': []}
-        if 'PUBLIC' in publish['access'] and publish['access']['PUBLIC']:
+        if ic.access.get('PUBLIC', None):
             perms['groups'] = ['all']
         else:
-            for k, v in publish['access'].items():
+            for k, v in ic.access.items():
                 if v:
                     log.debug('users: %s', k)
                     perms['users'].append(str(k))

@@ -307,14 +283,13 @@ class AWSCloudAdapter(CloudAdapterInterface):
         log.debug('perms: %s', perms)
 
         # resolve destination regions
-        regions = self.regions()
-        if 'ALL' in publish['regions'] and publish['regions']['ALL']:
+        regions = self.regions
+        if ic.regions.pop('ALL', None):
             log.info('Publishing to ALL available regions')
         else:
             # clear ALL out of the way if it's still there
-            publish['regions'].pop('ALL', None)
-            # TODO: politely warn/skip unknown regions in b.aws.regions
-            regions = {r: regions[r] for r in publish['regions']}
+            ic.regions.pop('ALL', None)
+            regions = {r: regions[r] for r in ic.regions}
 
         publishing = {}
         for r in regions.keys():

@@ -324,10 +299,9 @@ class AWSCloudAdapter(CloudAdapterInterface):
             images = self._get_images_with_tags(
                 region=r,
-                tags={
-                    'build_name': ic.name,
-                    'build_revision': ic.revision
-                }
+                project=ic.project,
+                image_key=ic.image_key,
+                tags={'revision': ic.revision}
             )
             if images:
                 image = images[0]

@@ -338,8 +312,8 @@ class AWSCloudAdapter(CloudAdapterInterface):
                 res = ec2c.copy_image(
                     Description=source.description,
                     Name=source.name,
-                    SourceImageId=source.id,
-                    SourceRegion=source_image['region'],
+                    SourceImageId=source_id,
+                    SourceRegion=source_region,
                 )
             except Exception:
                 log.warning('Skipping %s, unable to copy image:', r, exc_info=True)

@@ -351,11 +325,11 @@ class AWSCloudAdapter(CloudAdapterInterface):
             publishing[r] = image
 
-        published = {}
+        artifacts = {}
         copy_wait = 180
-        while len(published) < len(publishing):
+        while len(artifacts) < len(publishing):
             for r, image in publishing.items():
-                if r not in published:
+                if r not in artifacts:
                     image.reload()
                     if image.state == 'available':
                         # tag image

@@ -395,22 +369,22 @@ class AWSCloudAdapter(CloudAdapterInterface):
                         log.info('%s: Setting EOL deprecation time on %s', r, image.id)
                         ec2c.enable_image_deprecation(
                             ImageId=image.id,
-                            DeprecateAt=f"{source_image['tags']['end_of_life']}T23:59:59Z"
+                            DeprecateAt=f"{source_image.end_of_life}T23:59:59Z"
                         )
-                        published[r] = self._image_info(image)
+                        artifacts[r] = image.id
 
                     if image.state == 'failed':
                         log.error('%s: %s - %s - %s', r, image.id, image.state, image.state_reason)
-                        published[r] = None
+                        artifacts[r] = None
 
-            remaining = len(publishing) - len(published)
+            remaining = len(publishing) - len(artifacts)
             if remaining > 0:
                 log.info('Waiting %ds for %d images to complete', copy_wait, remaining)
                 time.sleep(copy_wait)
                 copy_wait = 30
 
-        return published
+        return artifacts
 
 
 def register(cloud, cred_provider=None):
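Net effect of the publish_image() rewrite: instead of per-region image metadata dicts, it now returns a flat region-to-AMI-ID map, which cloud_helper.py dumps to artifacts.yaml and refresh_state() later reads back. A hedged illustration of that shape (the region names and AMI IDs below are made-up examples):

    # hypothetical value returned by publish_image() and written to artifacts.yaml
    artifacts = {
        'us-west-2': 'ami-0123456789abcdef0',   # copy finished: tagged, perms and EOL deprecation set
        'us-east-1': 'ami-0fedcba9876543210',
        'eu-west-1': None,                      # image.state == 'failed' in that region
    }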


@@ -59,6 +59,7 @@ class IdentityBrokerClient:
         return True
 
     def _get(self, path):
+        self._logger.debug("request: %s", path)
         if not self._is_cache_valid(path):
             while True: # to handle rate limits
                 try:

@@ -95,6 +96,7 @@ class IdentityBrokerClient:
                     self._cache[path] = json.load(res)
                     break
 
+        self._logger.debug("response: %s", self._cache[path])
         return self._cache[path]
 
     def get_credentials_url(self, vendor):

@@ -117,7 +119,6 @@ class IdentityBrokerClient:
             if region['default']:
                 self._default_region[vendor] = region['name']
-            out[None] = region['credentials_url']
 
         return out

@@ -128,4 +129,7 @@ class IdentityBrokerClient:
         return self._default_region.get(vendor)
 
     def get_credentials(self, vendor, region=None):
+        if not region:
+            region = self.get_default_region(vendor)
+
         return self._get(self.get_regions(vendor)[region])


@@ -13,9 +13,11 @@ class CloudAdapterInterface:
     def sdk(self):
         raise NotImplementedError
 
+    @property
     def regions(self):
         raise NotImplementedError
 
+    @property
     def default_region(self):
         raise NotImplementedError

@@ -25,11 +27,9 @@ class CloudAdapterInterface:
     def session(self, region=None):
         raise NotImplementedError
 
-    def latest_build_image(self, build_name):
+    def latest_build_image(self, project, image_key):
         raise NotImplementedError
 
-    # TODO: be more specific about what gets passed into these
-
     def import_image(self, config):
         raise NotImplementedError
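Because regions and default_region are now properties, concrete adapters override them as attribute-style accessors and callers drop the parentheses (which is why publish_image() in the AWS adapter switched from self.regions() to self.regions). A small sketch against the interface defined above (DummyAdapter and its values are illustrative only):

    class DummyAdapter(CloudAdapterInterface):

        @property
        def regions(self):
            # region name -> enabled/subscribed, mirroring the AWS adapter
            return {'region-1': True, 'region-2': False}

        @property
        def default_region(self):
            return 'region-1'

    # attribute access, no call:  adapter.regions['region-1'], adapter.default_region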


@@ -1,24 +1,45 @@
 # vim: ts=2 et:
 
+# NOTE: If you are using alpine-cloud-images to build public cloud images
+#   for something/someone other than Alpine Linux, you *MUST* override
+#   *AT LEAST* the 'project' setting with a unique identifier string value
+#   via a "config overlay" to avoid image import and publishing collisions.
+
+project = https://alpinelinux.org/cloud
+
 # all build configs start with these
 Default {
+  project = ${project}
+
   # image name/description components
   name = [ alpine ]
-  description = [ "Alpine Linux {release}-r{revision}" ]
+  description = [ Alpine Linux ]
+
+  motd {
+    welcome = "Welcome to Alpine!"
+
+    wiki = \
+      "The Alpine Wiki contains a large amount of how-to guides and general\n"\
+      "information about administrating Alpine systems.\n"\
+      "See <https://wiki.alpinelinux.org/>."
+
+    version_notes = "Release Notes:\n"\
+      "* <https://alpinelinux.org/posts/alpine-{version}.0/released.html>"
+    release_notes = "* <https://alpinelinux.org/posts/{release}/released.html"
+  }
 
   # initial provisioning script and data directory
   scripts = [ setup ]
   script_dirs = [ setup.d ]
 
-  # image settings
-  image.format = qcow2
-  image.size = 1G
-  image.login = alpine
+  size = 1G
+  login = alpine
+
+  local_format = qcow2
 }
 
 # profile build matrix
 Dimensions {
   version {
+    "3.15" { include required("version/3.15.conf") }
     "3.14" { include required("version/3.14.conf") }
     "3.13" { include required("version/3.13.conf") }
     "3.12" { include required("version/3.12.conf") }

@@ -35,32 +56,21 @@ Dimensions {
   }
   bootstrap {
     tiny { include required("bootstrap/tiny.conf") }
-    # cloudinit { include required("bootstrap/cloudinit.conf") }
   }
   cloud {
     aws { include required("cloud/aws.conf") }
-    # oci { include required("cloud/oci.conf") }
-    # gcp { include required("cloud/gcp.conf") }
-    # azure { include required("cloud/azure.conf") }
   }
 }
 
 # all build configs merge these at the very end
 Mandatory {
-  description = [ - https://alpinelinux.org/cloud ]
+  name = [ "r{revision}" ]
+  description = [ - https://alpine.linux.org/cloud ]
+
+  motd {
+    motd_change = "You may change this message by editing /etc/motd."
+  }
 
   # final provisioning script
   scripts = [ cleanup ]
-
-  # override while developing/testing...
-  aws.publish {
-    access {
-      PUBLIC = false
-    }
-    regions {
-      ALL = false
-      us-west-2 = true # where i'm building
-      us-east-1 = true # test publishing
-    }
-  }
 }
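Default, Dimensions, and Mandatory are resolved by ImageConfigManager (further down, in image_configs.py): every combination of dimension values becomes one build config, keyed by joining its dimension keys, with Default merged first and Mandatory last. A rough sketch of just the enumeration step (the dimension values listed here are an illustrative subset, not the full set from the config tree):

    import itertools

    dimensions = {   # illustrative subset of the Dimensions block above
        'version':   ['3.15', '3.14', '3.13', '3.12'],
        'arch':      ['x86_64', 'aarch64'],
        'firmware':  ['bios', 'uefi'],
        'bootstrap': ['tiny'],
        'cloud':     ['aws'],
    }

    config_keys = [
        '-'.join(dim_keys)     # e.g. '3.15-x86_64-bios-tiny-aws'
        for dim_keys in itertools.product(*dimensions.values())
    ]
    # EXCLUDE entries (e.g. x86_64's "EXCLUDE = [uefi]") then drop incompatible combos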


@@ -1,5 +1,8 @@
 # vim: ts=2 et:
 name = [x86_64]
 
+# TODO: until we have a image metadata service, let's avoid UEFI
+EXCLUDE = [uefi]
+
 qemu.machine_type = null
 qemu.args = null


@@ -1,2 +0,0 @@
-# vim: ts=2 et:
-name = [cloudinit]


@@ -1,10 +1,11 @@
 # vim: ts=2 et:
 name = [tiny]
 
 WHEN {
   aws {
     packages.tiny-ec2-bootstrap = true
     services.default.tiny-ec2-bootstrap = true
     scripts = [ setup-tiny ]
     script_dirs = [ setup-tiny.d ]
   }
 }


@@ -1,40 +1,23 @@
 # vim: ts=2 et:
 builder = qemu
 
-aws {
-  s3_bucket = alpine-cloud-images
-  publish {
-    access {
-      PUBLIC = true
-      # alternately...
-      #PUBLIC = false
-      # and/or
-      #<aws-userid> = true
-      # ...
-    }
-    regions {
-      ALL = true
-      # alternately...
-      #<region> = true
-      # ...
-    }
-  }
-}
+kernel_modules.ena = true
+initfs_features.ena = true
+
+access.PUBLIC = true
+regions.ALL = true
 
 WHEN {
-  # Arch
   aarch64 {
-    aws.arch = arm64
+    # new AWS aarch64 default...
+    kernel_modules.gpio_pl061 = true
+    initfs_features.gpio_pl061 = true
+    WHEN {
+      "3.14 3.13 3.12" {
+        # ...but not supported for older versions
+        kernel_modules.gpio_pl061 = false
+        initfs_features.gpio_pl061 = false
+      }
+    }
   }
-  x86_64 {
-    aws.arch = x86_64
-  }
-
-  # Firmware
-  bios {
-    aws.boot_mode = legacy-bios
-  }
-  uefi {
-    aws.boot_mode = uefi
-  }
 }


@@ -1,2 +0,0 @@
-# vim: ts=2 et:
-builder = qemu


@@ -1,8 +1,6 @@
 # vim: ts=2 et:
 name = [bios]
 
-packages {
-  syslinux = --no-scripts
-}
+bootloader = extlinux
+packages.syslinux = --no-scripts
 
 qemu.firmware = null


@@ -1,8 +1,10 @@
 # vim: ts=2 et:
 name = [uefi]
 
+bootloader = grub-efi
 packages {
   grub-efi = --no-scripts
+  dosfstools = true
 }
 
 WHEN {


@@ -0,0 +1,9 @@
+# vim: ts=2 et:
+include required("base/3.conf")
+
+end_of_life = "2023-11-01"
+
+motd {
+  sudo_deprecated = "NOTE: 'sudo' has been deprecated, please use 'doas' instead."
+}


@@ -52,7 +52,6 @@ kernel_modules {
   usb-storage = true
   ext4 = true
   nvme = true
-  ena = true
 }
 
 kernel_options {

@@ -62,5 +61,4 @@ kernel_options {
 
 initfs_features {
   nvme = true
-  ena = true
 }


@@ -5,6 +5,7 @@ include required("1.conf")
 packages {
   # drop old alpine-mirrors
   alpine-mirrors = null
+
   # use iproute2-minimal instead of full iproute2
   iproute2 = null
   iproute2-minimal = true

@@ -3,7 +3,6 @@
 include required("2.conf")
 
 packages {
-  # doas replaces sudo
-  sudo = null
+  # doas will officially replace sudo in 3.16
   doas = true
 }


@@ -0,0 +1,8 @@
+# vim: ts=2 et:
+include required("3.conf")
+
+packages {
+  # doas officially replaces sudo in 3.16
+  sudo = false
+}


@@ -1,6 +1,10 @@
 # vim: ts=2 et:
-include required("base/3.conf")
+include required("base/4.conf")
+
+motd {
+  sudo_removed = "NOTE: 'sudo' is no longer installed by default, please use 'doas' instead."
+}
 
 # clear out inherited repos
 repos = null

gen_releases.py (new executable file)

@@ -0,0 +1,100 @@
#!/usr/bin/env python3
# vim: ts=4 et:
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys
import textwrap
NOTE = textwrap.dedent("""
This script's output is meant to be compatible with alpine-ec2-ami's
releases.yaml, in order to bridge the gap until https://alpinelinux.org/cloud
can be updated to be generated from another source, or dynamically calls an
published image metadata service. This script should only be run after
the main 'build' script has been used successfully to publish all images.
""")
sys.pycache_prefix = 'work/__pycache__'
if not os.path.exists('work'):
print('FATAL: Work directory does not exist.', file=sys.stderr)
print(NOTE, file=sys.stderr)
exit(1)
# Re-execute using the right virtual environment, if necessary.
venv_args = [os.path.join('work', 'bin', 'python3')] + sys.argv
if os.path.join(os.getcwd(), venv_args[0]) != sys.executable:
print("Re-executing with work environment's Python...\n", file=sys.stderr)
os.execv(venv_args[0], venv_args)
# We're now in the right Python environment
import argparse
import logging
from collections import defaultdict
from ruamel.yaml import YAML
import clouds
from image_configs import ImageConfigManager
### Constants & Variables
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
### Functions
# allows us to set values deep within an object that might not be fully defined
def dictfactory():
return defaultdict(dictfactory)
# undo dictfactory() objects to normal objects
def undictfactory(o):
if isinstance(o, defaultdict):
o = {k: undictfactory(v) for k, v in o.items()}
return o
### Command Line & Logging
parser = argparse.ArgumentParser(description=NOTE)
parser.add_argument(
'--use-broker', action='store_true',
help='use the identity broker to get credentials')
parser.add_argument('--debug', action='store_true', help='enable debug output')
args = parser.parse_args()
log = logging.getLogger('gen_releases')
log.setLevel(logging.DEBUG if args.debug else logging.INFO)
console = logging.StreamHandler(sys.stderr)
console.setFormatter(logging.Formatter(LOGFORMAT))
log.addHandler(console)
log.debug(args)
# set up credential provider, if we're going to use it
if args.use_broker:
clouds.set_credential_provider()
# load build configs
configs = ImageConfigManager(
conf_path='work/configs/images.conf',
yaml_path='work/images.yaml',
log='gen_releases'
)
# make sure images.yaml is up-to-date with reality
configs.refresh_state('final')
yaml = YAML()
releases = dictfactory()
for i_key, i_cfg in configs.get().items():
release = i_cfg.version if i_cfg.version == 'edge' else i_cfg.release
releases[release][i_key][i_cfg.tags.name] = dict(i_cfg.tags) | {
'creation_date': i_cfg.published,
'artifacts': i_cfg.artifacts,
}
yaml.dump(undictfactory(releases), sys.stdout)
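dictfactory()/undictfactory() exist so the loop above can assign releases[release][i_key][name] without creating the intermediate dicts first, and still hand ruamel.yaml plain dicts to dump. A small standalone illustration (the keys and values below are placeholders, not real image data):

    from collections import defaultdict

    def dictfactory():
        return defaultdict(dictfactory)

    def undictfactory(o):
        if isinstance(o, defaultdict):
            o = {k: undictfactory(v) for k, v in o.items()}
        return o

    releases = dictfactory()
    # nested keys spring into existence on assignment
    releases['3.15']['<build key>']['<image name>'] = {'creation_date': '<timestamp>'}
    plain = undictfactory(releases)   # ordinary nested dicts, safe for yaml.dump()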


@@ -3,7 +3,6 @@
 import itertools
 import logging
 import mergedeep
-import os
 import pyhocon
 import shutil

@@ -30,7 +29,6 @@ class ImageConfigManager():
         self.yaml = YAML()
         self.yaml.register_class(ImageConfig)
-        self.yaml.default_flow_style = False
         self.yaml.explicit_start = True
         # hide !ImageConfig tag from Packer
         self.yaml.representer.org_represent_mapping = self.yaml.representer.represent_mapping

@@ -50,8 +48,7 @@ class ImageConfigManager():
     # load already-resolved YAML configs, restoring ImageConfig objects
     def _load_yaml(self):
-        # TODO: no warning if we're being called from cloud_helper.py
-        self.log.warning('Loading existing %s', self.yaml_path)
+        self.log.info('Loading existing %s', self.yaml_path)
         for key, config in self.yaml.load(self.yaml_path).items():
             self._configs[key] = ImageConfig(key, config)

@@ -74,18 +71,29 @@ class ImageConfigManager():
         # set version releases
         for v, vcfg in cfg.Dimensions.version.items():
             # version keys are quoted to protect dots
-            self.set_version_release(v.strip('"'), vcfg)
+            self._set_version_release(v.strip('"'), vcfg)
 
         dimensions = list(cfg.Dimensions.keys())
         self.log.debug('dimensions: %s', dimensions)
 
         for dim_keys in (itertools.product(*cfg['Dimensions'].values())):
-            image_key = '-'.join(dim_keys).replace('"', '')
+            config_key = '-'.join(dim_keys).replace('"', '')
 
             # dict of dimension -> dimension_key
             dim_map = dict(zip(dimensions, dim_keys))
+
+            # replace version with release, and make image_key from that
             release = cfg.Dimensions.version[dim_map['version']].release
-            image_config = ImageConfig(image_key, {'release': release} | dim_map)
+            (rel_map := dim_map.copy())['version'] = release
+            image_key = '-'.join(rel_map.values())
+
+            image_config = ImageConfig(
+                config_key,
+                {
+                    'image_key': image_key,
+                    'release': release
+                } | dim_map
+            )
 
             # merge in the Default config
             image_config._merge(cfg.Default)

@@ -93,12 +101,19 @@ class ImageConfigManager():
             # merge in each dimension key's configs
             for dim, dim_key in dim_map.items():
                 dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key])
 
                 exclude = dim_cfg.pop('EXCLUDE', None)
                 if exclude and set(exclude) & set(dim_keys):
-                    self.log.debug('%s SKIPPED, %s excludes %s', image_key, dim_key, exclude)
+                    self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude)
                     skip = True
                     break
 
+                if eol := dim_cfg.get('end_of_life', None):
+                    if self.now > datetime.fromisoformat(eol):
+                        self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol)
+                        skip = True
+                        break
+
                 image_config._merge(dim_cfg)
 
             # now that we're done with ConfigTree/dim_cfg, remove " from dim_keys

@@ -122,40 +137,41 @@ class ImageConfigManager():
             image_config.qemu['iso_url'] = self.iso_url_format.format(arch=image_config.arch)
 
             # we've resolved everything, add tags attribute to config
-            self._configs[image_key] = image_config
+            self._configs[config_key] = image_config
 
         self._save_yaml()
 
     # set current version release
-    def set_version_release(self, v, c):
+    def _set_version_release(self, v, c):
         if v == 'edge':
             c.put('release', self.now.strftime('%Y%m%d'))
             c.put('end_of_life', self.tomorrow.strftime('%F'))
         else:
             c.put('release', get_version_release(f"v{v}")['release'])
 
-        # release is also appended to build name array
+        # release is also appended to name & description arrays
         c.put('name', [c.release])
+        c.put('description', [c.release])
 
     # update current config status
-    def determine_actions(self, step, only, skip, revise):
-        self.log.info('Determining Actions')
+    def refresh_state(self, step, only=[], skip=[], revise=False):
+        self.log.info('Refreshing State')
         has_actions = False
         for ic in self._configs.values():
             # clear away any previous actions
             if hasattr(ic, 'actions'):
                 delattr(ic, 'actions')
 
-            dim_keys = set(ic.image_key.split('-'))
+            dim_keys = set(ic.config_key.split('-'))
             if only and len(set(only) & dim_keys) != len(only):
-                self.log.debug("%s SKIPPED, doesn't match --only", ic.image_key)
+                self.log.debug("%s SKIPPED, doesn't match --only", ic.config_key)
                 continue
 
             if skip and len(set(skip) & dim_keys) > 0:
-                self.log.debug('%s SKIPPED, matches --skip', ic.image_key)
+                self.log.debug('%s SKIPPED, matches --skip', ic.config_key)
                 continue
 
-            ic.determine_actions(step, revise)
+            ic.refresh_state(step, revise)
             if not has_actions and len(ic.actions):
                 has_actions = True

@@ -166,8 +182,8 @@ class ImageConfigManager():
 
 class ImageConfig():
 
-    def __init__(self, image_key, obj={}):
-        self.image_key = str(image_key)
+    def __init__(self, config_key, obj={}):
+        self.config_key = str(config_key)
         tags = obj.pop('tags', None)
         self.__dict__ |= self._deep_dict(obj)
         # ensure tag values are str() when loading

@@ -176,15 +192,19 @@ class ImageConfig():
 
     @property
     def local_dir(self):
-        return os.path.join('work/images', self.name)
+        return Path('work/images') / self.cloud / self.image_key
 
     @property
     def local_path(self):
-        return os.path.join(self.local_dir, 'image.' + self.local_format)
+        return self.local_dir / ('image.' + self.local_format)
 
+    @property
+    def published_yaml(self):
+        return self.local_dir / 'published.yaml'
+
     @property
     def image_name(self):
-        return '-r'.join([self.name, str(self.revision)])
+        return self.name.format(**self.__dict__)
 
     @property
     def image_description(self):

@@ -196,19 +216,20 @@ class ImageConfig():
         t = {
             'arch': self.arch,
             'bootstrap': self.bootstrap,
-            'build_name': self.name,
-            'build_revision': self.revision,
             'cloud': self.cloud,
             'description': self.image_description,
             'end_of_life': self.end_of_life,
             'firmware': self.firmware,
+            'image_key': self.image_key,
             'name': self.image_name,
+            'project': self.project,
             'release': self.release,
+            'revision': self.revision,
             'version': self.version
         }
         # stuff that might not be there yet
-        for k in ['imported', 'published', 'source_id', 'source_region']:
-            if k in self.__dict__:
+        for k in ['imported', 'import_id', 'import_region', 'published']:
+            if self.__dict__.get(k, None):
                 t[k] = self.__dict__[k]
         return Tags(t)

@@ -246,6 +267,7 @@ class ImageConfig():
         # stringify arrays
         self.name = '-'.join(self.name)
         self.description = ' '.join(self.description)
+        self._resolve_motd()
         self._stringify_repos()
         self._stringify_packages()
         self._stringify_services()

@@ -253,6 +275,26 @@ class ImageConfig():
         self._stringify_dict_keys('kernel_options', ' ')
         self._stringify_dict_keys('initfs_features', ' ')
 
+    def _resolve_motd(self):
+        # merge version/release notes, as apporpriate
+        if self.motd.get('version_notes', None) and self.motd.get('release_notes', None):
+            if self.version == 'edge':
+                # edge is, by definition, not released
+                self.motd.pop('version_notes', None)
+                self.motd.pop('release_notes', None)
+
+            elif self.release == self.version + '.0':
+                # no point in showing the same URL twice
+                self.motd.pop('release_notes')
+
+            else:
+                # combine version and release notes
+                self.motd['release_notes'] = self.motd.pop('version_notes') + '\n' + \
+                    self.motd['release_notes']
+
+        # TODO: be rid of null values
+        self.motd = '\n\n'.join(self.motd.values()).format(**self.__dict__)
+
     def _stringify_repos(self):
         # stringify repos map
         # <repo>: <tag>   # @<tag> <repo> enabled
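Together with the motd maps added in alpine.conf, _resolve_motd() collapses the per-key messages into the single MOTD string that Packer passes to the setup script (which writes it to /etc/motd). A toy version of just the final join/format step, using a trimmed-down motd map (the keys and context values here are examples, not the project's exact output):

    motd = {
        'welcome': 'Welcome to Alpine!',
        'version_notes': 'Release Notes:\n'
                         '* <https://alpinelinux.org/posts/alpine-{version}.0/released.html>',
        'motd_change': 'You may change this message by editing /etc/motd.',
    }
    context = {'version': '3.15', 'release': '3.15.0'}

    # same shape as the last line of _resolve_motd() above
    text = '\n\n'.join(motd.values()).format(**context)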
@@ -323,13 +365,11 @@ class ImageConfig():
                 for m, v in self.__dict__[d].items()
             )))
 
-    # TODO? determine_current_state()
-    def determine_actions(self, step, revise):
+    def refresh_state(self, step, revise=False):
         log = logging.getLogger('build')
-        self.revision = 0
-        # TODO: be more specific about our parameters
-        self.remote_image = clouds.latest_build_image(self)
         actions = {}
+        revision = 0
+        remote_image = clouds.latest_build_image(self)
 
         # enable actions based on the specified step
         if step in ['local', 'import', 'publish']:

@@ -343,50 +383,83 @@ class ImageConfig():
             actions['publish'] = True
 
         if revise:
-            if os.path.exists(self.local_path):
+            if self.local_path.exists():
                 # remove previously built local image artifacts
                 log.warning('Removing existing local image dir %s', self.local_dir)
                 shutil.rmtree(self.local_dir)
 
-            if self.remote_image and 'published' in self.remote_image['tags']:
-                log.warning('Bumping build revision for %s', self.name)
-                self.revision = int(self.remote_image['tags']['build_revision']) + 1
+            if remote_image and remote_image.published:
+                log.warning('Bumping image revision for %s', self.image_key)
+                revision = int(remote_image.revision) + 1
 
-            elif self.remote_image and 'imported' in self.remote_image['tags']:
+            elif remote_image and remote_image.imported:
                 # remove existing imported (but unpublished) image
-                log.warning('Removing unpublished remote image %s', self.remote_image['id'])
-                # TODO: be more specific?
-                clouds.remove_image(self)
-
-            self.remote_image = None
+                log.warning('Removing unpublished remote image %s', remote_image.import_id)
+                clouds.remove_image(self, remote_image.import_id)
+                remote_image = None
 
-        elif self.remote_image and 'imported' in self.remote_image['tags']:
-            # already imported, don't build/import again
-            log.warning('Already imported, skipping build/import')
-            actions.pop('build', None)
-            actions.pop('import', None)
+        elif remote_image:
+            if remote_image.imported:
+                # already imported, don't build/import again
+                log.info('%s - already imported', self.image_key)
+                actions.pop('build', None)
+                actions.pop('import', None)
 
-        if os.path.exists(self.local_path):
-            log.warning('Already built, skipping build')
+            if remote_image.published:
+                # NOTE: re-publishing can update perms or push to new regions
+                log.info('%s - already published', self.image_key)
+
+        if self.local_path.exists():
             # local image's already built, don't rebuild
+            log.info('%s - already locally built', self.image_key)
             actions.pop('build', None)
 
-        # set at time of import, carries forward when published
-        if self.remote_image:
-            self.end_of_life = self.remote_image['tags']['end_of_life']
-            self.revision = self.remote_image['tags']['build_revision']
+        # merge remote_image data into image state
+        if remote_image:
+            self.__dict__ |= dict(remote_image)
 
         else:
-            # default to tomorrow's date if unset
-            if 'end_of_life' not in self.__dict__:
-                tomorrow = datetime.utcnow() + timedelta(days=1)
-                self.end_of_life = tomorrow.strftime('%F')
+            self.__dict__ |= {
+                'revision': revision,
+                'imported': None,
+                'import_id': None,
+                'import_region': None,
+                'published': None,
+            }
+
+        self.end_of_life = self.__dict__.pop(
+            'end_of_life',
+            # EOL is tomorrow, if otherwise unset
+            (datetime.utcnow() + timedelta(days=1)).strftime('%F')
+        )
+
+        # update artifacts, if we've got 'em
+        artifacts_yaml = self.local_dir / 'artifacts.yaml'
+        if artifacts_yaml.exists():
+            yaml = YAML()
+            self.artifacts = yaml.load(artifacts_yaml)
+        else:
+            self.artifacts = None
 
         self.actions = list(actions)
-        log.info('%s/%s-r%s = %s', self.cloud, self.name, self.revision, self.actions)
+        log.info('%s/%s = %s', self.cloud, self.image_name, self.actions)
+
+        self.state_updated = datetime.utcnow().isoformat()
 
 
-class Tags(dict):
+class DictObj(dict):
+
+    def __getattr__(self, key):
+        return self[key]
+
+    def __setattr__(self, key, value):
+        self[key] = value
+
+    def __delattr__(self, key):
+        del self[key]
+
+
+class Tags(DictObj):
 
     def __init__(self, d={}, from_list=None, key_name='Key', value_name='Value'):
         for key, value in d.items():

@@ -395,23 +468,9 @@ class Tags(dict):
         if from_list:
             self.from_list(from_list, key_name, value_name)
 
-    def __getattr__(self, key):
-        return self[key]
-
     def __setattr__(self, key, value):
         self[key] = str(value)
 
-    def __delattr__(self, key):
-        del self[key]
-
-    def pop(self, key, default):
-        value = default
-        if key in self:
-            value = self[key]
-            del self[key]
-        return value
-
     def as_list(self, key_name='Key', value_name='Value'):
         return [{key_name: k, value_name: v} for k, v in self.items()]
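In short, DictObj is a dict with attribute access, and Tags keeps that behaviour while stringifying on attribute assignment and round-tripping to the [{'Key': ..., 'Value': ...}] list form AWS expects. A usage sketch against the classes above (assuming the Tags constructor stores the initial dict items, which the elided __init__ body suggests):

    t = Tags({'cloud': 'aws'})
    t.revision = 0                      # Tags.__setattr__ stores the string '0'
    t.imported = '2021-11-28T23:04:28'
    assert t.cloud == 'aws'             # attribute reads via DictObj.__getattr__
    assert t['revision'] == '0'
    t.as_list()   # -> [{'Key': 'cloud', 'Value': 'aws'}, {'Key': 'revision', 'Value': '0'}, ...]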


@@ -0,0 +1,39 @@
# vim: ts=2 et:
# Overlay for testing alpine-cloud-images
# start with the production alpine config
include required("alpine.conf")
# override specific things...
project = alpine-cloud-images__test
Default {
# unset before resetting
name = null
name = [ test ]
description = null
description = [ Alpine Test ]
}
Dimensions {
bootstrap {
# not quite working yet
#cloudinit { include required("testing/cloudinit.conf") }
}
cloud {
# adapters need to be written
#oci { include required("testing/oci.conf") }
#gcp { include required("testing/gcp.conf") }
#azure { include required("testing/azure.conf") }
}
}
# test in private, and only in a couple regions
Mandatory.access.PUBLIC = false
Mandatory.regions = {
ALL = false
us-west-2 = true
us-east-1 = true
}


@@ -0,0 +1 @@
alpine-testing.conf


@@ -0,0 +1,9 @@
# vim: ts=2 et:
name = [cloudinit]
packages {
cloud-init = true
openssh-server-pam = true
}
scripts = [ setup-cloudinit ]
script_dirs = [ setup-cloudinit.d ]


@@ -0,0 +1,4 @@
# vim: ts=2 et:
builder = qemu
# TBD


@@ -0,0 +1,27 @@
#!/bin/sh -eu
# vim: ts=4 et:
[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x
TARGET=/mnt
#SETUP=/tmp/setup-cloudinit.d
die() {
printf '\033[1;7;31m FATAL: %s \033[0m\n' "$@" >&2 # bold reversed red
exit 1
}
einfo() {
printf '\n\033[1;7;36m> %s <\033[0m\n' "$@" >&2 # bold reversed cyan
}
einfo "Installing up cloud-init bootstrap components..."
# This adds the init scripts at the correct boot phases
chroot "$TARGET" /sbin/setup-cloud-init
# cloud-init locks our user by default which means alpine can't login from
# SSH. This seems like a bug in cloud-init that should be fixed but we can
# hack around it for now here.
if [ -f "$TARGET"/etc/cloud/cloud.cfg ]; then
sed -i '/lock_passwd:/s/True/False/' "$TARGET"/etc/cloud/cloud.cfg
fi


@@ -51,7 +51,7 @@ make_filesystem() {
            unit MiB print
 
         root_dev="${DEVICE}2"
-        /usr/sbin/mkfs.fat -n EFI "${DEVICE}1"
+        mkfs.fat -n EFI "${DEVICE}1"
     fi
 
     mkfs.ext4 -O ^64bit -L / "$root_dev"

@@ -69,9 +69,12 @@ install_base() {
     mkdir -p "$TARGET/etc/apk"
     echo "$REPOS" > "$TARGET/etc/apk/repositories"
     cp -a /etc/apk/keys "$TARGET/etc/apk"
+    # shellcheck disable=SC2086
     apk --root "$TARGET" --initdb --no-cache add $PACKAGES_ADD
+    # shellcheck disable=SC2086
     [ -z "$PACKAGES_NOSCRIPTS" ] || \
         apk --root "$TARGET" --no-cache --no-scripts add $PACKAGES_NOSCRIPTS
+    # shellcheck disable=SC2086
     [ -z "$PACKAGES_DEL" ] || \
         apk --root "$TARGET" --no-cache del $PACKAGES_DEL
 }

@@ -89,10 +92,28 @@ install_bootloader() {
     einfo "Installing Bootloader"
 
     # create initfs
+    # shellcheck disable=SC2046
+    kernel=$(basename $(find "$TARGET/lib/modules/"* -maxdepth 0))
+
+    # ensure features can be found by mkinitfs
+    for FEATURE in $INITFS_FEATURES; do
+        # already taken care of?
+        [ -f "$TARGET/etc/mkinitfs/features.d/$FEATURE.modules" ] || \
+            [ -f "$TARGET/etc/mkinitfs/features.d/$FEATURE.files" ] && continue
+        # find the kernel module directory
+        module=$(chroot "$TARGET" /sbin/modinfo -k "$kernel" -n "$FEATURE")
+        [ -z "$module" ] && die "initfs_feature '$FEATURE' kernel module not found"
+        # replace everything after .ko with a *
+        echo "$module" | cut -d/ -f5- | sed -e 's/\.ko.*/.ko*/' \
+            > "$TARGET/etc/mkinitfs/features.d/$FEATURE.modules"
+    done
+
+    # TODO? this appends INITFS_FEATURES, we may want to allow removal someday?
     sed -Ei "s/^features=\"([^\"]+)\"/features=\"\1 $INITFS_FEATURES\"/" \
         "$TARGET/etc/mkinitfs/mkinitfs.conf"
-    # shellcheck disable=SC2046
-    chroot "$TARGET" /sbin/mkinitfs $(basename $(find "$TARGET/lib/modules/"* -maxdepth 0))
+    chroot "$TARGET" /sbin/mkinitfs "$kernel"
 
     if [ "$FIRMWARE" = uefi ]; then
         install_grub_efi

@@ -174,6 +195,7 @@ configure_system() {
     fi
 
     # explicitly lock the root account
+    chroot "$TARGET" /bin/sh -c "/bin/echo 'root:*' | /usr/sbin/chpasswd -e"
     chroot "$TARGET" /usr/bin/passwd -l root
 
     # set up image user

@@ -181,7 +203,7 @@ configure_system() {
     chroot "$TARGET" /usr/sbin/addgroup "$user"
     chroot "$TARGET" /usr/sbin/adduser -h "/home/$user" -s /bin/sh -G "$user" -D "$user"
     chroot "$TARGET" /usr/sbin/addgroup "$user" wheel
-    chroot "$TARGET" /usr/bin/passwd -u "$user"
+    chroot "$TARGET" /bin/sh -c "echo '$user:*' | /usr/sbin/chpasswd -e"
 
     # modify PS1s in /etc/profile to add user
     sed -Ei \

@@ -190,6 +212,9 @@ configure_system() {
         -e "s/( PS1=')(%m:)/\\1%n@\\2/" \
         "$TARGET"/etc/profile
 
+    # write /etc/motd
+    echo "$MOTD" > "$TARGET"/etc/motd
+
     setup_services
} }