Merge branch 'initial/part_three' into 'main'

alpine-cloud-images, part three

See merge request tomalok/alpine-cloud-images!128
Jake Buchholz Göktürk 2021-11-28 23:04:28 +00:00
commit 31b84a9dd1
31 changed files with 534 additions and 290 deletions

View File

@ -28,15 +28,9 @@ locals {
# randomly generated password
password = uuidv4()
# all build configs
all_configs = yamldecode(file("work/configs.yaml"))
# load the build actions to be taken
actions = yamldecode(file("work/actions.yaml"))
# resolve actionable build configs
configs = { for b, acfg in local.actions:
b => merge(local.all_configs[b], acfg) if length(acfg.actions) > 0
configs = { for b, cfg in yamldecode(file("work/images.yaml")):
b => cfg if contains(keys(cfg), "actions")
}
}
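For orientation, the same selection expressed in Python (a sketch; work/images.yaml is assumed to map build keys to resolved config dicts, which is what the tooling below writes):

# keep only image configs that have pending actions, mirroring the HCL above
from ruamel.yaml import YAML

images = YAML().load(open('work/images.yaml'))
configs = {b: cfg for b, cfg in images.items() if 'actions' in cfg}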
@ -99,10 +93,10 @@ build {
boot_wait = var.qemu.boot_wait[B.value.arch]
# results
output_directory = B.value.image.dir
disk_size = B.value.image.size
format = B.value.image.format
vm_name = B.value.image.file
output_directory = "work/images/${B.value.cloud}/${B.value.image_key}"
disk_size = B.value.size
format = B.value.local_format
vm_name = "image.${B.value.local_format}"
}
}
@ -150,16 +144,18 @@ build {
environment_vars = [
"DEBUG=${var.DEBUG}",
"ARCH=${B.value.arch}",
"BOOTLOADER=${B.value.bootloader}",
"BOOTSTRAP=${B.value.bootstrap}",
"BUILD_NAME=${B.value.name}",
"BUILD_REVISION=${B.value.revision}",
"CLOUD=${B.value.cloud}",
"END_OF_LIFE=${B.value.end_of_life}",
"FIRMWARE=${B.value.firmware}",
"IMAGE_LOGIN=${B.value.image.login}",
"IMAGE_LOGIN=${B.value.login}",
"INITFS_FEATURES=${B.value.initfs_features}",
"KERNEL_MODULES=${B.value.kernel_modules}",
"KERNEL_OPTIONS=${B.value.kernel_options}",
"MOTD=${B.value.motd}",
"PACKAGES_ADD=${B.value.packages.add}",
"PACKAGES_DEL=${B.value.packages.del}",
"PACKAGES_NOSCRIPTS=${B.value.packages.noscripts}",

View File

@ -8,6 +8,8 @@ from urllib.request import urlopen
CDN_URL = 'https://dl-cdn.alpinelinux.org/alpine'
# TODO: also get EOL from authoritative source
def get_version_release(alpine_version):
apk_ver = get_apk_version(alpine_version, 'main', 'x86_64', 'alpine-base')
release = apk_ver.split('-')[0]
@ -16,6 +18,7 @@ def get_version_release(alpine_version):
# TODO? maybe download and parse APKINDEX instead?
# also check out https://dl-cdn.alpinelinux.org/alpine/v3.15/releases/x86_64/latest-releases.yaml
def get_apk_version(alpine_version, repo, arch, apk):
repo_url = f"{CDN_URL}/{alpine_version}/{repo}/{arch}"
apks_re = re.compile(f'"{apk}-(\\d.*)\\.apk"')
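The hunk cuts off before the function returns; a plausible completion (assumed, not part of this diff) scrapes the CDN's directory index and takes the first capture:

# assumed completion of get_apk_version(): fetch the repo's HTML index and
# return the first version the regex captures (index ordering is an assumption)
def get_apk_version(alpine_version, repo, arch, apk):
    repo_url = f"{CDN_URL}/{alpine_version}/{repo}/{arch}"
    apks_re = re.compile(f'"{apk}-(\\d.*)\\.apk"')
    index = urlopen(repo_url).read().decode('utf8')
    versions = apks_re.findall(index)
    if not versions:
        raise RuntimeError(f"no {apk} apk found at {repo_url}")
    return versions[0]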

build
View File

@ -13,7 +13,6 @@ if not os.path.exists('work'):
import venv
PIP_LIBS = [
'pip',
'mergedeep',
'pyhocon',
'python-dateutil',
@ -21,6 +20,7 @@ if not os.path.exists('work'):
]
print('Work environment does not exist, creating...', file=sys.stderr)
venv.create('work', with_pip=True)
subprocess.run(['work/bin/pip', 'install', '-U', 'pip', 'wheel'])
subprocess.run(['work/bin/pip', 'install', '-U', *PIP_LIBS])
# Re-execute using the right virtual environment, if necessary.
@ -109,6 +109,7 @@ def is_same_dir_symlink(x):
return x_link == os.path.basename(x_link)
# TODO: revisit this to improve --custom overlay implementation
def install_overlay(overlay):
log.info("Installing '%s' overlay in work environment", overlay)
dest_dir = os.path.join('work', overlay)
@ -123,7 +124,7 @@ def install_overlay(overlay):
else:
rel_x = os.path.relpath(src_x, dest_dir)
# TODO: only images.conf symlink can be overridden
# TODO: in reality, only the images.conf symlink can be overridden
if os.path.islink(dest_x):
# only same-dir symlinks can be overridden
if not is_same_dir_symlink(dest_x):
@ -230,7 +231,7 @@ log.debug(args)
# set up credential provider, if we're going to use it
if args.use_broker:
clouds.set_credential_provider()
clouds.set_credential_provider(debug=args.debug)
### Setup Configs
@ -256,7 +257,7 @@ if args.step == 'configs':
### What needs doing?
if not image_configs.determine_actions(
if not image_configs.refresh_state(
step=args.step, only=args.only, skip=args.skip, revise=args.revise):
log.info('No pending actions to take at this time.')
sys.exit(0)
@ -307,4 +308,11 @@ if p.returncode != 0:
log.info('Packer Completed')
# TODO? collect artifacts?
# update final state in work/images.yaml
image_configs.refresh_state(
step='final',
only=args.only,
skip=args.skip
)
log.info('Build Finished')

View File

@ -42,28 +42,6 @@ ACTIONS = ['import', 'publish']
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
### Functions
# TODO? be more specific with args?
# import image config's local image to cloud
def import_image(ic):
imported = clouds.import_image(ic)
# write imported metadata
imported_yaml = Path(os.path.join(ic.local_dir, 'imported.yaml'))
yaml.dump(imported, imported_yaml)
# TODO? be more specific with args?
# publish image config's imported image to target regions with expected permissions
def publish_image(ic):
published = clouds.publish_image(ic)
# ensure image work directory exists
os.makedirs(ic.local_dir, exist_ok=True)
# write published metadata
published_yaml = Path(os.path.join(ic.local_dir, 'published.yaml'))
yaml.dump(published, published_yaml)
### Command Line & Logging
parser = argparse.ArgumentParser(description=NOTE)
@ -85,7 +63,7 @@ log.debug(args)
# set up credential provider, if we're going to use it
if args.use_broker:
clouds.set_credential_provider()
clouds.set_credential_provider(debug=args.debug)
# load build configs
configs = ImageConfigManager(
@ -95,13 +73,15 @@ configs = ImageConfigManager(
)
yaml = YAML()
yaml.default_flow_style = False
yaml.explicit_start = True
for image_key in args.image_keys:
image_config = configs.get(image_key)
if args.action == 'import':
import_image(image_config)
clouds.import_image(image_config)
elif args.action == 'publish':
publish_image(image_config)
os.makedirs(image_config.local_dir, exist_ok=True)
artifacts = clouds.publish_image(image_config)
yaml.dump(artifacts, Path(image_config.local_dir) / 'artifacts.yaml')
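Publishing now records its results in a per-image artifacts.yaml rather than the old published.yaml metadata. A hypothetical example of its shape, per the AWS adapter changes below:

# hypothetical work/images/aws/<image_key>/artifacts.yaml, once loaded:
# region -> copied image id, or None where the copy failed
artifacts = {
    'us-west-2': 'ami-0123456789abcdef0',
    'us-east-1': None,
}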

View File

@ -16,9 +16,9 @@ register(aws) # , oci, azure, gcp)
# using a credential provider is optional, set across all adapters
def set_credential_provider():
def set_credential_provider(debug=False):
from .identity_broker_client import IdentityBrokerClient
cred_provider = IdentityBrokerClient()
cred_provider = IdentityBrokerClient(debug=debug)
for adapter in ADAPTERS.values():
adapter.cred_provider = cred_provider
@ -26,15 +26,18 @@ def set_credential_provider():
### forward to the correct adapter
def latest_build_image(config):
return ADAPTERS[config.cloud].latest_build_image(config.name)
return ADAPTERS[config.cloud].latest_build_image(
config.project,
config.image_key
)
def import_image(config):
return ADAPTERS[config.cloud].import_image(config)
def remove_image(config):
return ADAPTERS[config.cloud].remove_image(config.remote_image['id'])
def remove_image(config, image_id):
return ADAPTERS[config.cloud].remove_image(image_id)
def publish_image(config):
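All of these module-level functions follow the same forwarding pattern: look up the adapter registered for config.cloud and delegate. A minimal sketch of that registry (register() internals are assumed, not shown in this hunk):

# sketch only; the real register() lives in this module
ADAPTERS = {}

def register(*modules):
    for module in modules:
        cloud = module.__name__.split('.')[-1]    # e.g. 'aws'
        ADAPTERS[cloud] = module.register(cloud)  # adapter module returns its instance

def import_image(config):
    return ADAPTERS[config.cloud].import_image(config)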

View File

@ -2,20 +2,22 @@
# vim: ts=4 et:
import logging
import hashlib
import os
import random
import string
import sys
import time
from datetime import datetime
from subprocess import Popen, PIPE, run
from .interfaces.adapter import CloudAdapterInterface
from image_configs import Tags
from image_configs import Tags, DictObj
class AWSCloudAdapter(CloudAdapterInterface):
IMAGE_INFO = [
'revision', 'imported', 'import_id', 'import_region', 'published',
'end_of_life',
]
CRED_MAP = {
'access_key': 'aws_access_key_id',
'secret_key': 'aws_secret_access_key',
@ -54,7 +56,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
return self._sessions[region]
# TODO: property?
@property
def regions(self):
if self.cred_provider:
return self.cred_provider.get_regions(self.cloud)
@ -62,7 +64,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
# list of all subscribed regions
return {r['RegionName']: True for r in self.session().client('ec2').describe_regions()['Regions']}
# TODO: property?
@property
def default_region(self):
if self.cred_provider:
return self.cred_provider.get_default_region(self.cloud)
@ -79,82 +81,63 @@ class AWSCloudAdapter(CloudAdapterInterface):
# return dict suitable to use for session()
return {self.CRED_MAP[k]: v for k, v in creds.items() if k in self.CRED_MAP}
def _get_images_with_tags(self, tags={}, region=None):
def _get_images_with_tags(self, project, image_key, tags={}, region=None):
ec2r = self.session(region).resource('ec2')
req = {'Owners': ['self'], 'Filters': []}
tags |= {
'project': project,
'image_key': image_key,
}
for k, v in tags.items():
req['Filters'].append({'Name': f"tag:{k}", 'Values': [str(v)]})
return sorted(
ec2r.images.filter(**req), key=lambda k: k.creation_date, reverse=True)
def _aws_tags(self, b_tags):
# convert dict to [{'Key': k, 'Value': v}, ...]
a_tags = []
for k, v in b_tags.items():
# add extra Name tag
if k == 'name':
a_tags += [{'Key': 'Name', 'Value': str(v)}]
a_tags += [{'Key': k, 'Value': str(v)}]
return a_tags
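For example, the conversion also injects AWS's conventional capital-N Name tag alongside the lower-case name:

# _aws_tags({'name': 'alpine-3.15.0-x86_64-bios-tiny-r0', 'revision': 0}) ->
# [{'Key': 'Name',     'Value': 'alpine-3.15.0-x86_64-bios-tiny-r0'},
#  {'Key': 'name',     'Value': 'alpine-3.15.0-x86_64-bios-tiny-r0'},
#  {'Key': 'revision', 'Value': '0'}]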
# cloud-agnostic necessary info about an ec2.Image
# necessary cloud-agnostic image info
def _image_info(self, i):
tags = Tags(from_list=i.tags)
del tags.Name
# TODO? realm/partition?
return {
'id': i.image_id,
'region': i.meta.client.meta.region_name,
'tags': dict(tags)
# TODO? narrow down to these?
# imported = i.tags.imported
# published = i.tags.published
# revision = i.tags.build_revision
# source_id = i.image_id,
# source_region = i.meta.client.meta.region_name,
}
return DictObj({k: tags.get(k, None) for k in self.IMAGE_INFO})
# get the latest imported image for a given build name
def latest_build_image(self, build_name):
images = self._get_images_with_tags(tags={'build_name': build_name})
def latest_build_image(self, project, image_key):
images = self._get_images_with_tags(
project=project,
image_key=image_key,
)
if images:
# first one is the latest
return self._image_info(images[0])
return None
## TODO: rework these next two as a Tags class
# import an image
# NOTE: requires 'vmimport' role with read/write of <s3_bucket>.* and its objects
def import_image(self, ic):
log = logging.getLogger('import')
image_path = ic.local_path
image_aws = image_path.replace(ic.local_format, 'vhd')
image_aws = ic.local_dir / 'image.vhd'
name = ic.image_name
description = ic.image_description
session = self.session()
s3r = session.resource('s3')
ec2c = session.client('ec2')
ec2r = session.resource('ec2')
# convert QCOW2 to VHD
log.info('Converting %s to VHD format', image_path)
p = Popen(self.CONVERT_CMD + (image_path, image_aws), stdout=PIPE, stdin=PIPE, encoding='utf8')
out, err = p.communicate()
if p.returncode:
log.error('Unable to convert %s to VHD format (%s)', image_path, p.returncode)
log.error('EXIT: %d', p.returncode)
log.error('STDOUT:\n%s', out)
log.error('STDERR:\n%s', err)
sys.exit(p.returncode)
raise RuntimeError
bucket_name = 'alpine-cloud-images.' + ''.join(
random.SystemRandom().choice(string.ascii_lowercase + string.digits)
for _ in range(40))
s3_key = os.path.basename(image_aws)
session = self.session()
s3r = session.resource('s3')
ec2c = session.client('ec2')
ec2r = session.resource('ec2')
bucket_name = 'alpine-cloud-images.' + hashlib.sha1(os.urandom(40)).hexdigest()
s3_key = name + '.vhd'
bucket = s3r.Bucket(bucket_name)
log.info('Creating S3 bucket %s', bucket.name)
@ -166,7 +149,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
try:
log.info('Uploading %s to %s', image_aws, s3_url)
bucket.upload_file(image_aws, s3_key)
bucket.upload_file(str(image_aws), s3_key)
# import snapshot from S3
log.info('Importing EC2 snapshot from %s', s3_url)
@ -187,7 +170,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
if task_detail['Status'] not in ['pending', 'active', 'completed']:
msg = f"Bad EC2 snapshot import: {task_detail['Status']} - {task_detail['StatusMessage']}"
log.error(msg)
raise(RuntimeError, msg)
raise RuntimeError(msg)
if task_detail['Status'] == 'completed':
snapshot_id = task_detail['SnapshotId']
@ -246,8 +229,8 @@ class AWSCloudAdapter(CloudAdapterInterface):
# tag image (adds imported tag)
log.info('Tagging EC2 AMI %s', image_id)
tags.imported = datetime.utcnow().isoformat()
tags.source_id = image_id
tags.source_region = ec2c.meta.region_name
tags.import_id = image_id
tags.import_region = ec2c.meta.region_name
image.create_tags(Tags=tags.as_list())
except Exception:
log.error('Unable to tag image:', exc_info=True)
@ -263,7 +246,6 @@ class AWSCloudAdapter(CloudAdapterInterface):
log = logging.getLogger('build')
ec2r = self.session().resource('ec2')
image = ec2r.Image(image_id)
# TODO? protect against removing a published image?
snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
snapshot = ec2r.Snapshot(snapshot_id)
log.info('Deregistering %s', image_id)
@ -271,35 +253,29 @@ class AWSCloudAdapter(CloudAdapterInterface):
log.info('Deleting %s', snapshot_id)
snapshot.delete()
# TODO: this should be standardized and work with cred_provider
def _get_all_regions(self):
ec2c = self.session().client('ec2')
res = ec2c.describe_regions(AllRegions=True)
return {
r['RegionName']: r['OptInStatus'] != 'not-opted-in'
for r in res['Regions']
}
# publish an image
def publish_image(self, ic):
log = logging.getLogger('publish')
source_image = self.latest_build_image(ic.name)
source_image = self.latest_build_image(
ic.project,
ic.image_key,
)
if not source_image:
log.error('No source image for %s', ic.name)
sys.exit(1)
log.error('No source image for %s', ic.image_key)
raise RuntimeError('Missing source image')
source_id = source_image['id']
log.info('Publishing source: %s, %s', source_image['region'], source_id)
source_id = source_image.import_id
source_region = source_image.import_region
log.info('Publishing source: %s/%s', source_region, source_id)
source = self.session().resource('ec2').Image(source_id)
source_tags = Tags(from_list=source.tags)
publish = ic.publish
# sort out published image access permissions
perms = {'groups': [], 'users': []}
if 'PUBLIC' in publish['access'] and publish['access']['PUBLIC']:
if ic.access.get('PUBLIC', None):
perms['groups'] = ['all']
else:
for k, v in publish['access'].items():
for k, v in ic.access.items():
if v:
log.debug('users: %s', k)
perms['users'].append(str(k))
@ -307,14 +283,13 @@ class AWSCloudAdapter(CloudAdapterInterface):
log.debug('perms: %s', perms)
# resolve destination regions
regions = self.regions()
if 'ALL' in publish['regions'] and publish['regions']['ALL']:
regions = self.regions
if ic.regions.pop('ALL', None):
log.info('Publishing to ALL available regions')
else:
# clear ALL out of the way if it's still there
publish['regions'].pop('ALL', None)
# TODO: politely warn/skip unknown regions in b.aws.regions
regions = {r: regions[r] for r in publish['regions']}
ic.regions.pop('ALL', None)
regions = {r: regions[r] for r in ic.regions}
publishing = {}
for r in regions.keys():
@ -324,10 +299,9 @@ class AWSCloudAdapter(CloudAdapterInterface):
images = self._get_images_with_tags(
region=r,
tags={
'build_name': ic.name,
'build_revision': ic.revision
}
project=ic.project,
image_key=ic.image_key,
tags={'revision': ic.revision}
)
if images:
image = images[0]
@ -338,8 +312,8 @@ class AWSCloudAdapter(CloudAdapterInterface):
res = ec2c.copy_image(
Description=source.description,
Name=source.name,
SourceImageId=source.id,
SourceRegion=source_image['region'],
SourceImageId=source_id,
SourceRegion=source_region,
)
except Exception:
log.warning('Skipping %s, unable to copy image:', r, exc_info=True)
@ -351,11 +325,11 @@ class AWSCloudAdapter(CloudAdapterInterface):
publishing[r] = image
published = {}
artifacts = {}
copy_wait = 180
while len(published) < len(publishing):
while len(artifacts) < len(publishing):
for r, image in publishing.items():
if r not in published:
if r not in artifacts:
image.reload()
if image.state == 'available':
# tag image
@ -395,22 +369,22 @@ class AWSCloudAdapter(CloudAdapterInterface):
log.info('%s: Setting EOL deprecation time on %s', r, image.id)
ec2c.enable_image_deprecation(
ImageId=image.id,
DeprecateAt=f"{source_image['tags']['end_of_life']}T23:59:59Z"
DeprecateAt=f"{source_image.end_of_life}T23:59:59Z"
)
published[r] = self._image_info(image)
artifacts[r] = image.id
if image.state == 'failed':
log.error('%s: %s - %s - %s', r, image.id, image.state, image.state_reason)
published[r] = None
artifacts[r] = None
remaining = len(publishing) - len(published)
remaining = len(publishing) - len(artifacts)
if remaining > 0:
log.info('Waiting %ds for %d images to complete', copy_wait, remaining)
time.sleep(copy_wait)
copy_wait = 30
return published
return artifacts
def register(cloud, cred_provider=None):

View File

@ -59,6 +59,7 @@ class IdentityBrokerClient:
return True
def _get(self, path):
self._logger.debug("request: %s", path)
if not self._is_cache_valid(path):
while True: # to handle rate limits
try:
@ -95,6 +96,7 @@ class IdentityBrokerClient:
self._cache[path] = json.load(res)
break
self._logger.debug("response: %s", self._cache[path])
return self._cache[path]
def get_credentials_url(self, vendor):
@ -117,7 +119,6 @@ class IdentityBrokerClient:
if region['default']:
self._default_region[vendor] = region['name']
out[None] = region['credentials_url']
return out
@ -128,4 +129,7 @@ class IdentityBrokerClient:
return self._default_region.get(vendor)
def get_credentials(self, vendor, region=None):
if not region:
region = self.get_default_region(vendor)
return self._get(self.get_regions(vendor)[region])
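Putting the client together, a hedged usage sketch (vendor and region values are illustrative only):

# resolve AWS credentials for one region via the identity broker
ibc = IdentityBrokerClient(debug=True)
region_urls = ibc.get_regions('aws')    # region name -> credentials URL
creds = ibc.get_credentials('aws', region='us-west-2')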

View File

@ -13,9 +13,11 @@ class CloudAdapterInterface:
def sdk(self):
raise NotImplementedError
@property
def regions(self):
raise NotImplementedError
@property
def default_region(self):
raise NotImplementedError
@ -25,11 +27,9 @@ class CloudAdapterInterface:
def session(self, region=None):
raise NotImplementedError
def latest_build_image(self, build_name):
def latest_build_image(self, project, image_key):
raise NotImplementedError
# TODO: be more specific about what gets passed into these
def import_image(self, config):
raise NotImplementedError
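Any future adapter (oci, gcp, azure) now implements regions/default_region as properties and the two-argument latest_build_image. A skeletal sketch against the updated interface (class name and values are hypothetical):

class NullCloudAdapter(CloudAdapterInterface):

    @property
    def regions(self):
        return {'local': True}    # region -> usable?

    @property
    def default_region(self):
        return 'local'

    def latest_build_image(self, project, image_key):
        return None               # nothing imported yet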

View File

@ -1,24 +1,45 @@
# vim: ts=2 et:
# NOTE: If you are using alpine-cloud-images to build public cloud images
# for something/someone other than Alpine Linux, you *MUST* override
# *AT LEAST* the 'project' setting with a unique identifier string value
# via a "config overlay" to avoid image import and publishing collisions.
project = https://alpinelinux.org/cloud
# all build configs start with these
Default {
project = ${project}
# image name/description components
name = [ alpine ]
description = [ "Alpine Linux {release}-r{revision}" ]
description = [ Alpine Linux ]
motd {
welcome = "Welcome to Alpine!"
wiki = \
"The Alpine Wiki contains a large amount of how-to guides and general\n"\
"information about administrating Alpine systems.\n"\
"See <https://wiki.alpinelinux.org/>."
version_notes = "Release Notes:\n"\
"* <https://alpinelinux.org/posts/alpine-{version}.0/released.html>"
release_notes = "* <https://alpinelinux.org/posts/{release}/released.html>"
}
# initial provisioning script and data directory
scripts = [ setup ]
script_dirs = [ setup.d ]
# image settings
image.format = qcow2
image.size = 1G
image.login = alpine
size = 1G
login = alpine
local_format = qcow2
}
# profile build matrix
Dimensions {
version {
"3.15" { include required("version/3.15.conf") }
"3.14" { include required("version/3.14.conf") }
"3.13" { include required("version/3.13.conf") }
"3.12" { include required("version/3.12.conf") }
@ -35,32 +56,21 @@ Dimensions {
}
bootstrap {
tiny { include required("bootstrap/tiny.conf") }
# cloudinit { include required("bootstrap/cloudinit.conf") }
}
cloud {
aws { include required("cloud/aws.conf") }
# oci { include required("cloud/oci.conf") }
# gcp { include required("cloud/gcp.conf") }
# azure { include required("cloud/azure.conf") }
}
}
# all build configs merge these at the very end
Mandatory {
description = [ - https://alpinelinux.org/cloud ]
name = [ "r{revision}" ]
description = [ - https://alpinelinux.org/cloud ]
motd {
motd_change = "You may change this message by editing /etc/motd."
}
# final provisioning script
scripts = [ cleanup ]
# override while developing/testing...
aws.publish {
access {
PUBLIC = false
}
regions {
ALL = false
us-west-2 = true # where i'm building
us-east-1 = true # test publishing
}
}
}
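The effective per-image config is Default, then each selected dimension value, then Mandatory merged last. A rough pyhocon sketch of that ordering (paths and the chosen dimension keys are illustrative; the real resolution lives in image_configs.py):

from pyhocon import ConfigFactory, ConfigTree

cfg = ConfigFactory.parse_file('work/configs/images.conf')
layers = [
    cfg['Default'],
    cfg['Dimensions']['cloud']['aws'],  # one key per dimension, in practice
    cfg['Mandatory'],                   # always merged last
]
merged = ConfigTree()
for layer in layers:
    merged = ConfigTree.merge_configs(merged, layer)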

View File

@ -1,5 +1,8 @@
# vim: ts=2 et:
name = [x86_64]
# TODO: until we have a image metadata service, let's avoid UEFI
EXCLUDE = [uefi]
qemu.machine_type = null
qemu.args = null

View File

@ -1,2 +0,0 @@
# vim: ts=2 et:
name = [cloudinit]

View File

@ -1,10 +1,11 @@
# vim: ts=2 et:
name = [tiny]
WHEN {
aws {
packages.tiny-ec2-bootstrap = true
packages.tiny-ec2-bootstrap = true
services.default.tiny-ec2-bootstrap = true
scripts = [ setup-tiny ]
scripts = [ setup-tiny ]
script_dirs = [ setup-tiny.d ]
}
}

View File

@ -1,40 +1,23 @@
# vim: ts=2 et:
builder = qemu
builder = qemu
aws {
s3_bucket = alpine-cloud-images
publish {
access {
PUBLIC = true
# alternately...
#PUBLIC = false
# and/or
#<aws-userid> = true
# ...
}
regions {
ALL = true
# alternately...
#<region> = true
# ...
}
}
}
kernel_modules.ena = true
initfs_features.ena = true
access.PUBLIC = true
regions.ALL = true
WHEN {
# Arch
aarch64 {
aws.arch = arm64
# new AWS aarch64 default...
kernel_modules.gpio_pl061 = true
initfs_features.gpio_pl061 = true
WHEN {
"3.14 3.13 3.12" {
# ...but not supported for older versions
kernel_modules.gpio_pl061 = false
initfs_features.gpio_pl061 = false
}
}
}
x86_64 {
aws.arch = x86_64
}
# Firmware
bios {
aws.boot_mode = legacy-bios
}
uefi {
aws.boot_mode = uefi
}
}
}

View File

@ -1,2 +0,0 @@
# vim: ts=2 et:
builder = qemu

View File

@ -1,8 +1,6 @@
# vim: ts=2 et:
name = [bios]
packages {
syslinux = --no-scripts
}
qemu.firmware = null
bootloader = extlinux
packages.syslinux = --no-scripts
qemu.firmware = null

View File

@ -1,8 +1,10 @@
# vim: ts=2 et:
name = [uefi]
bootloader = grub-efi
packages {
grub-efi = --no-scripts
grub-efi = --no-scripts
dosfstools = true
}
WHEN {

View File

@ -0,0 +1,9 @@
# vim: ts=2 et:
include required("base/3.conf")
end_of_life = "2023-11-01"
motd {
sudo_deprecated = "NOTE: 'sudo' has been deprecated, please use 'doas' instead."
}

View File

@ -52,7 +52,6 @@ kernel_modules {
usb-storage = true
ext4 = true
nvme = true
ena = true
}
kernel_options {
@ -62,5 +61,4 @@ kernel_options {
initfs_features {
nvme = true
ena = true
}

View File

@ -5,6 +5,7 @@ include required("1.conf")
packages {
# drop old alpine-mirrors
alpine-mirrors = null
# use iproute2-minimal instead of full iproute2
iproute2 = null
iproute2-minimal = true

View File

@ -3,7 +3,6 @@
include required("2.conf")
packages {
# doas replaces sudo
sudo = null
# doas will officially replace sudo in 3.16
doas = true
}

View File

@ -0,0 +1,8 @@
# vim: ts=2 et:
include required("3.conf")
packages {
# doas officially replaces sudo in 3.16
sudo = false
}

View File

@ -1,6 +1,10 @@
# vim: ts=2 et:
include required("base/3.conf")
include required("base/4.conf")
motd {
sudo_removed = "NOTE: 'sudo' is no longer installed by default, please use 'doas' instead."
}
# clear out inherited repos
repos = null

gen_releases.py Executable file
View File

@ -0,0 +1,100 @@
#!/usr/bin/env python3
# vim: ts=4 et:
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys
import textwrap
NOTE = textwrap.dedent("""
This script's output is meant to be compatible with alpine-ec2-ami's
releases.yaml, in order to bridge the gap until https://alpinelinux.org/cloud
can be updated to be generated from another source, or to dynamically call a
published image metadata service. This script should only be run after
the main 'build' script has been used successfully to publish all images.
""")
sys.pycache_prefix = 'work/__pycache__'
if not os.path.exists('work'):
print('FATAL: Work directory does not exist.', file=sys.stderr)
print(NOTE, file=sys.stderr)
exit(1)
# Re-execute using the right virtual environment, if necessary.
venv_args = [os.path.join('work', 'bin', 'python3')] + sys.argv
if os.path.join(os.getcwd(), venv_args[0]) != sys.executable:
print("Re-executing with work environment's Python...\n", file=sys.stderr)
os.execv(venv_args[0], venv_args)
# We're now in the right Python environment
import argparse
import logging
from collections import defaultdict
from ruamel.yaml import YAML
import clouds
from image_configs import ImageConfigManager
### Constants & Variables
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
### Functions
# allows us to set values deep within an object that might not be fully defined
def dictfactory():
return defaultdict(dictfactory)
# undo dictfactory() objects to normal objects
def undictfactory(o):
if isinstance(o, defaultdict):
o = {k: undictfactory(v) for k, v in o.items()}
return o
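The pairing lets deep paths be assigned without pre-creating intermediate levels, then collapses everything back to plain dicts for the YAML dump (keys below are illustrative):

releases = dictfactory()
releases['3.15']['some-image-key']['some-image-name'] = {'revision': 0}
plain = undictfactory(releases)   # ordinary nested dicts, safe to yaml.dump()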
### Command Line & Logging
parser = argparse.ArgumentParser(description=NOTE)
parser.add_argument(
'--use-broker', action='store_true',
help='use the identity broker to get credentials')
parser.add_argument('--debug', action='store_true', help='enable debug output')
args = parser.parse_args()
log = logging.getLogger('gen_releases')
log.setLevel(logging.DEBUG if args.debug else logging.INFO)
console = logging.StreamHandler(sys.stderr)
console.setFormatter(logging.Formatter(LOGFORMAT))
log.addHandler(console)
log.debug(args)
# set up credential provider, if we're going to use it
if args.use_broker:
clouds.set_credential_provider()
# load build configs
configs = ImageConfigManager(
conf_path='work/configs/images.conf',
yaml_path='work/images.yaml',
log='gen_releases'
)
# make sure images.yaml is up-to-date with reality
configs.refresh_state('final')
yaml = YAML()
releases = dictfactory()
for i_key, i_cfg in configs.get().items():
release = i_cfg.version if i_cfg.version == 'edge' else i_cfg.release
releases[release][i_key][i_cfg.tags.name] = dict(i_cfg.tags) | {
'creation_date': i_cfg.published,
'artifacts': i_cfg.artifacts,
}
yaml.dump(undictfactory(releases), sys.stdout)

View File

@ -3,7 +3,6 @@
import itertools
import logging
import mergedeep
import os
import pyhocon
import shutil
@ -30,7 +29,6 @@ class ImageConfigManager():
self.yaml = YAML()
self.yaml.register_class(ImageConfig)
self.yaml.default_flow_style = False
self.yaml.explicit_start = True
# hide !ImageConfig tag from Packer
self.yaml.representer.org_represent_mapping = self.yaml.representer.represent_mapping
@ -50,8 +48,7 @@ class ImageConfigManager():
# load already-resolved YAML configs, restoring ImageConfig objects
def _load_yaml(self):
# TODO: no warning if we're being called from cloud_helper.py
self.log.warning('Loading existing %s', self.yaml_path)
self.log.info('Loading existing %s', self.yaml_path)
for key, config in self.yaml.load(self.yaml_path).items():
self._configs[key] = ImageConfig(key, config)
@ -74,18 +71,29 @@ class ImageConfigManager():
# set version releases
for v, vcfg in cfg.Dimensions.version.items():
# version keys are quoted to protect dots
self.set_version_release(v.strip('"'), vcfg)
self._set_version_release(v.strip('"'), vcfg)
dimensions = list(cfg.Dimensions.keys())
self.log.debug('dimensions: %s', dimensions)
for dim_keys in (itertools.product(*cfg['Dimensions'].values())):
image_key = '-'.join(dim_keys).replace('"', '')
config_key = '-'.join(dim_keys).replace('"', '')
# dict of dimension -> dimension_key
dim_map = dict(zip(dimensions, dim_keys))
# replace version with release, and make image_key from that
release = cfg.Dimensions.version[dim_map['version']].release
image_config = ImageConfig(image_key, {'release': release} | dim_map)
(rel_map := dim_map.copy())['version'] = release
image_key = '-'.join(rel_map.values())
image_config = ImageConfig(
config_key,
{
'image_key': image_key,
'release': release
} | dim_map
)
# merge in the Default config
image_config._merge(cfg.Default)
@ -93,12 +101,19 @@ class ImageConfigManager():
# merge in each dimension key's configs
for dim, dim_key in dim_map.items():
dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key])
exclude = dim_cfg.pop('EXCLUDE', None)
if exclude and set(exclude) & set(dim_keys):
self.log.debug('%s SKIPPED, %s excludes %s', image_key, dim_key, exclude)
self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude)
skip = True
break
if eol := dim_cfg.get('end_of_life', None):
if self.now > datetime.fromisoformat(eol):
self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol)
skip = True
break
image_config._merge(dim_cfg)
# now that we're done with ConfigTree/dim_cfg, remove " from dim_keys
@ -122,40 +137,41 @@ class ImageConfigManager():
image_config.qemu['iso_url'] = self.iso_url_format.format(arch=image_config.arch)
# we've resolved everything, add tags attribute to config
self._configs[image_key] = image_config
self._configs[config_key] = image_config
self._save_yaml()
# set current version release
def set_version_release(self, v, c):
def _set_version_release(self, v, c):
if v == 'edge':
c.put('release', self.now.strftime('%Y%m%d'))
c.put('end_of_life', self.tomorrow.strftime('%F'))
else:
c.put('release', get_version_release(f"v{v}")['release'])
# release is also appended to build name array
# release is also appended to name & description arrays
c.put('name', [c.release])
c.put('description', [c.release])
# update current config status
def determine_actions(self, step, only, skip, revise):
self.log.info('Determining Actions')
def refresh_state(self, step, only=[], skip=[], revise=False):
self.log.info('Refreshing State')
has_actions = False
for ic in self._configs.values():
# clear away any previous actions
if hasattr(ic, 'actions'):
delattr(ic, 'actions')
dim_keys = set(ic.image_key.split('-'))
dim_keys = set(ic.config_key.split('-'))
if only and len(set(only) & dim_keys) != len(only):
self.log.debug("%s SKIPPED, doesn't match --only", ic.image_key)
self.log.debug("%s SKIPPED, doesn't match --only", ic.config_key)
continue
if skip and len(set(skip) & dim_keys) > 0:
self.log.debug('%s SKIPPED, matches --skip', ic.image_key)
self.log.debug('%s SKIPPED, matches --skip', ic.config_key)
continue
ic.determine_actions(step, revise)
ic.refresh_state(step, revise)
if not has_actions and len(ic.actions):
has_actions = True
@ -166,8 +182,8 @@ class ImageConfigManager():
class ImageConfig():
def __init__(self, image_key, obj={}):
self.image_key = str(image_key)
def __init__(self, config_key, obj={}):
self.config_key = str(config_key)
tags = obj.pop('tags', None)
self.__dict__ |= self._deep_dict(obj)
# ensure tag values are str() when loading
@ -176,15 +192,19 @@ class ImageConfig():
@property
def local_dir(self):
return os.path.join('work/images', self.name)
return Path('work/images') / self.cloud / self.image_key
@property
def local_path(self):
return os.path.join(self.local_dir, 'image.' + self.local_format)
return self.local_dir / ('image.' + self.local_format)
@property
def published_yaml(self):
return self.local_dir / 'published.yaml'
@property
def image_name(self):
return '-r'.join([self.name, str(self.revision)])
return self.name.format(**self.__dict__)
@property
def image_description(self):
@ -196,19 +216,20 @@ class ImageConfig():
t = {
'arch': self.arch,
'bootstrap': self.bootstrap,
'build_name': self.name,
'build_revision': self.revision,
'cloud': self.cloud,
'description': self.image_description,
'end_of_life': self.end_of_life,
'firmware': self.firmware,
'image_key': self.image_key,
'name': self.image_name,
'project': self.project,
'release': self.release,
'revision': self.revision,
'version': self.version
}
# stuff that might not be there yet
for k in ['imported', 'published', 'source_id', 'source_region']:
if k in self.__dict__:
for k in ['imported', 'import_id', 'import_region', 'published']:
if self.__dict__.get(k, None):
t[k] = self.__dict__[k]
return Tags(t)
@ -246,6 +267,7 @@ class ImageConfig():
# stringify arrays
self.name = '-'.join(self.name)
self.description = ' '.join(self.description)
self._resolve_motd()
self._stringify_repos()
self._stringify_packages()
self._stringify_services()
@ -253,6 +275,26 @@ class ImageConfig():
self._stringify_dict_keys('kernel_options', ' ')
self._stringify_dict_keys('initfs_features', ' ')
def _resolve_motd(self):
# merge version/release notes, as appropriate
if self.motd.get('version_notes', None) and self.motd.get('release_notes', None):
if self.version == 'edge':
# edge is, by definition, not released
self.motd.pop('version_notes', None)
self.motd.pop('release_notes', None)
elif self.release == self.version + '.0':
# no point in showing the same URL twice
self.motd.pop('release_notes')
else:
# combine version and release notes
self.motd['release_notes'] = self.motd.pop('version_notes') + '\n' + \
self.motd['release_notes']
# TODO: be rid of null values
self.motd = '\n\n'.join(self.motd.values()).format(**self.__dict__)
def _stringify_repos(self):
# stringify repos map
# <repo>: <tag> # @<tag> <repo> enabled
@ -323,13 +365,11 @@ class ImageConfig():
for m, v in self.__dict__[d].items()
)))
# TODO? determine_current_state()
def determine_actions(self, step, revise):
def refresh_state(self, step, revise=False):
log = logging.getLogger('build')
self.revision = 0
# TODO: be more specific about our parameters
self.remote_image = clouds.latest_build_image(self)
actions = {}
revision = 0
remote_image = clouds.latest_build_image(self)
# enable actions based on the specified step
if step in ['local', 'import', 'publish']:
@ -343,50 +383,83 @@ class ImageConfig():
actions['publish'] = True
if revise:
if os.path.exists(self.local_path):
if self.local_path.exists():
# remove previously built local image artifacts
log.warning('Removing existing local image dir %s', self.local_dir)
shutil.rmtree(self.local_dir)
if self.remote_image and 'published' in self.remote_image['tags']:
log.warning('Bumping build revision for %s', self.name)
self.revision = int(self.remote_image['tags']['build_revision']) + 1
if remote_image and remote_image.published:
log.warning('Bumping image revision for %s', self.image_key)
revision = int(remote_image.revision) + 1
elif self.remote_image and 'imported' in self.remote_image['tags']:
elif remote_image and remote_image.imported:
# remove existing imported (but unpublished) image
log.warning('Removing unpublished remote image %s', self.remote_image['id'])
# TODO: be more specific?
clouds.remove_image(self)
log.warning('Removing unpublished remote image %s', remote_image.import_id)
clouds.remove_image(self, remote_image.import_id)
self.remote_image = None
remote_image = None
elif self.remote_image and 'imported' in self.remote_image['tags']:
# already imported, don't build/import again
log.warning('Already imported, skipping build/import')
actions.pop('build', None)
actions.pop('import', None)
elif remote_image:
if remote_image.imported:
# already imported, don't build/import again
log.info('%s - already imported', self.image_key)
actions.pop('build', None)
actions.pop('import', None)
if os.path.exists(self.local_path):
log.warning('Already built, skipping build')
if remote_image.published:
# NOTE: re-publishing can update perms or push to new regions
log.info('%s - already published', self.image_key)
if self.local_path.exists():
# local image's already built, don't rebuild
log.info('%s - already locally built', self.image_key)
actions.pop('build', None)
# set at time of import, carries forward when published
if self.remote_image:
self.end_of_life = self.remote_image['tags']['end_of_life']
self.revision = self.remote_image['tags']['build_revision']
# merge remote_image data into image state
if remote_image:
self.__dict__ |= dict(remote_image)
else:
# default to tomorrow's date if unset
if 'end_of_life' not in self.__dict__:
tomorrow = datetime.utcnow() + timedelta(days=1)
self.end_of_life = tomorrow.strftime('%F')
self.__dict__ |= {
'revision': revision,
'imported': None,
'import_id': None,
'import_region': None,
'published': None,
}
self.end_of_life = self.__dict__.pop(
'end_of_life',
# EOL is tomorrow, if otherwise unset
(datetime.utcnow() + timedelta(days=1)).strftime('%F')
)
# update artifacts, if we've got 'em
artifacts_yaml = self.local_dir / 'artifacts.yaml'
if artifacts_yaml.exists():
yaml = YAML()
self.artifacts = yaml.load(artifacts_yaml)
else:
self.artifacts = None
self.actions = list(actions)
log.info('%s/%s-r%s = %s', self.cloud, self.name, self.revision, self.actions)
log.info('%s/%s = %s', self.cloud, self.image_name, self.actions)
self.state_updated = datetime.utcnow().isoformat()
class Tags(dict):
class DictObj(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
del self[key]
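DictObj is a plain dict with attribute-style access, which is what lets remote_image.import_id and friends read naturally in refresh_state() above (values illustrative):

info = DictObj({'import_id': 'ami-0123456789abcdef0', 'published': None})
info.import_id                      # 'ami-0123456789abcdef0'
info.published = '2021-11-28'
del info.import_id
dict(info)                          # still a real dict underneath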
class Tags(DictObj):
def __init__(self, d={}, from_list=None, key_name='Key', value_name='Value'):
for key, value in d.items():
@ -395,23 +468,9 @@ class Tags(dict):
if from_list:
self.from_list(from_list, key_name, value_name)
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = str(value)
def __delattr__(self, key):
del self[key]
def pop(self, key, default):
value = default
if key in self:
value = self[key]
del self[key]
return value
def as_list(self, key_name='Key', value_name='Value'):
return [{key_name: k, value_name: v} for k, v in self.items()]
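as_list() is the inverse of the from_list constructor path, converting back to EC2's tag-list shape:

t = Tags(from_list=[{'Key': 'revision', 'Value': '0'}])
t.as_list()    # [{'Key': 'revision', 'Value': '0'}]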

View File

@ -0,0 +1,39 @@
# vim: ts=2 et:
# Overlay for testing alpine-cloud-images
# start with the production alpine config
include required("alpine.conf")
# override specific things...
project = alpine-cloud-images__test
Default {
# unset before resetting
name = null
name = [ test ]
description = null
description = [ Alpine Test ]
}
Dimensions {
bootstrap {
# not quite working yet
#cloudinit { include required("testing/cloudinit.conf") }
}
cloud {
# adapters need to be written
#oci { include required("testing/oci.conf") }
#gcp { include required("testing/gcp.conf") }
#azure { include required("testing/azure.conf") }
}
}
# test in private, and only in a couple regions
Mandatory.access.PUBLIC = false
Mandatory.regions = {
ALL = false
us-west-2 = true
us-east-1 = true
}

View File

@ -0,0 +1 @@
alpine-testing.conf

View File

@ -0,0 +1,9 @@
# vim: ts=2 et:
name = [cloudinit]
packages {
cloud-init = true
openssh-server-pam = true
}
scripts = [ setup-cloudinit ]
script_dirs = [ setup-cloudinit.d ]

View File

@ -0,0 +1,4 @@
# vim: ts=2 et:
builder = qemu
# TBD

View File

@ -0,0 +1,27 @@
#!/bin/sh -eu
# vim: ts=4 et:
[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x
TARGET=/mnt
#SETUP=/tmp/setup-cloudinit.d
die() {
printf '\033[1;7;31m FATAL: %s \033[0m\n' "$@" >&2 # bold reversed red
exit 1
}
einfo() {
printf '\n\033[1;7;36m> %s <\033[0m\n' "$@" >&2 # bold reversed cyan
}
einfo "Installing up cloud-init bootstrap components..."
# This adds the init scripts at the correct boot phases
chroot "$TARGET" /sbin/setup-cloud-init
# cloud-init locks our user by default which means alpine can't login from
# SSH. This seems like a bug in cloud-init that should be fixed, but we can
# hack around it here for now.
if [ -f "$TARGET"/etc/cloud/cloud.cfg ]; then
sed -i '/lock_passwd:/s/True/False/' "$TARGET"/etc/cloud/cloud.cfg
fi

View File

@ -51,7 +51,7 @@ make_filesystem() {
unit MiB print
root_dev="${DEVICE}2"
/usr/sbin/mkfs.fat -n EFI "${DEVICE}1"
mkfs.fat -n EFI "${DEVICE}1"
fi
mkfs.ext4 -O ^64bit -L / "$root_dev"
@ -69,9 +69,12 @@ install_base() {
mkdir -p "$TARGET/etc/apk"
echo "$REPOS" > "$TARGET/etc/apk/repositories"
cp -a /etc/apk/keys "$TARGET/etc/apk"
# shellcheck disable=SC2086
apk --root "$TARGET" --initdb --no-cache add $PACKAGES_ADD
# shellcheck disable=SC2086
[ -z "$PACKAGES_NOSCRIPTS" ] || \
apk --root "$TARGET" --no-cache --no-scripts add $PACKAGES_NOSCRIPTS
# shellcheck disable=SC2086
[ -z "$PACKAGES_DEL" ] || \
apk --root "$TARGET" --no-cache del $PACKAGES_DEL
}
@ -89,10 +92,28 @@ install_bootloader() {
einfo "Installing Bootloader"
# create initfs
# shellcheck disable=SC2046
kernel=$(basename $(find "$TARGET/lib/modules/"* -maxdepth 0))
# ensure features can be found by mkinitfs
for FEATURE in $INITFS_FEATURES; do
# already taken care of?
[ -f "$TARGET/etc/mkinitfs/features.d/$FEATURE.modules" ] || \
[ -f "$TARGET/etc/mkinitfs/features.d/$FEATURE.files" ] && continue
# find the kernel module directory
module=$(chroot "$TARGET" /sbin/modinfo -k "$kernel" -n "$FEATURE")
[ -z "$module" ] && die "initfs_feature '$FEATURE' kernel module not found"
# replace everything after .ko with a *
echo "$module" | cut -d/ -f5- | sed -e 's/\.ko.*/.ko*/' \
> "$TARGET/etc/mkinitfs/features.d/$FEATURE.modules"
done
# TODO? this appends INITFS_FEATURES, we may want to allow removal someday?
sed -Ei "s/^features=\"([^\"]+)\"/features=\"\1 $INITFS_FEATURES\"/" \
"$TARGET/etc/mkinitfs/mkinitfs.conf"
# shellcheck disable=SC2046
chroot "$TARGET" /sbin/mkinitfs $(basename $(find "$TARGET/lib/modules/"* -maxdepth 0))
chroot "$TARGET" /sbin/mkinitfs "$kernel"
if [ "$FIRMWARE" = uefi ]; then
install_grub_efi
@ -174,6 +195,7 @@ configure_system() {
fi
# explicitly lock the root account
chroot "$TARGET" /bin/sh -c "/bin/echo 'root:*' | /usr/sbin/chpasswd -e"
chroot "$TARGET" /usr/bin/passwd -l root
# set up image user
@ -181,7 +203,7 @@ configure_system() {
chroot "$TARGET" /usr/sbin/addgroup "$user"
chroot "$TARGET" /usr/sbin/adduser -h "/home/$user" -s /bin/sh -G "$user" -D "$user"
chroot "$TARGET" /usr/sbin/addgroup "$user" wheel
chroot "$TARGET" /usr/bin/passwd -u "$user"
chroot "$TARGET" /bin/sh -c "echo '$user:*' | /usr/sbin/chpasswd -e"
# modify PS1s in /etc/profile to add user
sed -Ei \
@ -190,6 +212,9 @@ configure_system() {
-e "s/( PS1=')(%m:)/\\1%n@\\2/" \
"$TARGET"/etc/profile
# write /etc/motd
echo "$MOTD" > "$TARGET"/etc/motd
setup_services
}