feat: First WIP of Alpine 3.20.1 minimal

This commit is contained in:
Stefan Reimer 2024-07-15 12:52:22 +00:00
parent fc9a5b3eae
commit 35810a9dd3
24 changed files with 195 additions and 157 deletions

View File

@ -1,16 +1,22 @@
OVERLAY := $(shell pwd)/overlay
ONLY :=
SKIP :=
FILTER := --only $(ONLY) --skip aarch64 $(SKIP)
STEP := publish
ifneq ($(ONLY),)
FONLY := --only $(ONLY)
endif
ifneq ($(SKIP),)
FSKIP := --skip $(SKIP)
endif
all: build
build:
cd alpine-cloud-images && ./build $(STEP) --clean --pad-uefi-bin-arch '' --revise $(FILTER) --custom $(OVERLAY)/zdt --vars $(OVERLAY)/zdt/zdt.hcl
cd alpine-cloud-images && ./build $(STEP) --clean --pad-uefi-bin-arch '' --revise $(FONLY) $(FSKIP) --custom $(OVERLAY)/zdt --vars $(OVERLAY)/zdt/zdt.hcl
clean:
rm -rf alpine-cloud-images/work
rm -rf ~/tmp/alpine-cloud-images
# Adds all tracked encrypted files to .gitignore as safety net
age-add-gitignore:

View File

@ -1,6 +1,6 @@
# vim: ts=4 et:
from . import aws, nocloud, azure, gcp, oci
from . import aws, nocloud, azure, gcp, oci, generic
ADAPTERS = {}
@ -14,10 +14,11 @@ def register(*mods):
register(
aws, # well-tested and fully supported
nocloud, # beta, but supported
azure, # alpha, needs testing, lacks import and publish
gcp, # alpha, needs testing, lacks import and publish
oci, # alpha, needs testing, lacks import and publish
nocloud, # beta, supported, lacks import and publish
azure, # beta, supported, lacks import and publish
gcp, # beta, supported, lacks import and publish
oci, # beta, supported, lacks import and publish
generic, # alpha, needs testing, lacks import and publish
)

View File

@ -5,6 +5,7 @@ import logging
import hashlib
import os
import subprocess
import sys
import time
from datetime import datetime
@ -94,24 +95,14 @@ class AWSCloudAdapter(CloudAdapterInterface):
return sorted(
ec2r.images.filter(**req), key=lambda k: k.creation_date, reverse=True)
# necessary cloud-agnostic image info
# TODO: still necessary? maybe just incorporate into new latest_imported_tags()?
def _image_info(self, i):
tags = ImageTags(from_list=i.tags)
return DictObj({k: tags.get(k, None) for k in self.IMAGE_INFO})
# TODO: deprecate/remove
# get the latest imported image's tags for a given build key
def get_latest_imported_tags(self, project, image_key):
def _get_this_image(self, ic, region=None):
images = self._get_images_with_tags(
project=project,
image_key=image_key,
ic.project,
ic.image_key,
tags={'revision': ic.revision},
region=region
)
if images:
# first one is the latest
return ImageTags(from_list=images[0].tags)
return None
return None if not images else images[0]
# import an image
# NOTE: requires 'vmimport' role with read/write of <s3_bucket>.* and its objects
@ -256,27 +247,24 @@ class AWSCloudAdapter(CloudAdapterInterface):
# publish an image
def publish_image(self, ic):
log = logging.getLogger('publish')
source_image = self.get_latest_imported_tags(
ic.project,
ic.image_key,
)
# TODO: might be the wrong source image?
if not source_image or source_image.name != ic.tags.name:
log.warning('No source image for %s, reimporting', ic.tags.name)
# TODO: try importing it again?
self.import_image(ic, log)
source_image = self.get_latest_imported_tags(
ic.project,
ic.image_key,
)
if not source_image or source_image.name != ic.tags.name:
log.error('No source image for %s', ic.tags.name)
raise RuntimeError('Missing source image')
source_id = source_image.import_id
source = self._get_this_image(ic)
source_image = None if not source else ImageTags(from_list=source.tags)
if not source or source_image.name != ic.tags.name:
if not source:
log.warning('No source AMI for %s, reimporting...', ic.tags.name)
elif source_image.name != ic.tags.name:
log.warning('Unexpected source AMI name for %s (%s), reimporting...', ic.tags.name, source_image.name)
self.import_image(ic, log)
source = self._get_this_image(ic)
source_image = None if not source else ImageTags(from_list=source.tags)
if not source or source_image.name != ic.tags.name:
raise RuntimeError('Unable to reimport source AMI')
ic.tags.source_id = source.id # it might have been updated with a re/import!
source_region = source_image.import_region
log.info('Publishing source: %s/%s', source_region, source_id)
source = self.session().resource('ec2').Image(source_id)
log.info('Publishing source: %s/%s', source_region, source.id)
# we may be updating tags, get them from image config
tags = ic.tags
@ -308,21 +296,15 @@ class AWSCloudAdapter(CloudAdapterInterface):
log.warning('Skipping unsubscribed AWS region %s', r)
continue
images = self._get_images_with_tags(
region=r,
project=ic.project,
image_key=ic.image_key,
tags={'revision': ic.revision}
)
if images:
image = images[0]
image = self._get_this_image(ic, region=r)
if image:
log.info('%s: Already exists as %s', r, image.id)
else:
ec2c = self.session(r).client('ec2')
copy_image_opts = {
'Description': source.description,
'Name': source.name,
'SourceImageId': source_id,
'SourceImageId': source.id,
'SourceRegion': source_region,
'Encrypted': True if ic.encrypted else False,
}
@ -343,6 +325,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
artifacts = {}
copy_wait = 180
over_quota = {}
while len(artifacts) < len(publishing):
for r, image in publishing.items():
if r not in artifacts:
@ -377,12 +360,17 @@ class AWSCloudAdapter(CloudAdapterInterface):
if perms['groups'] or perms['users']:
log.info('%s: Applying launch perms to %s', r, image.id)
image.reset_attribute(Attribute='launchPermission')
try:
image.modify_attribute(
Attribute='launchPermission',
OperationType='add',
UserGroups=perms['groups'],
UserIds=perms['users'],
)
except Exception:
log.error('%s: Unable to apply launch perms to %s', r, image.id)
over_quota[r] = True
# defer raising exception until later
# set up AMI deprecation
ec2c = image.meta.client
@ -407,6 +395,12 @@ class AWSCloudAdapter(CloudAdapterInterface):
time.sleep(copy_wait)
copy_wait = 30
if over_quota:
# don't block release for other regions, but try to call this out more visibly
print("\n==> ALERT -- Unable to Make AMIs Public...", file=sys.stderr)
for r in over_quota.keys():
print(f"----> {r}", file=sys.stderr)
# update image config with published information
ic.artifacts = artifacts
ic.published = datetime.utcnow().isoformat()

View File

@ -0,0 +1,21 @@
from .interfaces.adapter import CloudAdapterInterface
# NOTE: Generic images are never imported or published because there's
# no actual cloud provider associated with them.
class GenericAdapter(CloudAdapterInterface):
def get_latest_imported_tags(self, project, image_key):
return None
def import_image(self, ic):
pass
def delete_image(self, config, image_id):
pass
def publish_image(self, ic):
pass
def register(cloud, cred_provider=None):
return GenericAdapter(cloud, cred_provider)

View File

@ -59,10 +59,10 @@ Default {
# profile build matrix
Dimensions {
version {
"3.20" { include required("version/3.20.conf") }
"3.19" { include required("version/3.19.conf") }
"3.18" { include required("version/3.18.conf") }
"3.17" { include required("version/3.17.conf") }
"3.16" { include required("version/3.16.conf") }
edge { include required("version/edge.conf") }
}
arch {
@ -77,6 +77,10 @@ Dimensions {
tiny { include required("bootstrap/tiny.conf") }
cloudinit { include required("bootstrap/cloudinit.conf") }
}
machine {
vm { include required("machine/vm.conf") }
metal { include required("machine/metal.conf") }
}
cloud {
aws { include required("cloud/aws.conf") }
# considered beta...
@ -84,6 +88,8 @@ Dimensions {
azure { include required("cloud/azure.conf") }
gcp { include required("cloud/gcp.conf") }
oci { include required("cloud/oci.conf") }
# considered alpha...
generic { include required("cloud/generic.conf") }
}
}

View File

@ -3,7 +3,7 @@ name = [cloudinit]
bootstrap_name = cloud-init
bootstrap_url = "https://cloud-init.io"
disk_size = [64]
disk_size = [80] # include space for optional APKs
# start cloudinit images with 3.15
EXCLUDE = ["3.12", "3.13", "3.14"]

View File

@ -0,0 +1,12 @@
# vim: ts=2 et:
cloud_name = Generic (alpha)
image_format = qcow2
EXCLUDE = [
# start with 3.20
"3.12", "3.13", "3.14", "3.15", "3.16", "3.17", "3.18", "3.19"
# only cloud-init tries to autodetect the cloud
"tiny"
]
ntp_server = ""

View File

@ -0,0 +1,15 @@
# vim: ts=2 et:
name = [metal]
machine_name = "Bare Metal"
# supported on 3.20 and later
EXCLUDE = [ "3.19", "3.18", "3.17", "3.16", "3.15", "3.14", "3.13", "3.12" ]
# linux-lts is a little bigger
disk_size = [160] # TODO: is this right?
packages.linux-virt = null
packages.linux-lts = true
packages.linux-firmware-none = true # specific firmware is per-cloud
# TODO: other kernel_modules, kernel_options, or initfs_features?

View File

@ -0,0 +1,5 @@
# vim: ts=2 et:
#name = [vm] # TODO: include in name for new releases
machine_name = "Virtual"
# all image defaults are for virtual machines

View File

@ -0,0 +1,7 @@
# vim: ts=2 et:
include required("base/5.conf")
motd {
sudo_removed = "NOTE: 'sudo' is not installed by default, please use 'doas' instead."
}

View File

@ -110,9 +110,10 @@ for i_key, i_cfg in configs.get().items():
arch = i_cfg.arch
firmware = i_cfg.firmware
bootstrap = i_cfg.bootstrap
machine = i_cfg.machine
cloud = i_cfg.cloud
# key on "variant" (but do not include cloud!)
variant = f"{release} {arch} {firmware} {bootstrap}"
variant = f"{release} {arch} {firmware} {bootstrap} {machine}"
if cloud not in filters['clouds']:
filters['clouds'][cloud] = {
@ -138,6 +139,12 @@ for i_key, i_cfg in configs.get().items():
'bootstrap_name': i_cfg.bootstrap_name,
}
if machine not in filters['machines']:
filters['machines'][machine] = {
'machine': machine,
'machine_name': i_cfg.machine_name,
}
versions[version] |= {
'version': version,
'release': release,
@ -148,6 +155,7 @@ for i_key, i_cfg in configs.get().items():
'arch': arch,
'firmware': firmware,
'bootstrap': bootstrap,
'machine': machine,
#'released': i_cfg.released.split('T')[0], # just the date
'released': released
}
@ -184,7 +192,7 @@ log.info('Making data mustache-compatible')
# convert filters to mustache-compatible format
data['filters'] = {}
for f in ['clouds', 'regions', 'archs', 'firmwares', 'bootstraps']:
for f in ['clouds', 'regions', 'archs', 'firmwares', 'bootstraps', 'machines']:
data['filters'][f] = [
filters[f][k] for k in filters[f] # order as they appear in work/images.yaml
]

View File

@ -126,6 +126,7 @@ for region in sorted(regions):
# parse image name for more information
# NOTE: we can't rely on tags, because they may not have been set successfully
# TODO: but we should use them if we got them!
m = RE_STUFF.search(name)
if not m:
log.error(f'!PARSE\t{region}\t{id}\t{name}')
@ -145,6 +146,7 @@ for region in sorted(regions):
eol = None # we don't know for sure, unless we have a deprecation time
if image.deprecation_time:
eol = time.strptime(image.deprecation_time, '%Y-%m-%dT%H:%M:%S.%fZ') < now
# TODO: when did we start setting deprecation time?
# keep track of images
data[region]['images'][id] = {

View File

@ -18,23 +18,12 @@ Default {
}
Dimensions {
cloud {
# add a machine type dimension
machine {
vm { include required("machine/vm.conf") }
metal { include required("machine/metal.conf") }
}
# just test in these regions
cloud {
aws.regions {
us-west-2 = true
us-east-1 = true
}
# adapters need to be written
#oci { include required("testing/oci.conf") }
#gcp { include required("testing/gcp.conf") }
#azure { include required("testing/azure.conf") }
#generic
#nocloud
}
}

View File

@ -1,9 +0,0 @@
# bare metal
name = ["metal"]
machine_name = "Bare Metal"
packages.linux-virt = null
packages.linux-lts = true
# TODO: other kernel_modules, kernel_options, or initfs_features?

View File

@ -1,4 +0,0 @@
#name = [vm] # don't append anything to the name
machine_name = "Virtual"
# all image defaults are for virtual machines

View File

@ -1,4 +0,0 @@
# vim: ts=2 et:
builder = qemu
# TBD

View File

@ -38,11 +38,16 @@ case "$CLOUD" in
oci)
DATASOURCE="Oracle"
;;
generic)
# default is to autodetect
;;
*)
echo "Unsupported Cloud '$CLOUD'" >&2
exit 1
;;
esac
printf '\n\n# Cloud-Init will use default configuration for this DataSource\n'
printf 'datasource_list: ["%s"]\n' "$DATASOURCE" >> "$TARGET"/etc/cloud/cloud.cfg
if [ "$CLOUD" != "generic" ]; then
printf '\n\n# Cloud-Init will use default configuration for this DataSource\n'
printf 'datasource_list: ["%s"]\n' "$DATASOURCE" >> "$TARGET"/etc/cloud/cloud.cfg
fi

View File

@ -1,7 +1,7 @@
#!/bin/bash
# set -x
TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.18*" #-dev" #kubezero*"
TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.20*dev*" #-dev" #kubezero*"
#TAG_FILTER="Name=tag:Name,Values=zdt-alpine-*kubezero*1.28*"
echo "Are you really sure to delete AMIs matching \"$TAG_FILTER\" ?"

View File

@ -32,6 +32,33 @@ WHEN {
}
}
vm {
services {
boot {
# Fails on cloudhypervisor anyways
hwclock = null
}
}
}
metal {
EXCLUDE = ["aws"]
packages {
mdadm = true
pciutils = true
}
initfs_features {
xfs = true
nvme = true
raid = true
}
kernel_modules {
xfs = true
}
}
# Disable KVM during cross build
aarch64 {
qemu.args = [

View File

@ -0,0 +1,3 @@
default {
falco = true
}

View File

@ -3,6 +3,7 @@
include required("common.conf")
packages { include required("minimal-packages.conf") }
services { include required("minimal-services.conf") }
description = [ "- https://zero-downtime.net/cloud" ]
name = [ minimal ]

View File

@ -1,33 +0,0 @@
# bare metal
name = ["metal"]
machine_name = "Bare Metal"
# disk_size = [1024]
EXCLUDE = ["aws"]
packages {
linux-virt = null
linux-lts = true
falco-kernel-lts = kubezero
# Keep the image minimal, add firmware as needed
linux-firmware-none = true
mdadm = true
pciutils = true
}
initfs_features {
xfs = true
nvme = true
raid = true
}
kernel_modules {
xfs = true
}

View File

@ -1,13 +0,0 @@
name = [vm] # we need a name to be able to skip
machine_name = "Virtual"
packages {
falco-kernel-virt = kubezero
}
services {
boot {
# Fails on cloudhypervisor anyways
hwclock = null
}
}

View File

@ -40,16 +40,16 @@ Default {
Dimensions {
version {
"3.19" { include required("version/3.19.conf")
"3.20" { include required("version/3.20.conf")
repos {
"https://cdn.zero-downtime.net/alpine/v3.19/kubezero" = kubezero
"https://cdn.zero-downtime.net/alpine/v3.20/kubezero" = kubezero
}
}
# edge { include required("version/edge.conf") }
}
arch {
x86_64 { include required("arch/x86_64.conf") }
aarch64 { include required("arch/aarch64.conf") }
#aarch64 { include required("arch/aarch64.conf") }
}
firmware {
@ -62,16 +62,21 @@ Dimensions {
tiny { include required("bootstrap/tiny.conf") }
}
machine {
vm { include required("machine/vm.conf") }
metal { include required("machine/metal.conf") }
}
cloud {
aws { include required("cloud/aws.conf") }
aws.regions {
ALL = false
ap-southeast-2 = true
ca-central-1 = true
#ap-southeast-2 = true
#ca-central-1 = true
eu-central-1 = true
us-east-1 = true
us-west-1 = true
us-west-2 = true
#us-east-1 = true
#us-west-1 = true
#us-west-2 = true
}
nocloud { include required("cloud/nocloud.conf") }
}
@ -80,18 +85,12 @@ Dimensions {
minimal { include required("edition/minimal.conf") }
kubezero { include required("edition/kubezero.conf") }
}
machine {
vm { include required("machine/vm.conf") }
metal { include required("machine/metal.conf") }
}
}
# all build configs merge these at the very end
Mandatory {
# name = [ "dev", "r{revision}" ]
name = [ "r{revision}" ]
name = [ "dev", "r{revision}" ]
#name = [ "r{revision}" ]
encrypted = "alias/zdt/amis"
# We use neofetch custom branding