feat: make UEFI work for x86_64, KubeZero version bump to 1.24

Stefan Reimer 2022-10-31 17:20:05 +01:00
parent 4bb4791a55
commit 8bff1d5b0f
29 changed files with 183 additions and 310 deletions

View File

@ -1,6 +1,6 @@
OVERLAY := $(shell pwd)/overlay
ONLY :=
FILTER := --only 3.16 $(ONLY) --skip aarch64
FILTER := --only 3.16 $(ONLY)
STEP := publish
all: build

View File

@ -1,5 +1,7 @@
*~
*.bak
*.swp
.DS_Store
.vscode/
/work/
releases*yaml

View File

@ -208,10 +208,6 @@ may not) be further partitioned, based on other factors.
The image's primary login user, set to `alpine`.
### `local_format` string
The local VM's disk image format, set to `qcow2`.
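As an illustration, the two settings documented above correspond to entries like the following in an image configuration (a minimal HOCON sketch using the stated defaults; placement within the config tree is assumed, not taken from this file):

```
# minimal sketch -- keys and defaults as documented above; placement is assumed
login        = alpine   # the image's primary login user
local_format = qcow2    # the local VM's disk image format
```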
### `repos` map
Defines the contents of the image's `/etc/apk/repositories` file. The map's

View File

@ -29,6 +29,11 @@ variable "qemu" {
### Local Data
locals {
# possible actions for the post-processor
actions = [
"build", "upload", "import", "publish", "release"
]
debug_arg = var.DEBUG == 0 ? "" : "--debug"
broker_arg = var.USE_BROKER == 0 ? "" : "--use-broker"
@ -102,8 +107,8 @@ build {
# results
output_directory = "work/images/${B.value.cloud}/${B.value.image_key}"
disk_size = B.value.size
format = B.value.local_format
vm_name = "image.${B.value.local_format}"
format = "qcow2"
vm_name = "image.qcow2"
}
}
@ -181,13 +186,13 @@ build {
# import and/or publish cloud images
dynamic "post-processor" {
for_each = { for b, c in local.configs:
b => c if contains(c.actions, "import") || contains(c.actions, "publish")
b => c if length(setintersection(c.actions, local.actions)) > 0
}
iterator = B
labels = ["shell-local"]
content {
only = [ "qemu.${B.key}", "null.${B.key}" ]
inline = [ for action in ["import", "publish"]:
inline = [ for action in local.actions:
"./cloud_helper.py ${action} ${local.debug_arg} ${local.broker_arg} ${B.key}" if contains(B.value.actions, action)
]
}

View File

@ -48,7 +48,7 @@ from image_configs import ImageConfigManager
### Constants & Variables
STEPS = ['configs', 'state', 'rollback', 'local', 'import', 'publish']
STEPS = ['configs', 'state', 'rollback', 'local', 'upload', 'import', 'publish', 'release']
LOGFORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
WORK_CLEAN = {'bin', 'include', 'lib', 'pyvenv.cfg', '__pycache__'}
WORK_OVERLAYS = ['configs', 'scripts']
@ -198,8 +198,6 @@ def install_qemu_firmware():
firm_bin = os.path.join(firm_dir, f"uefi-{arch}.bin")
os.symlink(bin, firm_bin)
log.info('Padding "%s" to 67108864 bytes', firm_bin)
subprocess.run(['truncate', '-s', '67108864', firm_bin])
### Command Line & Logging
@ -224,8 +222,8 @@ parser.add_argument(
default=[], help='only variants with dimension key(s)')
parser.add_argument(
'--revise', action='store_true',
help='remove existing local/imported image, or bump revision and rebuild'
'if published')
help='remove existing local/uploaded/imported image, or bump revision and '
' rebuild if published or released')
parser.add_argument(
'--use-broker', action='store_true',
help='use the identity broker to get credentials')
@ -252,7 +250,9 @@ console.setFormatter(logfmt)
log.addHandler(console)
log.debug(args)
# TODO: rollback requires --revise
if args.step == 'rollback':
log.warning('"rollback" step enables --revise option')
args.revise = True
# set up credential provider, if we're going to use it
if args.use_broker:

View File

@ -38,7 +38,7 @@ from image_configs import ImageConfigManager
### Constants & Variables
ACTIONS = ['import', 'publish']
ACTIONS = ['build', 'upload', 'import', 'publish', 'release']
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
@ -78,13 +78,26 @@ yaml.explicit_start = True
for image_key in args.image_keys:
image_config = configs.get(image_key)
if args.action == 'import':
clouds.convert_image(image_config)
if args.action == 'build':
image_config.convert_image()
elif args.action == 'upload':
# TODO: image_config.upload_image()
pass
elif args.action == 'import':
clouds.import_image(image_config)
#clouds.upload_image(image_config)
elif args.action == 'publish':
# TODO: we should probably always ensure the directory exists
os.makedirs(image_config.local_dir, exist_ok=True)
# TODO: save artifacts to image_config itself
artifacts = clouds.publish_image(image_config)
yaml.dump(artifacts, image_config.artifacts_yaml)
#clouds.release_image(image_config) # sha256, sign, metadata, put in place for downloading
elif args.action == 'release':
pass
# TODO: image_config.release_image() - configurable steps to take on remote host
# save per-image metadata
image_config.save_metadata(upload=(False if args.action == 'build' else True))

View File

@ -1,6 +1,6 @@
# vim: ts=4 et:
from . import aws, nocloud # , oci, gcp, azure
from . import aws # , oci, gcp, azure
ADAPTERS = {}
@ -12,7 +12,7 @@ def register(*mods):
ADAPTERS[cloud] = p
register(aws, nocloud) # , oci, azure, gcp)
register(aws) # , oci, azure, gcp)
# using a credential provider is optional, set across all adapters
@ -32,25 +32,13 @@ def latest_build_image(config):
)
def convert_image(config):
return ADAPTERS[config.cloud].convert_image(config)
def import_image(config):
return ADAPTERS[config.cloud].import_image(config)
def remove_image(config, image_id):
return ADAPTERS[config.cloud].remove_image(image_id)
def upload_image(config):
return ADAPTERS[config.cloud].upload_image(config)
def delete_image(config, image_id):
return ADAPTERS[config.cloud].delete_image(image_id)
def publish_image(config):
return ADAPTERS[config.cloud].publish_image(config)
def release_image(config):
return ADAPTERS[config.cloud].release_image(config)

View File

@ -7,7 +7,7 @@ import os
import time
from datetime import datetime
from subprocess import Popen, PIPE, run
from subprocess import run
from .interfaces.adapter import CloudAdapterInterface
from image_configs import Tags, DictObj
@ -110,7 +110,6 @@ class AWSCloudAdapter(CloudAdapterInterface):
# import an image
# NOTE: requires 'vmimport' role with read/write of <s3_bucket>.* and its objects
def import_image(self, ic):
log = logging.getLogger('import')
description = ic.image_description
@ -120,6 +119,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
ec2r = session.resource('ec2')
bucket_name = 'alpine-cloud-images.' + hashlib.sha1(os.urandom(40)).hexdigest()
bucket = s3r.Bucket(bucket_name)
log.info('Creating S3 bucket %s', bucket.name)
bucket.create(
@ -192,7 +192,8 @@ class AWSCloudAdapter(CloudAdapterInterface):
Architecture=self.ARCH[ic.arch],
BlockDeviceMappings=[{
'DeviceName': '/dev/xvda',
'Ebs': {'SnapshotId': snapshot_id, 'VolumeType': 'gp3'}
'Ebs': {'SnapshotId': snapshot_id,
'VolumeType': 'gp3'}
}],
Description=description,
EnaSupport=True,
@ -227,8 +228,8 @@ class AWSCloudAdapter(CloudAdapterInterface):
return self._image_info(image)
# remove an (unpublished) image
def remove_image(self, image_id):
# delete an (unpublished) image
def delete_image(self, image_id):
log = logging.getLogger('build')
ec2r = self.session().resource('ec2')
image = ec2r.Image(image_id)

View File

@ -1,20 +1,10 @@
# vim: ts=4 et:
import logging
from subprocess import Popen, PIPE
class CloudAdapterInterface:
CONVERT_CMD = {
'qcow2': ['ln', '-f'],
'vhd': ['qemu-img', 'convert', '-f', 'qcow2', '-O', 'vpc', '-o', 'force_size=on'],
}
def __init__(self, cloud, cred_provider=None):
self._sdk = None
self._sessions = {}
self._clients = {}
self.cloud = cloud
self.cred_provider = cred_provider
self._default_region = None
@ -37,47 +27,14 @@ class CloudAdapterInterface:
def session(self, region=None):
raise NotImplementedError
def client(self, client, region=None):
raise NotImplementedError
# get information about the latest released image
def latest_build_image(self, project, image_key):
raise NotImplementedError
# convert local QCOW2 to format appropriate for a cloud
def convert_image(self, ic):
log = logging.getLogger('import')
local_path = ic.local_path
image_path = ic.local_dir / ic.image_file
log.info('Converting %s to %s', local_path, image_path)
p = Popen(self.CONVERT_CMD[ic.image_format] + [ic.local_path, ic.image_path], stdout=PIPE, stdin=PIPE, encoding='utf8')
out, err = p.communicate()
if p.returncode:
log.error('Unable to convert %s to %s format (%s)', ic.local_path, ic.image_path, p.returncode)
log.error('EXIT: %d', p.returncode)
log.error('STDOUT:\n%s', out)
log.error('STDERR:\n%s', err)
raise RuntimeError
# import local image to cloud provider
def import_image(self, config):
raise NotImplementedError
# remove unpublished image from cloud provider
def remove_image(self, config, image_id):
def delete_image(self, config, image_id):
raise NotImplementedError
# upload cloud image for testing, if upload_path
def upload_image(self, config):
raise NotImplementedError
# TODO: implement here
# publish image to cloud provider regions
def publish_image(self, config):
raise NotImplementedError
# generate image checksum, save metadata, sign image, make downloadable, if download_path
def release_image(self, config):
raise NotImplementedError
# TODO: implement here!

View File

@ -1,51 +0,0 @@
# NOTE: not meant to be executed directly
# vim: ts=4 et:
import logging
import hashlib
import os
import time
from datetime import datetime
from subprocess import Popen, PIPE, run
from .interfaces.adapter import CloudAdapterInterface
from image_configs import Tags, DictObj
# For NoCloud, this will mostly be a no-op.
class NoCloudAdapter(CloudAdapterInterface):
IMAGE_INFO = [
'revision', 'imported', 'import_id', 'import_region', 'published',
]
# get the latest imported image for a given build name
def latest_build_image(self, project, image_key):
# TODO: get info from permanently published image (if exists)
return None
# import an image
def import_image(self, ic):
# TODO: what exactly should be returned?
return DictObj({
'revision': ic.revision,
'imported': datetime.now(),
# 'import_id': '?',
})
# remove an (unpublished) image
def remove_image(self, image_id):
# TODO: remove image from temporary location
pass
# publish an image
def publish_image(self, ic):
# TODO: what exactly should be returned? nocloud isn't launchable.
return {
'generic?': 'url?'
}
def register(cloud, cred_provider=None):
return NoCloudAdapter(cloud, cred_provider)

View File

@ -34,14 +34,12 @@ Default {
size = 1G
login = alpine
local_format = qcow2
image_format = qcow2
# these paths are subject to change, as image downloads are developed
upload_path = "ssh://dev.alpinelinux.org/~tomalok/public_html/alpine-cloud-images"
download_path = "https://dl-cdn.alpinelinux.org/alpine"
#download_path = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images" # development
remote_path = "{v_version}/cloud/{cloud}"
storage_url = "ssh://tomalok@dev.alpinelinux.org/public_html/alpine-cloud-images/{v_version}/cloud/{cloud}"
download_url = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images/{v_version}/cloud/{cloud}" # development
#download_url = "https://dl-cdn.alpinelinux.org/alpine/{v_version}/cloud/{cloud}"
# image access
access.PUBLIC = true
@ -72,8 +70,7 @@ Dimensions {
cloudinit { include required("bootstrap/cloudinit.conf") }
}
cloud {
aws { include required("cloud/aws.conf") }
nocloud { include required("cloud/nocloud.conf") }
aws { include required("cloud/aws.conf") }
}
}

View File

@ -2,8 +2,5 @@
name = [x86_64]
arch_name = x86_64
# TODO: until we have an image metadata service, let's avoid UEFI
EXCLUDE = [uefi]
qemu.machine_type = null
qemu.args = null

View File

@ -25,12 +25,6 @@ WHEN {
}
}
}
nocloud {
# tiny-cloud's nocloud support is currently > 3.16
EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16"]
packages.tiny-cloud-nocloud = true
}
# azure.packages.tiny-cloud-azure = true
# gcp.packages.tiny-cloud-gcp = true
# oci.packages.tiny-cloud-oci = true

View File

@ -13,6 +13,7 @@ initfs_features {
ena = true
nvme = true
}
ntp_server = 169.254.169.123
access.PUBLIC = true
@ -34,4 +35,4 @@ WHEN {
}
}
}
}
}

View File

@ -1,5 +0,0 @@
# vim: ts=2 et:
cloud_name = NoCloud
image_format = qcow2
ntp_server = ""

View File

@ -1,6 +1,8 @@
#!/usr/bin/env python3
# vim: ts=4 et:
# TODO: perhaps integrate into "./build release"
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys

View File

@ -1,109 +0,0 @@
#!/usr/bin/env python3
# vim: ts=4 et:
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys
import textwrap
NOTE = textwrap.dedent("""
NOTE: This is an old script, replaced by 'gen_mksite_releases.py' after
https://gitlab.alpinelinux.org/alpine/infra/alpine-mksite/-/merge_requests/52
is merged.
This script's output is compatible with the retired alpine-ec2-ami repo's
releases/alpine.yaml, in order to bridge the gap until
https://alpinelinux.org/cloud dynamically calls a published-image metadata
service. This script should only be run after the main 'build' script has
been used successfully to publish ALL images, and the STDOUT should be
committed to the https://gitlab.alpinelinux.org/alpine/infra/alpine-mksite
repo as 'cloud/releases-in.yaml'.
""")
sys.pycache_prefix = 'work/__pycache__'
if not os.path.exists('work'):
print('FATAL: Work directory does not exist.', file=sys.stderr)
print(NOTE, file=sys.stderr)
exit(1)
# Re-execute using the right virtual environment, if necessary.
venv_args = [os.path.join('work', 'bin', 'python3')] + sys.argv
if os.path.join(os.getcwd(), venv_args[0]) != sys.executable:
print("Re-executing with work environment's Python...\n", file=sys.stderr)
os.execv(venv_args[0], venv_args)
# We're now in the right Python environment
import argparse
import logging
from collections import defaultdict
from ruamel.yaml import YAML
import clouds
from image_configs import ImageConfigManager
### Constants & Variables
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
### Functions
# allows us to set values deep within an object that might not be fully defined
def dictfactory():
return defaultdict(dictfactory)
# undo dictfactory() objects to normal objects
def undictfactory(o):
if isinstance(o, defaultdict):
o = {k: undictfactory(v) for k, v in o.items()}
return o
### Command Line & Logging
parser = argparse.ArgumentParser(description=NOTE)
parser.add_argument(
'--use-broker', action='store_true',
help='use the identity broker to get credentials')
parser.add_argument('--debug', action='store_true', help='enable debug output')
args = parser.parse_args()
log = logging.getLogger('gen_releases')
log.setLevel(logging.DEBUG if args.debug else logging.INFO)
console = logging.StreamHandler(sys.stderr)
console.setFormatter(logging.Formatter(LOGFORMAT))
log.addHandler(console)
log.debug(args)
# set up credential provider, if we're going to use it
if args.use_broker:
clouds.set_credential_provider()
# load build configs
configs = ImageConfigManager(
conf_path='work/configs/images.conf',
yaml_path='work/images.yaml',
log='gen_releases'
)
# make sure images.yaml is up-to-date with reality
configs.refresh_state('final')
yaml = YAML()
releases = dictfactory()
for i_key, i_cfg in configs.get().items():
if i_cfg.bootstrap != 'tiny':
continue
release = i_cfg.version if i_cfg.version == 'edge' else i_cfg.release
releases[release][i_key][i_cfg.tags.name] = dict(i_cfg.tags) | {
'creation_date': i_cfg.published,
'artifacts': i_cfg.artifacts,
}
yaml.dump(undictfactory(releases), sys.stdout)

View File

@ -1,8 +1,10 @@
# vim: ts=4 et:
import hashlib
import itertools
import logging
import mergedeep
import os
import pyhocon
import shutil
@ -10,6 +12,8 @@ from copy import deepcopy
from datetime import datetime
from pathlib import Path
from ruamel.yaml import YAML
from subprocess import Popen, PIPE
from urllib.parse import urlparse
import clouds
@ -48,7 +52,8 @@ class ImageConfigManager():
def _load_yaml(self):
self.log.info('Loading existing %s', self.yaml_path)
for key, config in self.yaml.load(self.yaml_path).items():
self._configs[key] = ImageConfig(key, config)
self._configs[key] = ImageConfig(key, config, log=self.log, yaml=self.yaml)
# TODO: also pull in additional per-image metadata from the build process?
# save resolved configs to YAML
def _save_yaml(self):
@ -90,7 +95,9 @@ class ImageConfigManager():
{
'image_key': image_key,
'release': release
} | dim_map
} | dim_map,
log=self.log,
yaml=self.yaml
)
# merge in the Default config
@ -178,7 +185,18 @@ class ImageConfigManager():
class ImageConfig():
def __init__(self, config_key, obj={}):
CONVERT_CMD = {
'qcow2': ['ln', '-f'],
'vhd': ['qemu-img', 'convert', '-f', 'qcow2', '-O', 'vpc', '-o', 'force_size=on'],
}
# these tags may-or-may-not exist at various times
OPTIONAL_TAGS = [
'built', 'uploaded', 'imported', 'import_id', 'import_region', 'published', 'released'
]
def __init__(self, config_key, obj={}, log=None, yaml=None):
self._log = log
self._yaml = yaml
self.config_key = str(config_key)
tags = obj.pop('tags', None)
self.__dict__ |= self._deep_dict(obj)
@ -186,6 +204,18 @@ class ImageConfig():
if tags:
self.tags = tags
@classmethod
def to_yaml(cls, representer, node):
d = {}
for k in node.__dict__:
# don't serialize attributes starting with _
if k.startswith('_'):
continue
d[k] = node.__getattribute__(k)
return representer.represent_mapping('!ImageConfig', d)
@property
def v_version(self):
return 'edge' if self.version == 'edge' else 'v' + self.version
@ -196,11 +226,7 @@ class ImageConfig():
@property
def local_path(self):
return self.local_dir / ('image.' + self.local_format)
@property
def published_yaml(self):
return self.local_dir / 'published.yaml'
return self.local_dir / ('image.qcow2')
@property
def artifacts_yaml(self):
@ -223,14 +249,13 @@ class ImageConfig():
return self.local_dir / self.image_file
@property
def upload_url(self):
return '/'.join([self.upload_path, self.remote_path, self.image_file]).format(v_version=self.v_version, **self.__dict__)
def image_metadata_file(self):
return '.'.join([self.image_name, 'yaml'])
@property
def download_url(self):
return '/'.join([self.download_path, self.remote_path, self.image_file]).format(v_version=self.v_version, **self.__dict__)
def image_metadata_path(self):
return self.local_dir / self.image_metadata_file
# TODO? region_url instead?
def region_url(self, region, image_id):
return self.cloud_region_url.format(region=region, image_id=image_id, **self.__dict__)
@ -255,9 +280,10 @@ class ImageConfig():
'version': self.version
}
# stuff that might not be there yet
for k in ['imported', 'import_id', 'import_region', 'published']:
for k in self.OPTIONAL_TAGS:
if self.__dict__.get(k, None):
t[k] = self.__dict__[k]
return Tags(t)
# recursively convert a ConfigTree object to a dict object
@ -395,8 +421,9 @@ class ImageConfig():
for m, v in self.__dict__[d].items()
)))
# TODO: this needs to be sorted out for 'upload' and 'release' steps
def refresh_state(self, step, revise=False):
log = logging.getLogger('build')
log = self._log
actions = {}
revision = 0
remote_image = clouds.latest_build_image(self)
@ -435,7 +462,7 @@ class ImageConfig():
'Would remove' if step_state else 'Removing',
remote_image.import_id)
if not step_state:
clouds.remove_image(self, remote_image.import_id)
clouds.delete_image(self, remote_image.import_id)
remote_image = None
@ -470,8 +497,8 @@ class ImageConfig():
# update artifacts, if we've got 'em
if self.artifacts_yaml.exists():
yaml = YAML()
self.artifacts = yaml.load(self.artifacts_yaml)
self.artifacts = self._yaml.load(self.artifacts_yaml)
else:
self.artifacts = None
@ -480,6 +507,50 @@ class ImageConfig():
self.state_updated = datetime.utcnow().isoformat()
def _run(self, cmd, errmsg=None, errvals=[]):
log = self._log
p = Popen(cmd, stdout=PIPE, stdin=PIPE, encoding='utf8')
out, err = p.communicate()
if p.returncode:
if log:
if errmsg:
log.error(errmsg, *errvals)
log.error('COMMAND: %s', ' '.join(cmd))
log.error('EXIT: %d', p.returncode)
log.error('STDOUT:\n%s', out)
log.error('STDERR:\n%s', err)
raise RuntimeError
return out, err
def _save_checksum(self, file):
self._log.info("Calculating checksum for '%s'", file)
sha256_hash = hashlib.sha256()
with open(file, 'rb') as f:
for block in iter(lambda: f.read(4096), b''):
sha256_hash.update(block)
with open(str(file) + '.sha256', 'w') as f:
print(sha256_hash.hexdigest(), file=f)
# convert local QCOW2 to format appropriate for a cloud
def convert_image(self):
self._log.info('Converting %s to %s', self.local_path, self.image_path)
self._run(
self.CONVERT_CMD[self.image_format] + [self.local_path, self.image_path],
errmsg='Unable to convert %s to %s', errvals=[self.local_path, self.image_path],
)
self._save_checksum(self.image_path)
self.built = datetime.utcnow().isoformat()
def save_metadata(self, upload=True):
os.makedirs(self.local_dir, exist_ok=True)
self._log.info('Saving image metadata')
self._yaml.dump(dict(self.tags), self.image_metadata_path)
self._save_checksum(self.image_metadata_path)
class DictObj(dict):

View File

@ -1,3 +1,4 @@
# vim: ts=2 et:
builder = qemu
# TBD

View File

@ -26,9 +26,6 @@ case "$CLOUD" in
aws)
DATASOURCE="Ec2"
;;
nocloud)
DATASOURCE="NoCloud"
;;
*)
echo "Unsupported Cloud '$CLOUD'" >&2
exit 1

View File

@ -4,12 +4,12 @@ set -x
echo "Are you really sure as AMIs might be used by customers !!"
read
#TAG_FILTER="Name=tag:project,Values=zdt-alpine"
TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.16.2-x86_64-bios-tiny-minimal-r1"
#TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.16.2-x86_64-bios-tiny-kubezero-1.23.10-r0"
TAG_FILTER="Name=tag:Name,Values=*-uefi-*"
# TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.16.2-x86_64-bios-tiny-kubezero-1.23.10-r0"
# TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.16.2-x86_64-bios-tiny-minimal-r2"
#for r in $(aws ec2 describe-regions --query "Regions[].{Name:RegionName}" --output text); do
for r in ap-southeast-2 ca-central-1 eu-central-1 us-east-1 us-west-1 us-west-2; do
for r in eu-central-1 us-west-2 ap-southeast-2 ca-central-1 us-east-1 us-west-1; do
amis=$(aws ec2 describe-images --region $r --owners self --output json --filters $TAG_FILTER | jq -r '.Images[].ImageId')
for a in $amis; do
aws ec2 deregister-image --region $r --image-id $a && echo "Deleted AMI $a in $r"

View File

@ -16,6 +16,7 @@ dhclient = true
monit = true
busybox-extras = true
tcpdump = true
uuidgen = true
neofetch = edge-community
tiny-cloud = edge-main
tiny-cloud-openrc = edge-main

View File

@ -16,4 +16,11 @@ WHEN {
py3-boto3 = true
}
}
# Disable KVM during cross build
aarch64 {
qemu.args = [
[-machine, "type=virt"],
]
}
}

View File

@ -5,7 +5,7 @@ include required("common.conf")
packages { include required("kubezero-packages.conf") }
description = [ "- https://kubezero.com" ]
name = [ kubezero-1.23.11 ]
name = [ kubezero-1.24.7 ]
# size = 2G
WHEN {

View File

@ -5,7 +5,7 @@ project = zdt-alpine
# all build configs start with these
Default {
project = ${project}
kubeversion = 1.23
kubeversion = 1.24
# image name/description components
name = [ zdt-alpine ]
@ -35,10 +35,10 @@ Dimensions {
}
arch {
x86_64 { include required("arch/x86_64.conf") }
# aarch64 { include required("arch/aarch64.conf") }
aarch64 { include required("arch/aarch64.conf") }
}
firmware {
bios { include required("firmware/bios.conf") }
#bios { include required("firmware/bios.conf") }
uefi { include required("firmware/uefi.conf") }
}
bootstrap {
@ -68,6 +68,9 @@ Mandatory {
name = [ "r{revision}" ]
encrypted = "alias/zdt/amis"
# We use neofetch custom branding
motd = {}
# final provisioning script
scripts = [ cleanup ]

View File

@ -10,13 +10,13 @@ TARGET=/mnt
echo "@kubezero https://cdn.zero-downtime.net/alpine/v${VERSION}/kubezero" >> "$TARGET/etc/apk/repositories"
wget -q -O $TARGET/etc/apk/keys/stefan@zero-downtime.net-61bb6bfb.rsa.pub https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub
# Install custom sysctl settings
cp $SETUP/zdt-sysctl.conf $TARGET/etc/sysctl.d/60-zdt.conf
# Install fluent-bit
# Install ZDT packages here after repo is available
apk -U --root "$TARGET" --no-cache add \
fluent-bit@kubezero
# Install custom sysctl settings
cp $SETUP/zdt-sysctl.conf $TARGET/etc/sysctl.d/60-zdt.conf
# Fix dhcp to set MTU properly
install -o root -g root -Dm644 -t $TARGET/etc/dhcp $SETUP/dhclient.conf
echo 'Setup dhclient'
@ -67,7 +67,6 @@ mv $TARGET/etc/profile.d/color_prompt.sh.disabled $TARGET/etc/profile.d/color_pr
echo 'alias rs="doas bash"' > $TARGET/etc/profile.d/alias.sh
# branding
rm -f $TARGET/etc/motd
cp $SETUP/neofetch.conf $TARGET/etc/neofetch.conf
cp $SETUP/zdt-ascii.txt $TARGET/etc/neofetch-logo.txt
echo '[ -n "$SSH_TTY" -a "$SHLVL" -eq 1 ] && neofetch --config /etc/neofetch.conf' > $TARGET/etc/profile.d/motd.sh

View File

@ -6,7 +6,7 @@
SETUP=/tmp/setup.d
TARGET=/mnt
KUBE_VERSION=1.23
KUBE_VERSION=1.24
AWS_IAM_VERSION=0.5.9
apk -U --root "$TARGET" --no-cache add \
@ -15,9 +15,14 @@ apk -U --root "$TARGET" --no-cache add \
kubelet@kubezero=~$KUBE_VERSION \
kubectl@kubezero=~$KUBE_VERSION \
ecr-credential-provider@kubezero=~$KUBE_VERSION \
aws-iam-authenticator@kubezero=~$AWS_IAM_VERSION \
aws-neuron-driver@kubezero \
nvidia-open-gpu@kubezero
aws-iam-authenticator@kubezero=~$AWS_IAM_VERSION
# Only install custom kernel modules for X86_64
if [ "$ARCH" == "x86_64" ]; then
apk -U --root "$TARGET" --no-cache add \
aws-neuron-driver@kubezero \
nvidia-open-gpu@kubezero
fi
# Pre-load container images
# echo 'Pre-loaded Kubernetes control container images'

View File

@ -1,5 +1,5 @@
print_info() {
echo -e "\n\n"
echo -e "\n"
prin "$(color 1)Welcome to Alpine - ZeroDownTime edition"
echo
@ -11,6 +11,7 @@ print_info() {
info title
info underline
info "OS" distro
info "Host" model
info "Kernel" kernel
info "Uptime" uptime
@ -27,7 +28,7 @@ print_info() {
info underline
}
title_fqdn="on"
title_fqdn="off"
memory_percent="on"
colors=(1 2 15 15 15 15)
image_source="/etc/neofetch-logo.txt"

View File

@ -1,10 +1,10 @@
qemu = {
"boot_wait": {
"aarch64": "15s",
"x86_64": "15s"
"aarch64": "2m",
"x86_64": "30s"
}
cmd_wait = "5s"
ssh_timeout = "20s"
ssh_timeout = "1m"
memory = 1024
}