feat: latest Alpine 3.16.2, new upstream alpine-cloud-images patches, latest tiny-cloud, KubeZero 1.23

This commit is contained in:
Stefan Reimer 2022-08-17 17:20:49 +02:00
parent 5ac29a4421
commit 17a837716d
34 changed files with 533 additions and 142 deletions

View File

@ -1,6 +1,6 @@
OVERLAY := $(shell pwd)/overlay
ONLY :=
FILTER := --only 3.15 $(ONLY) --skip aarch64
FILTER := --only 3.16 $(ONLY) --skip aarch64
STEP := publish
all: build

View File

@ -282,10 +282,6 @@ not they're included in the final list...
| `false` | skip |
| `true` | include |
### `builder` string
The Packer builder that's used to build images. This is set to `qemu`.
### `qemu.machine_type` string
The QEMU machine type to use when building local images. For x86_64, this is

View File

@ -80,7 +80,7 @@ build {
# QEMU builder
dynamic "source" {
for_each = { for b, c in local.configs:
b => c if contains(c.actions, "build") && c.builder == "qemu"
b => c if contains(c.actions, "build")
}
iterator = B
labels = ["qemu.alpine"] # links us to the base source
@ -129,7 +129,7 @@ build {
iterator = B
labels = ["file"]
content {
only = [ "${B.value.builder}.${B.key}" ] # configs specific to one build
only = [ "qemu.${B.key}" ] # configs specific to one build
sources = [ for d in B.value.script_dirs: "work/scripts/${d}" ]
destination = "/tmp/"
@ -144,7 +144,7 @@ build {
iterator = B
labels = ["shell"]
content {
only = [ "${B.value.builder}.${B.key}" ] # configs specific to one build
only = [ "qemu.${B.key}" ] # configs specific to one build
scripts = [ for s in B.value.scripts: "work/scripts/${s}" ]
use_env_var_file = true
@ -186,7 +186,7 @@ build {
iterator = B
labels = ["shell-local"]
content {
only = [ "${B.value.builder}.${B.key}", "null.${B.key}" ]
only = [ "qemu.${B.key}", "null.${B.key}" ]
inline = [ for action in ["import", "publish"]:
"./cloud_helper.py ${action} ${local.debug_arg} ${local.broker_arg} ${B.key}" if contains(B.value.actions, action)
]

View File

@ -190,7 +190,7 @@ def install_qemu_firmware():
p = Popen(tar_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = p.communicate(input=data)
if p.returncode:
log.critical('Unable to untar %s to get %s', ovmf_url, bin)
log.critical('Unable to untar %s to get %s', apk_url, bin)
log.error('%s = %s', p.returncode, ' '.join(tar_cmd))
log.error('STDOUT:\n%s', out.decode('utf8'))
log.error('STDERR:\n%s', err.decode('utf8'))

View File

@ -79,9 +79,12 @@ for image_key in args.image_keys:
image_config = configs.get(image_key)
if args.action == 'import':
clouds.convert_image(image_config)
clouds.import_image(image_config)
#clouds.upload_image(image_config)
elif args.action == 'publish':
os.makedirs(image_config.local_dir, exist_ok=True)
artifacts = clouds.publish_image(image_config)
yaml.dump(artifacts, Path(image_config.local_dir) / 'artifacts.yaml')
yaml.dump(artifacts, image_config.artifacts_yaml)
#clouds.release_image(image_config) # sha256, sign, metadata, put in place for downloading

View File

@ -1,6 +1,6 @@
# vim: ts=4 et:
from . import aws # , oci, gcp, azure
from . import aws, nocloud # , oci, gcp, azure
ADAPTERS = {}
@ -12,7 +12,7 @@ def register(*mods):
ADAPTERS[cloud] = p
register(aws) # , oci, azure, gcp)
register(aws, nocloud) # , oci, azure, gcp)
# using a credential provider is optional, set across all adapters
@ -32,6 +32,10 @@ def latest_build_image(config):
)
def convert_image(config):
return ADAPTERS[config.cloud].convert_image(config)
def import_image(config):
return ADAPTERS[config.cloud].import_image(config)
@ -40,5 +44,13 @@ def remove_image(config, image_id):
return ADAPTERS[config.cloud].remove_image(image_id)
def upload_image(config):
return ADAPTERS[config.cloud].upload_image(config)
def publish_image(config):
return ADAPTERS[config.cloud].publish_image(config)
def release_image(config):
return ADAPTERS[config.cloud].release_image(config)

View File

@ -22,9 +22,6 @@ class AWSCloudAdapter(CloudAdapterInterface):
'secret_key': 'aws_secret_access_key',
'session_token': 'aws_session_token',
}
CONVERT_CMD = (
'qemu-img', 'convert', '-f', 'qcow2', '-O', 'vpc', '-o', 'force_size=on'
)
ARCH = {
'aarch64': 'arm64',
'x86_64': 'x86_64',
@ -113,22 +110,9 @@ class AWSCloudAdapter(CloudAdapterInterface):
# import an image
# NOTE: requires 'vmimport' role with read/write of <s3_bucket>.* and its objects
def import_image(self, ic):
log = logging.getLogger('import')
image_path = ic.local_path
image_aws = ic.local_dir / 'image.vhd'
name = ic.image_name
description = ic.image_description
# convert QCOW2 to VHD
log.info('Converting %s to VHD format', image_path)
p = Popen(self.CONVERT_CMD + (image_path, image_aws), stdout=PIPE, stdin=PIPE, encoding='utf8')
out, err = p.communicate()
if p.returncode:
log.error('Unable to convert %s to VHD format (%s)', image_path, p.returncode)
log.error('EXIT: %d', p.returncode)
log.error('STDOUT:\n%s', out)
log.error('STDERR:\n%s', err)
raise RuntimeError
log = logging.getLogger('import')
description = ic.image_description
session = self.session()
s3r = session.resource('s3')
@ -136,39 +120,33 @@ class AWSCloudAdapter(CloudAdapterInterface):
ec2r = session.resource('ec2')
bucket_name = 'alpine-cloud-images.' + hashlib.sha1(os.urandom(40)).hexdigest()
s3_key = name + '.vhd'
bucket = s3r.Bucket(bucket_name)
log.info('Creating S3 bucket %s', bucket.name)
bucket.create(
CreateBucketConfiguration={'LocationConstraint': ec2c.meta.region_name}
)
bucket.wait_until_exists()
s3_url = f"s3://{bucket.name}/{s3_key}"
s3_url = f"s3://{bucket.name}/{ic.image_file}"
try:
log.info('Uploading %s to %s', image_aws, s3_url)
bucket.upload_file(str(image_aws), s3_key)
log.info('Uploading %s to %s', ic.image_path, s3_url)
bucket.upload_file(str(ic.image_path), ic.image_file)
# import snapshot from S3
log.info('Importing EC2 snapshot from %s', s3_url)
_import_opts = {
ss_import_opts = {
'DiskContainer': {
'Description': description, # https://github.com/boto/boto3/issues/2286
'Format': 'VHD',
'Url': s3_url
}
'Url': s3_url,
},
'Encrypted': True if ic.encrypted else False,
# NOTE: TagSpecifications -- doesn't work with ResourceType: snapshot?
}
# NOTE: TagSpecifications -- doesn't work with ResourceType: snapshot?
# For some reason the import_snapshot boto function cannot handle setting KmsKeyId to default / empty
# so we need to set it conditionally
if ic.encryption_key_id:
_import_opts['Encrypted'] = True
_import_opts['KmsKeyId'] = ic.encryption_key_id
ss_import = ec2c.import_snapshot(**_import_opts)
if type(ic.encrypted) is str:
ss_import_opts['KmsKeyId'] = ic.encrypted
ss_import = ec2c.import_snapshot(**ss_import_opts)
ss_task_id = ss_import['ImportTaskId']
while True:
ss_task = ec2c.describe_import_snapshot_tasks(
@ -191,7 +169,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
finally:
# always cleanup S3, even if there was an exception raised
log.info('Cleaning up %s', s3_url)
bucket.Object(s3_key).delete()
bucket.Object(ic.image_file).delete()
bucket.delete()
# tag snapshot
@ -318,15 +296,18 @@ class AWSCloudAdapter(CloudAdapterInterface):
log.info('%s: Already exists as %s', r, image.id)
else:
ec2c = self.session(r).client('ec2')
copy_image_opts = {
'Description': source.description,
'Name': source.name,
'SourceImageId': source_id,
'SourceRegion': source_region,
'Encrypted': True if ic.encrypted else False,
}
if type(ic.encrypted) is str:
copy_image_opts['KmsKeyId'] = ic.encrypted
try:
res = ec2c.copy_image(
Description=source.description,
Name=source.name,
SourceImageId=source_id,
SourceRegion=source_region,
Encrypted=True if ic.encryption_key_id else False,
KmsKeyId=ic.encryption_key_id
)
res = ec2c.copy_image(**copy_image_opts)
except Exception:
log.warning('Skipping %s, unable to copy image:', r, exc_info=True)
continue
@ -386,7 +367,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
try:
ec2c.enable_image_deprecation(
ImageId=image.id,
DeprecateAt=f"{tags.end_of_life}T23:59:59Z"
DeprecateAt=f"{tags.end_of_life}T23:59:00Z"
)
except Exception:
log.warning('Unable to set EOL Deprecation on %s image:', r, exc_info=True)

View File

@ -1,10 +1,20 @@
# vim: ts=4 et:
import logging
from subprocess import Popen, PIPE
class CloudAdapterInterface:
CONVERT_CMD = {
'qcow2': ['ln', '-f'],
'vhd': ['qemu-img', 'convert', '-f', 'qcow2', '-O', 'vpc', '-o', 'force_size=on'],
}
def __init__(self, cloud, cred_provider=None):
self._sdk = None
self._sessions = {}
self._clients = {}
self.cloud = cloud
self.cred_provider = cred_provider
self._default_region = None
@ -27,14 +37,47 @@ class CloudAdapterInterface:
def session(self, region=None):
raise NotImplementedError
def client(self, client, region=None):
raise NotImplementedError
# get information about the latest released image
def latest_build_image(self, project, image_key):
raise NotImplementedError
# convert local QCOW2 to format appropriate for a cloud
# convert local QCOW2 to format appropriate for a cloud
def convert_image(self, ic):
    """Convert the locally-built QCOW2 image to the cloud's target format.

    ic -- image config; reads ic.local_path (source QCOW2 path),
    ic.image_format (key into CONVERT_CMD selecting the conversion
    command) and ic.image_path (destination file).

    Raises RuntimeError if the conversion subprocess exits non-zero.
    """
    log = logging.getLogger('import')
    # CONVERT_CMD maps format -> command prefix; for 'qcow2' it is just
    # a hard link ('ln -f'), for 'vhd' a real qemu-img conversion.
    cmd = self.CONVERT_CMD[ic.image_format] + [ic.local_path, ic.image_path]
    # NOTE: original logged the destination path twice; log source -> dest
    log.info('Converting %s to %s', ic.local_path, ic.image_path)
    p = Popen(cmd, stdout=PIPE, stdin=PIPE, encoding='utf8')
    out, err = p.communicate()
    if p.returncode:
        log.error('Unable to convert %s to %s format (%s)', ic.local_path, ic.image_path, p.returncode)
        log.error('EXIT: %d', p.returncode)
        log.error('STDOUT:\n%s', out)
        log.error('STDERR:\n%s', err)
        raise RuntimeError
# import local image to cloud provider
def import_image(self, config):
raise NotImplementedError
# remove unpublished image from cloud provider
def remove_image(self, config, image_id):
raise NotImplementedError
# upload cloud image for testing, if upload_path
def upload_image(self, config):
raise NotImplementedError
# TODO: implement here
# publish image to cloud provider regions
def publish_image(self, config):
raise NotImplementedError
# generate image checksum, save metadata, sign image, make downloadable, if download_path
def release_image(self, config):
raise NotImplementedError
# TODO: implement here!

View File

@ -0,0 +1,51 @@
# NOTE: not meant to be executed directly
# vim: ts=4 et:
import logging
import hashlib
import os
import time
from datetime import datetime
from subprocess import Popen, PIPE, run
from .interfaces.adapter import CloudAdapterInterface
from image_configs import Tags, DictObj
# For NoCloud, most adapter operations are effectively no-ops.
class NoCloudAdapter(CloudAdapterInterface):
    """Adapter for 'nocloud' images: there is no remote cloud to talk to,
    so import/publish only record local metadata (mostly TODO stubs)."""

    # metadata fields tracked per imported image
    IMAGE_INFO = [
        'revision', 'imported', 'import_id', 'import_region', 'published',
    ]

    # get the latest imported image for a given build name
    def latest_build_image(self, project, image_key):
        # TODO: get info from permanently published image (if exists)
        return None

    # import an image
    def import_image(self, ic):
        # TODO: what exactly should be returned?
        return DictObj({
            'revision': ic.revision,
            'imported': datetime.now(),
            # 'import_id': '?',
        })

    # remove an (unpublished) image
    def remove_image(self, image_id):
        # TODO: remove image from temporary location
        pass

    # publish an image
    def publish_image(self, ic):
        # TODO: what exactly should be returned? nocloud isn't launchable.
        return {
            'generic?': 'url?'
        }
def register(cloud, cred_provider=None):
    """Adapter factory hook called by clouds.register()."""
    return NoCloudAdapter(cloud, cred_provider)

View File

@ -35,6 +35,13 @@ Default {
login = alpine
local_format = qcow2
image_format = qcow2
# these paths are subject to change, as image downloads are developed
upload_path = "ssh://dev.alpinelinux.org/~tomalok/public_html/alpine-cloud-images"
download_path = "https://dl-cdn.alpinelinux.org/alpine"
#download_path = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images" # development
remote_path = "{v_version}/cloud/{cloud}"
# image access
access.PUBLIC = true
@ -65,7 +72,8 @@ Dimensions {
cloudinit { include required("bootstrap/cloudinit.conf") }
}
cloud {
aws { include required("cloud/aws.conf") }
aws { include required("cloud/aws.conf") }
nocloud { include required("cloud/nocloud.conf") }
}
}
@ -73,6 +81,7 @@ Dimensions {
Mandatory {
name = [ "r{revision}" ]
description = [ "- https://alpinelinux.org/cloud" ]
encrypted = false
# final motd message
motd.motd_change = "You may change this message by editing /etc/motd."

View File

@ -1,7 +1,7 @@
# vim: ts=2 et:
name = [cloudinit]
bootstrap_name = cloud-init
bootstrap_url = "https://cloud-init.io/"
bootstrap_url = "https://cloud-init.io"
# start cloudinit images with 3.15
EXCLUDE = ["3.12", "3.13", "3.14"]
@ -13,4 +13,4 @@ packages {
}
services.default.cloud-init-hotplugd = true
scripts = [ setup-cloudinit ]
scripts = [ setup-cloudinit ]

View File

@ -1,5 +1,7 @@
# vim: ts=2 et:
name = [tiny]
name = [tiny]
bootstrap_name = Tiny Cloud
bootstrap_url = "https://gitlab.alpinelinux.org/alpine/cloud/tiny-cloud"
services {
sysinit.tiny-cloud-early = true
@ -23,6 +25,12 @@ WHEN {
}
}
}
nocloud {
# tiny-cloud's nocloud support is currently > 3.16
EXCLUDE = ["3.12", "3.13", "3.14", "3.15", "3.16"]
packages.tiny-cloud-nocloud = true
}
# azure.packages.tiny-cloud-azure = true
# gcp.packages.tiny-cloud-gcp = true
# oci.packages.tiny-cloud-oci = true

View File

@ -1,10 +1,6 @@
# vim: ts=2 et:
cloud_name = Amazon Web Services
cloud_image_url = "https://{region}.console.aws.amazon.com/ec2/home#Images:visibility=public-images;imageId={image_id}",
cloud_launch_url = "https://{region}.console.aws.amazon.com/ec2/home#launchAmi={image_id}"
builder = qemu
ntp_server = 169.254.169.123
cloud_name = Amazon Web Services
image_format = vhd
kernel_modules {
ena = true
@ -17,10 +13,14 @@ initfs_features {
ena = true
nvme = true
}
ntp_server = 169.254.169.123
access.PUBLIC = true
regions.ALL = true
cloud_region_url = "https://{region}.console.aws.amazon.com/ec2/home#Images:visibility=public-images;imageId={image_id}",
cloud_launch_url = "https://{region}.console.aws.amazon.com/ec2/home#launchAmi={image_id}"
WHEN {
aarch64 {
# new AWS aarch64 default...

View File

@ -0,0 +1,5 @@
# vim: ts=2 et:
cloud_name = NoCloud
image_format = qcow2
ntp_server = ""

View File

@ -4,4 +4,4 @@ firmware_name = BIOS
bootloader = extlinux
packages.syslinux = --no-scripts
qemu.firmware = null
qemu.firmware = null

View File

@ -0,0 +1,212 @@
#!/usr/bin/env python3
# vim: ts=4 et:
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys
import textwrap
NOTE = textwrap.dedent("""
This script's output provides a mustache-ready datasource to alpine-mksite
(https://gitlab.alpinelinux.org/alpine/infra/alpine-mksite) and should be
run after the main 'build' script has published ALL images.
STDOUT from this script should be saved as 'cloud/releases.yaml' in the
above alpine-mksite repo.
""")
sys.pycache_prefix = 'work/__pycache__'
if not os.path.exists('work'):
print('FATAL: Work directory does not exist.', file=sys.stderr)
print(NOTE, file=sys.stderr)
exit(1)
# Re-execute using the right virtual environment, if necessary.
venv_args = [os.path.join('work', 'bin', 'python3')] + sys.argv
if os.path.join(os.getcwd(), venv_args[0]) != sys.executable:
print("Re-executing with work environment's Python...\n", file=sys.stderr)
os.execv(venv_args[0], venv_args)
# We're now in the right Python environment
import argparse
import logging
from collections import defaultdict
from ruamel.yaml import YAML
import clouds
from image_configs import ImageConfigManager
### Constants & Variables
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
### Functions
# allows us to set values deep within an object that might not be fully defined
def dictfactory():
    """Return a defaultdict whose missing keys auto-create further
    nested dictfactory dicts, allowing deep assignment without setup."""
    return defaultdict(dictfactory)
# undo dictfactory() objects to normal objects
def undictfactory(o):
    """Recursively convert defaultdict trees (from dictfactory) back
    into plain dicts; non-defaultdict values pass through unchanged."""
    if not isinstance(o, defaultdict):
        return o
    return {key: undictfactory(value) for key, value in o.items()}
### Command Line & Logging
parser = argparse.ArgumentParser(description=NOTE)
parser.add_argument(
'--use-broker', action='store_true',
help='use the identity broker to get credentials')
parser.add_argument('--debug', action='store_true', help='enable debug output')
args = parser.parse_args()
log = logging.getLogger('gen_mksite_releases')
log.setLevel(logging.DEBUG if args.debug else logging.INFO)
console = logging.StreamHandler(sys.stderr)
console.setFormatter(logging.Formatter(LOGFORMAT))
log.addHandler(console)
log.debug(args)
# set up credential provider, if we're going to use it
if args.use_broker:
clouds.set_credential_provider()
# load build configs
configs = ImageConfigManager(
conf_path='work/configs/images.conf',
yaml_path='work/images.yaml',
log='gen_mksite_releases'
)
# make sure images.yaml is up-to-date with reality
configs.refresh_state('final')
yaml = YAML()
filters = dictfactory()
versions = dictfactory()
data = {}
log.info('Transforming image data')
for i_key, i_cfg in configs.get().items():
if not i_cfg.published:
continue
version = i_cfg.version
if version == 'edge':
continue
image_name = i_cfg.image_name
release = i_cfg.release
arch = i_cfg.arch
firmware = i_cfg.firmware
bootstrap = i_cfg.bootstrap
cloud = i_cfg.cloud
if cloud not in filters['clouds']:
filters['clouds'][cloud] = {
'cloud': cloud,
'cloud_name': i_cfg.cloud_name,
}
filters['regions'] = {}
if arch not in filters['archs']:
filters['archs'][arch] = {
'arch': arch,
'arch_name': i_cfg.arch_name,
}
if firmware not in filters['firmwares']:
filters['firmwares'][firmware] = {
'firmware': firmware,
'firmware_name': i_cfg.firmware_name,
}
if bootstrap not in filters['bootstraps']:
filters['bootstraps'][bootstrap] = {
'bootstrap': bootstrap,
'bootstrap_name': i_cfg.bootstrap_name,
}
if i_cfg.artifacts:
for region, image_id in {r: i_cfg.artifacts[r] for r in sorted(i_cfg.artifacts)}.items():
if region not in filters['regions']:
filters['regions'][region] = {
'region': region,
'clouds': [cloud],
}
if cloud not in filters['regions'][region]['clouds']:
filters['regions'][region]['clouds'].append(cloud)
versions[version] |= {
'version': version,
'release': release,
'end_of_life': i_cfg.end_of_life,
}
versions[version]['images'][image_name] |= {
'image_name': image_name,
'arch': arch,
'firmware': firmware,
'bootstrap': bootstrap,
'published': i_cfg.published.split('T')[0], # just the date
}
versions[version]['images'][image_name]['downloads'][cloud] |= {
'cloud': cloud,
'image_url': i_cfg.download_url,
}
versions[version]['images'][image_name]['regions'][region] |= {
'cloud': cloud,
'region': region,
'region_url': i_cfg.region_url(region, image_id),
'launch_url': i_cfg.launch_url(region, image_id),
}
log.info('Making data mustache-compatible')
# convert filters to mustache-compatible format
data['filters'] = {}
for f in ['clouds', 'regions', 'archs', 'firmwares', 'bootstraps']:
data['filters'][f] = [
filters[f][k] for k in filters[f] # order as they appear in work/images.yaml
]
for r in data['filters']['regions']:
c = r.pop('clouds')
r['clouds'] = [{'cloud': v} for v in c]
# convert versions to mustache-compatible format
data['versions'] = []
versions = undictfactory(versions)
for version in sorted(versions, reverse=True, key=lambda s: [int(u) for u in s.split('.')]):
images = versions[version].pop('images')
i = []
for image_name in images: # order as they appear in work/images.yaml
downloads = images[image_name].pop('downloads')
d = []
for download in downloads:
d.append(downloads[download])
images[image_name]['downloads'] = d
regions = images[image_name].pop('regions')
r = []
for region in sorted(regions):
r.append(regions[region])
images[image_name]['regions'] = r
i.append(images[image_name])
versions[version]['images'] = i
data['versions'].append(versions[version])
log.info('Dumping YAML')
yaml.dump(data, sys.stdout)
log.info('Done')

View File

@ -7,6 +7,10 @@ import sys
import textwrap
NOTE = textwrap.dedent("""
NOTE: This is an old script, replaced by 'gen_mksite_releases.py' after
https://gitlab.alpinelinux.org/alpine/infra/alpine-mksite/-/merge_requests/52
is merged.
This script's output is compatible with the retired alpine-ec2-ami repo's
releases/alpine.yaml, in order to bridge the gap until
https://alpinelinux.org/cloud dynamically calls a published-image metadata

View File

@ -100,18 +100,6 @@ class ImageConfigManager():
for dim, dim_key in dim_map.items():
dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key])
exclude = dim_cfg.pop('EXCLUDE', None)
if exclude and set(exclude) & set(dim_keys):
self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude)
skip = True
break
if eol := dim_cfg.get('end_of_life', None):
if self.now > datetime.fromisoformat(eol):
self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol)
skip = True
break
image_config._merge(dim_cfg)
# now that we're done with ConfigTree/dim_cfg, remove " from dim_keys
@ -124,6 +112,18 @@ class ImageConfigManager():
if len(set(when_keys.split(' ')) & dim_keys) > 0:
image_config._merge(when_conf)
exclude = image_config._pop('EXCLUDE', None)
if exclude and set(exclude) & set(dim_keys):
self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude)
skip = True
break
if eol := image_config._get('end_of_life', None):
if self.now > datetime.fromisoformat(eol):
self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol)
skip = True
break
if skip is True:
continue
@ -186,6 +186,10 @@ class ImageConfig():
if tags:
self.tags = tags
@property
def v_version(self):
return 'edge' if self.version == 'edge' else 'v' + self.version
@property
def local_dir(self):
return Path('work/images') / self.cloud / self.image_key
@ -198,6 +202,10 @@ class ImageConfig():
def published_yaml(self):
return self.local_dir / 'published.yaml'
@property
def artifacts_yaml(self):
return self.local_dir / 'artifacts.yaml'
@property
def image_name(self):
return self.name.format(**self.__dict__)
@ -206,8 +214,25 @@ class ImageConfig():
def image_description(self):
return self.description.format(**self.__dict__)
def image_url(self, region, image_id):
return self.cloud_image_url.format(region=region, image_id=image_id, **self.__dict__)
@property
def image_file(self):
return '.'.join([self.image_name, self.image_format])
@property
def image_path(self):
return self.local_dir / self.image_file
@property
def upload_url(self):
return '/'.join([self.upload_path, self.remote_path, self.image_file]).format(v_version=self.v_version, **self.__dict__)
@property
def download_url(self):
return '/'.join([self.download_path, self.remote_path, self.image_file]).format(v_version=self.v_version, **self.__dict__)
# TODO? region_url instead?
def region_url(self, region, image_id):
return self.cloud_region_url.format(region=region, image_id=image_id, **self.__dict__)
def launch_url(self, region, image_id):
return self.cloud_launch_url.format(region=region, image_id=image_id, **self.__dict__)
@ -261,6 +286,9 @@ class ImageConfig():
def _merge(self, obj={}):
mergedeep.merge(self.__dict__, self._deep_dict(obj), strategy=mergedeep.Strategy.ADDITIVE)
def _get(self, attr, default=None):
return self.__dict__.get(attr, default)
def _pop(self, attr, default=None):
return self.__dict__.pop(attr, default)
@ -441,10 +469,9 @@ class ImageConfig():
}
# update artifacts, if we've got 'em
artifacts_yaml = self.local_dir / 'artifacts.yaml'
if artifacts_yaml.exists():
if self.artifacts_yaml.exists():
yaml = YAML()
self.artifacts = yaml.load(artifacts_yaml)
self.artifacts = yaml.load(self.artifacts_yaml)
else:
self.artifacts = None

View File

@ -1,4 +1,3 @@
# vim: ts=2 et:
builder = qemu
# TBD

View File

@ -26,6 +26,9 @@ case "$CLOUD" in
aws)
DATASOURCE="Ec2"
;;
nocloud)
DATASOURCE="NoCloud"
;;
*)
echo "Unsupported Cloud '$CLOUD'" >&2
exit 1

View File

@ -5,7 +5,8 @@ echo "Are you really sure as AMIs might be used by customers !!"
read
#TAG_FILTER="Name=tag:project,Values=zdt-alpine"
TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.15.4-x86_64-kubezero-1.22.8-r3"
TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.16.2-x86_64-bios-tiny-minimal-r1"
#TAG_FILTER="Name=tag:Name,Values=zdt-alpine-3.16.2-x86_64-bios-tiny-kubezero-1.23.9-r0"
#for r in $(aws ec2 describe-regions --query "Regions[].{Name:RegionName}" --output text); do
for r in ap-southeast-2 ca-central-1 eu-central-1 us-east-1 us-west-1 us-west-2; do

View File

@ -12,5 +12,10 @@ ethtool = true
nvme-cli = true
xfsprogs = true
dhclient = true
#monit = true
# prometheus-node-exporter = true
monit = true
prometheus-node-exporter = true
prometheus-wireguard-exporter = true
tiny-cloud = edge
tiny-cloud-openrc = edge
tiny-cloud-network = edge
tiny-cloud-aws = edge

View File

@ -10,5 +10,6 @@ boot {
default {
local = true
crond = true
# monit = true
# monit = true # disabled: monit is respawned via /etc/inittab instead of OpenRC
node-exporter = true
}

View File

@ -4,6 +4,10 @@ scripts = [ setup-common ]
packages { include required("common-packages.conf") }
services { include required("common-services.conf") }
repos {
"https://dl-cdn.alpinelinux.org/alpine/edge/main" = edge
}
WHEN {
aws {
packages {

View File

@ -3,7 +3,7 @@
include required("common.conf")
description = [ "- https://kubezero.com" ]
name = [ kubezero-1.22.8 ]
name = [ kubezero-1.23.9 ]
size = 2G
WHEN {

View File

@ -5,10 +5,9 @@ project = zdt-alpine
# all build configs start with these
Default {
project = ${project}
kubeversion = 1.22
kubeversion = 1.23
# image name/description components
encryption_key_id = null
name = [ zdt-alpine ]
description = [ "ZeroDownTime Alpine Images" ]
@ -38,11 +37,11 @@ Default {
# profile build matrix
Dimensions {
version {
"3.15" { include required("version/3.15.conf") }
# edge { include required("version/edge.conf") }
"3.16" { include required("version/3.16.conf") }
# edge { include required("version/edge.conf") }
}
arch {
x86_64 { include required("arch/x86_64.conf") }
x86_64 { include required("arch/x86_64.conf") }
# aarch64 { include required("arch/aarch64.conf") }
}
firmware {
@ -54,7 +53,7 @@ Dimensions {
tiny { include required("bootstrap/tiny.conf") }
}
cloud {
aws { include required("cloud/aws.conf") }
aws { include required("cloud/aws.conf") }
aws.regions {
ALL = false
ap-southeast-2 = true
@ -73,8 +72,8 @@ Dimensions {
# all build configs merge these at the very end
Mandatory {
name = [ "r{revision}" ]
encryption_key_id = "alias/zdt/amis"
name = [ "r{revision}" ]
encrypted = "alias/zdt/amis"
# final provisioning script
scripts = [ cleanup ]

View File

@ -6,8 +6,16 @@
SETUP=/tmp/setup.d
TARGET=/mnt
# Enable testing repo - do we really want versions to change randomly ?
# echo "@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> "$TARGET/etc/apk/repositories"
# Enable ZDT repo
echo "@kubezero https://cdn.zero-downtime.net/alpine/v${VERSION}/kubezero" >> "$TARGET/etc/apk/repositories"
wget -q -O $TARGET/etc/apk/keys/stefan@zero-downtime.net-61bb6bfb.rsa.pub https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub
# Install custom sysctl settings
cp $SETUP/zdt-sysctl.conf $TARGET/etc/sysctl.d/60-zdt.conf
# Install fluent-bit
apk -U --root "$TARGET" --no-cache add \
fluent-bit@kubezero
# Fix dhcp to set MTU properly
install -o root -g root -Dm644 -t $TARGET/etc/dhcp $SETUP/dhclient.conf
@ -38,9 +46,6 @@ echo 'Installed cloudbender shutdown hook'
cp $SETUP/route53.py $TARGET/usr/local/bin
echo 'Installed route53.py'
# use patched tiny-cloud until PR is merged
cp $SETUP/tiny.init-final $TARGET/lib/tiny-cloud/init-final
# ps_mem
#wget https://raw.githubusercontent.com/pixelb/ps_mem/master/ps_mem.py
#sed -i -e 's,#!/usr/bin/env python,#!/usr/bin/env python3,' ps_mem.py
@ -48,6 +53,13 @@ cp $SETUP/tiny.init-final $TARGET/lib/tiny-cloud/init-final
cp $SETUP/ps_mem.py $TARGET/usr/local/bin/ps_mem
echo 'Installed ps_mem'
# use init to spawn monit
echo ":2345:respawn:/usr/bin/monit -Ic /etc/monitrc" >> $TARGET/etc/inittab
mkdir -p $TARGET/etc/monit.d
cp $SETUP/monitrc $TARGET/etc/monitrc && chmod 600 $TARGET/etc/monitrc
cp $SETUP/monit_alert.sh $TARGET/usr/local/bin/monit_alert.sh
echo 'Enable monit via init, hooked up cloudbender alerting'
# QoL
mv $TARGET/etc/profile.d/color_prompt.sh.disabled $TARGET/etc/profile.d/color_prompt.sh
echo 'alias rs="doas bash --login"' > $TARGET/etc/profile.d/alias.sh

View File

@ -6,12 +6,8 @@
SETUP=/tmp/setup.d
TARGET=/mnt
KUBE_VERSION=1.22
AWS_IAM_VERSION=0.5.7
# Enable ZDT repo
echo "@kubezero https://cdn.zero-downtime.net/alpine/v${VERSION}/kubezero" >> "$TARGET/etc/apk/repositories"
wget -q -O $TARGET/etc/apk/keys/stefan@zero-downtime.net-61bb6bfb.rsa.pub https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub
KUBE_VERSION=1.23
AWS_IAM_VERSION=0.5.9
apk -U --root "$TARGET" --no-cache add \
cri-tools@kubezero \
@ -20,7 +16,9 @@ apk -U --root "$TARGET" --no-cache add \
kubectl@kubezero=~$KUBE_VERSION \
ecr-credential-provider@kubezero=~$KUBE_VERSION \
aws-iam-authenticator@kubezero=~$AWS_IAM_VERSION \
aws-neuron-driver@kubezero
aws-neuron-driver@kubezero \
nvidia-open-gpu@kubezero \
conmon@kubezero # temp until conmon gets fixed in edge
# Pre-load container images
# echo 'Pre-loaded Kubernetes control container images'

View File

@ -0,0 +1,10 @@
#!/bin/sh
# Forward a monit event to CloudBender SNS alerting.
# Usage: monit_alert.sh <level> [attachment...]
# Monit supplies MONIT_SERVICE / MONIT_EVENT / MONIT_DESCRIPTION via env.

LEVEL=${1}
shift
ATTACHMENT="$*"

# Only alert when monit actually provided event context
if [ -n "${MONIT_SERVICE}${MONIT_EVENT}" -a -n "$MONIT_DESCRIPTION" ]; then
    # quote $LEVEL: unquoted it would vanish when empty, shifting arguments
    /usr/local/bin/cloudbender_sns_alarm.sh "$MONIT_SERVICE - $MONIT_EVENT" "$MONIT_DESCRIPTION" "$LEVEL" "$ATTACHMENT"
fi

View File

@ -0,0 +1,20 @@
# Check services every 30 seconds
set daemon 30
# Monit starts after user-data so no delay required
# with start delay 180
set log syslog
set httpd port 2812 and
use address localhost
allow localhost
allow admin:localmonit
# Basic rootfs check
# >80%: emergency logrotate
# >90%: warning
check filesystem rootfs with path /
if space usage > 80% then exec "/etc/periodic/hourly/logrotate"
if space usage > 90% then exec "/usr/local/bin/monit_alert.sh warning"
include /etc/monit.d/*.conf

View File

@ -1,7 +1,7 @@
# syslog-ng, format all json into messages
# https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.23/administration-guide/63#TOPIC-1268643
@version: 3.30
@version: 3.36
@include "scl.conf"
options { chain_hostnames(off); flush_lines(0); use_dns(no); use_fqdn(no);

View File

@ -5,7 +5,7 @@
missingok
notifempty
compress
maxsize 64M
maxsize 32M
sharedscripts
postrotate
rc-service syslog-ng reload > /dev/null

View File

@ -1,28 +0,0 @@
# Tiny Cloud - Final Phase Functions
# vim:set ts=4 et ft=sh:
source /lib/tiny-cloud/init-common
save_userdata() {
skip_action save_userdata && return
imds -e @userdata > "$TINY_CLOUD_VAR/$CLOUD_USERDATA"
gunzip -t "$TINY_CLOUD_VAR/$CLOUD_USERDATA" && mv "$TINY_CLOUD_VAR/$CLOUD_USERDATA" "$TINY_CLOUD_VAR/$CLOUD_USERDATA".gz && gunzip -k "$TINY_CLOUD_VAR/$CLOUD_USERDATA".gz
}
is_userdata_script() {
head -n1 "$TINY_CLOUD_VAR/$CLOUD_USERDATA" | grep -q '#!/'
}
run_userdata() {
skip_action run_userdata && return
local log="$TINY_CLOUD_LOGS/$CLOUD_USERDATA.log"
local exit="$TINY_CLOUD_LOGS/$CLOUD_USERDATA.exit"
local userdata="$TINY_CLOUD_VAR/$CLOUD_USERDATA"
chmod +x "$userdata"
{ "$userdata" 2>& 1; echo $? > "$exit"; } | tee "$log"
return $(cat "$exit")
}

View File

@ -0,0 +1,16 @@
net.core.somaxconn = 1024
net.core.netdev_max_backlog = 4096
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_wmem = 4096 12582912 16777216
net.ipv4.tcp_rmem = 4096 12582912 16777216
net.ipv4.tcp_max_syn_backlog = 8192
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_retries2 = 9
net.ipv4.tcp_slow_start_after_idle = 0
net.netfilter.nf_conntrack_max = 524288
net.ipv4.ip_no_pmtu_disc = 0
net.ipv4.ip_forward_use_pmtu = 0
kernel.panic = 10
kernel.panic_on_oops = 1
vm.oom_dump_tasks = 0