Another round of moving the core cloudbender user-data feature into the base image

This commit is contained in:
Stefan Reimer 2022-12-03 18:04:13 +01:00
parent 0e13e43677
commit aef2df858f
10 changed files with 239 additions and 42 deletions

View File

@@ -0,0 +1,35 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=kubezero
pkgver=1.24
pkgrel=0
pkgdesc="KubeZero release package"
url="https://git.zero-downtime.net/ZeroDownTime/alpine-overlay/src/branch/master/kubezero/kubezero"
arch="noarch"
license="AGPL-3.0"
depends="
cri-tools
cri-o=~$pkgver
kubelet=~$pkgver
kubectl=~$pkgver
ecr-credential-provider=~$pkgver
aws-iam-authenticator=~0.5.9
"
options="!check"
source="
shared-sys-fs.start
"
build() {
    return 0
}

package() {
    # core library
    install -Dm755 "$srcdir"/shared-sys-fs.start "$pkgdir/etc/local.d/shared-sys-fs.start"
}
sha512sums="
b0cadf577ea912630efabf8d104f2edaa79bd1697a1f9224ce8a75354dd204196c6d3c15c0318afa44be10be9696ce20ef0015198ee0b74050897d164f77ae60 shared-sys-fs.start
"

View File

@@ -0,0 +1,3 @@
#!/bin/sh
mount --make-shared /sys/fs/cgroup
mount --make-shared /sys
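
These two remounts switch /sys and the cgroup hierarchy to shared mount propagation, which kubelet and CSI drivers rely on when bind-mounting into containers. A quick check after boot (sketch, assumes util-linux findmnt is installed):

findmnt -o TARGET,PROPAGATION /sys              # PROPAGATION should read "shared"
findmnt -o TARGET,PROPAGATION /sys/fs/cgroup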

View File

@@ -7,13 +7,15 @@ pkgdesc="ZeroDownTime Alpine additions and customizations"
url="https://git.zero-downtime.net/ZeroDownTime/alpine-overlay/src/branch/master/kubezero/zdt-base"
arch="noarch"
license="AGPL-3.0"
depends="logrotate syslog-ng neofetch monit"
depends="logrotate syslog-ng neofetch monit file"
options="!check"
subpackages="$pkgname-openrc $pkgname-aws"
install="$pkgname.post-install"
source="
    zdt-mount.init
    lib-base.sh
    cb-mount-var.init
    cb-volumes.startstop
    zdt-sysctl.conf
    https://raw.githubusercontent.com/pixelb/ps_mem/v3.14/ps_mem.py
    syslog-ng.conf
@@ -33,15 +35,23 @@
}
package() {
    # core library
    install -Dm755 "$srcdir/lib-base.sh" "$pkgdir/usr/lib/cloudbender/base.sh"
    # dhcp tuning for MTU
    install -Dm644 "$srcdir"/dhclient.conf "$pkgdir"/etc/dhcp/dhclient.conf
    # various sysctl tunings
    install -Dm644 "$srcdir"/zdt-sysctl.conf "$pkgdir"/etc/sysctl.d/60-zdt.conf
    # init script to find and mount /var
    mkdir -p "$pkgdir"/etc/init.d
    cp zdt-mount.init "$pkgdir"/etc/init.d/zdt-mount
    # init script to mount /var as early as possible; it must not rely on the network!
    install -Dm755 "$srcdir/cb-mount-var.init" "$pkgdir/etc/init.d/cb-mount-var"
    # ensure the "local" init script runs before user-data
    mkdir -p "$pkgdir/etc/conf.d"
    echo 'rc_before="tiny-cloud-final"' > "$pkgdir/etc/conf.d/local"
    install -Dm755 "$srcdir/cb-volumes.startstop" "$pkgdir/etc/local.d/cb-volumes.start"
    ( cd "$pkgdir"/etc/local.d; ln -s cb-volumes.start cb-volumes.stop; )
    # syslog-ng configs, everything logged as JSON into messages
    install -Dm644 "$srcdir"/syslog-ng.conf "$pkgdir"/lib/zdt/syslog-ng.conf
@@ -74,7 +84,9 @@ aws() {
}
sha512sums="
16f4020e2e1f93b13b2ce140dea0c31066a55709cb3ae2ece54b9a6db57583e226bc43ac62be18f5a60274b87ae0de8c6bc613597988451853cdf085cae245eb zdt-mount.init
62e5bd982d3e957ca445891b00cc9fcdc3df22414cd332321a6046ae4ee4c98f9646d3680d83a6d643f01ded229bfea6f968e5734a58a5d233ac899c92ce85da lib-base.sh
0d78bb09b143576b1bc582a62868236e4febed306aa9d085570e91cf9cfbc77dd379342ade9f99203d822f830bbd55d42dcba52cb934952c7b749e252fab1eb3 cb-mount-var.init
b4fbbf55c1a4d38c2877bade1d5e2ce5f1276a6704b0bb95b025e66a7c678710a60a8d4f37cb1f136af1435657cd4ffd03709e80fb61f8950ee39520c1a47f31 cb-volumes.startstop
b9479835d8667fa99f8b1b140f969f0464a9bb3c60c7d19b57e306cfe82357d453932791e446caded71fddd379161ae8328367f1ee75ae3afc1b85e12294b621 zdt-sysctl.conf
76e6a4f309f31bfa07de2d3b1faebe5670722752e18157b69d6e868cbe9e85eda393aed0728b0347a01a810eee442844c78259f86ff71e3136a013f4cbfaaea4 ps_mem.py
9d087f2d4403a9c6d4d2f06fbb86519f2b8b134d8eb305facaef07c183815f917fb7bac916d39d504dbab7fdf3321a3f70954dde57e8986cc223371715bb1c54 syslog-ng.conf
@@ -84,7 +96,7 @@ b928ba547af080a07dc9063d44cb0f258d0e88e7c5a977e8f1cf1263c23608f0a138b8ffca0cdf58
346b0170ada6cc1207ffb7b8ef138a1570a63c7df4d57618aa4b6b6c0d2df2197b0f5b23578ec83c641ee5e724865ac06985222e125809c990467426a0851b72 neofetch.conf
532b8e2eb04942ab20bdc36b5dea1c60239fcbfcb85706123f3e05c18d65c938b85e9072d964ae5793177625a8db47b532db1f5bd5ed5ecbb70d5a331666ff54 zdt-ascii.txt
c565516121b9e6f9d5f769511eb900546753e67cc4208d1b388fdce44cd28699261a5c3905f9a168d4b2d45ac65ac3a2a6a95335f1bbd76d2f444d5f50ec5c9e dhclient.conf
399356eaf09b41cde101aa9164eb492dc824e4bc75d8cd2197d1c2d6120349462dad2791609fb073285b3d3545067611f4608ff14b9d9586a46909269f496c02 cloudbender.stop
cd7ddd7923d45370275fa26c0f2c6dea930c6788c8f55af4388eb42309125c15e5cbb34b186ab4aebbeac3470bed0ba2db9dd46ba8796242b59092f51c5cedf5 cloudbender.stop
2d419d5c25a3829e99326b09876f459e48ab66f5756a8ad39b406c0f2829f5a323e8ff512afd8f32b7b07f24c88efa911bee495ce6c4d1925194cb54d3ba57bd route53.py
00eaff6c0a506580340b2547c3b1602a54238bac6090a15516839411478a4b4fdc138668b8ad23455445131f3a3e3fda175ed4bb0dd375402641c0e7b69c3218 get_iam_sshkeys.py
"

View File

@@ -0,0 +1,20 @@
#!/sbin/openrc-run
# vim:set ts=8 noet ft=sh:
description="Find suitable block device, prepare and mount it under /var"
depend() {
    need fsck root
    use lvm modules
    after clock lvm modules
    before bootmisc
}

start() {
    . /usr/lib/cloudbender/base.sh
    ebegin "Looking for suitable /var"
    setup_var
    eend $?
}
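
Given the need fsck root / before bootmisc constraints, this service belongs in the boot runlevel, so /var is mounted before anything starts writing logs there. Enabling it by hand would look like this (sketch):

rc-update add cb-mount-var boot
rc-service cb-mount-var start    # runs setup_var from /usr/lib/cloudbender/base.sh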

View File

@@ -0,0 +1,18 @@
#!/bin/sh
# vim:set ts=8 noet ft=sh:
#
. /usr/lib/cloudbender/base.sh

if [ "${0##*/}" = cb-volumes.start ]; then
    get_meta_data
    [ -z "$volumes" ] && exit 0
    mount_volumes "$volumes"
elif [ "${0##*/}" = cb-volumes.stop ]; then
    get_meta_data
    [ -z "$volumes" ] && exit 0
    unmount_volumes "$volumes"
fi
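
The script dispatches on its own basename via ${0##*/}, so one file plus the cb-volumes.stop symlink created in the APKBUILD covers both directions. Manual invocation for testing (sketch, assumes the meta-data file is populated):

/etc/local.d/cb-volumes.start    # attach and mount everything listed in $volumes
/etc/local.d/cb-volumes.stop     # unmount and detach the same volumes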

View File

@@ -10,6 +10,6 @@ done
[ $DEBUG -eq 1 ] && SHUTDOWNLOG="$(cat /tmp/shutdown.log)"
[ -n "$RC_REBOOT" ] && ACTION="rebooting" || ACTION="terminated"
[ -z "$DISABLE_SCALING_EVENTS" ] && cloudbender_sns_alarm.sh "Instance $ACTION" "" Info "$SHUTDOWNLOG"
[ -z "$DISABLE_SCALING_EVENTS" ] && /var/lib/cloudbender/sns_alarm.sh "Instance $ACTION" "" Info "$SHUTDOWNLOG"
sleep ${SHUTDOWN_PAUSE:-0}

View File

@@ -1,16 +0,0 @@
#!/sbin/openrc-run
# vim:set ts=8 noet ft=sh:
description="ZDT stateful /var"
depend() {
    after mdev
    before syslog-ng
}

start() {
    ebegin "Looking for suitable /var"
    echo "fake it"
    eend $?
}

View File

@@ -0,0 +1,136 @@
#!/bin/sh
# We build on top of tiny-cloud
. /etc/conf.d/tiny-cloud

# extract user-data args and cloud meta-data into /var/lib/cloud/meta-data
get_meta_data() {
    if [ ! -f /var/lib/cloud/meta-data ]; then
        [ -f /var/lib/cloud/user-data ] && bash /var/lib/cloud/user-data get_meta_data || echo "Error trying to extract cloud meta-data" >&2
    fi

    . /var/lib/cloud/meta-data
}
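
# Example of what the sourced meta-data typically provides for the
# functions below (all values hypothetical):
#   REGION="eu-central-1"
#   INSTANCE_ID="i-0123456789abcdef0"
#   volumes="ebs:vol-0123456789abcdef0:/dev/sdy:/mnt/data"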
# archive the original /var, mount the new volume on top and restore the contents
copy_and_mount() {
    local dev=$1

    tar cf /tmp/var.tar /var 2>/dev/null
    mount -t xfs -o noatime "$dev" /var
    tar xf /tmp/var.tar -C / && rm -f /tmp/var.tar
}

setup_var() {
    for d in /dev/sd?; do
        # resolve to a valid block device
        dev="$(realpath "$d")"
        [ -b "$dev" ] || continue

        # skip if already mounted
        mount | grep -q "$dev" && continue

        case "$CLOUD" in
        aws)
            # on AWS look for sdx
            if [ "$d" = "/dev/sdx" ]; then
                # reuse an existing XFS filesystem, otherwise create one
                type=$(file -Lbs "$d")
                case "$type" in
                    *"XFS filesystem"*) xfs_repair "$d" >/dev/null 2>&1 ;;
                    *) mkfs.xfs -qf "$d" >/dev/null ;;
                esac

                copy_and_mount "$dev"
                grep -q "$dev" /etc/fstab || echo "$dev /var xfs defaults,noatime,nofail 0 2" >> /etc/fstab
            fi
            ;;
        *)
            echo "Unsupported Cloud '$CLOUD'" >&2
            exit 1
            ;;
        esac
    done
}
attach_ebs() {
    local volId="$1"
    local device="$2"
    local tries=30

    while true; do
        _json="$(aws ec2 describe-volumes --volume-ids "$volId" --region "$REGION" --output json)"
        rc=$?; [ $rc -ne 0 ] && return $rc

        vol_status=$(echo "$_json" | jq -r .Volumes[].State)
        attachId=$(echo "$_json" | jq -r .Volumes[].Attachments[].InstanceId)

        [ "$attachId" = "$INSTANCE_ID" ] && break

        if [ "$vol_status" = "available" ]; then
            aws ec2 attach-volume --volume-id "$volId" --instance-id "$INSTANCE_ID" --region "$REGION" --device "$device" > /dev/null
            rc=$?; [ $rc -ne 0 ] && return $rc
            break
        fi

        # if attached but not to us -> detach
        if [ "$vol_status" = "in-use" ]; then
            aws ec2 detach-volume --volume-id "$volId" --region "$REGION" --force
            rc=$?; [ $rc -ne 0 ] && return $rc
        fi

        tries=$((tries - 1))
        [ $tries -eq 0 ] && return 1
        sleep 5
    done
}

_parse_volume() {
    # TODO: proper checks once everything is YAML
    # for now just translate ':' into spaces
    echo "$1" | sed -e 's/:/ /g'
}
# mount optional remote volumes
mount_volumes() {
    local volumes="$1"

    for vol in $volumes; do
        # TODO: check volume type and call the matching function
        set -- $(_parse_volume "$vol")
        volType="$1"; volId="$2"; volDevice="$3"; volPath="$4"

        [ "$volType" != "ebs" ] && { echo "Unknown volume type $volType"; break; }
        attach_ebs "$volId" "$volDevice"
        rc=$?
        [ $rc -ne 0 ] && { ewarn "error trying to attach $volId"; break; }

        # wait for the block device to become available
        while true; do
            mdev -s
            test -b "$volDevice" && break
            sleep 1
        done

        # reuse an existing XFS filesystem, otherwise create one
        type=$(file -Lbs "$volDevice")
        case "$type" in
            *"XFS filesystem"*) xfs_repair "$volDevice" >/dev/null 2>&1 ;;
            *) mkfs.xfs -qf "$volDevice" >/dev/null ;;
        esac

        # mount
        mkdir -p "$volPath"
        mount -t xfs -o noatime "$volDevice" "$volPath"
    done
}

unmount_volumes() {
    local volumes="$1"

    for vol in $volumes; do
        set -- $(_parse_volume "$vol")
        volType="$1"; volId="$2"; volDevice="$3"; volPath="$4"

        umount "$volPath" && aws ec2 detach-volume --volume-id "$volId" --instance-id "$INSTANCE_ID" --region "$REGION" > /dev/null
    done
}
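
Putting it together: a volume spec is a colon-separated type:volume-id:device:mountpoint tuple, which _parse_volume splits into words. Driving the library by hand might look like this (sketch, all values hypothetical):

. /usr/lib/cloudbender/base.sh
REGION="eu-central-1"; INSTANCE_ID="i-0123456789abcdef0"
mount_volumes "ebs:vol-0123456789abcdef0:/dev/sdy:/mnt/data"    # attach, repair/mkfs, mount under /mnt/data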

View File

@@ -4,9 +4,14 @@
sed -i -e 's/^[\s#]*TCPKeepAlive\s.*/TCPKeepAlive yes/' -e 's/^[\s#]*ClientAliveInterval\s.*/ClientAliveInterval 60/' /etc/ssh/sshd_config
echo 'enabled SSH keep alives'
# CgroupsV2
# openRC
sed -i -e 's/^[\s#]*rc_cgroup_mode=.*/rc_cgroup_mode="unified"/' /etc/rc.conf
echo 'enabled cgroupv2'
sed -i -e 's/^[\s#]*rc_logger=.*/rc_logger="YES"/' /etc/rc.conf
echo 'enabled cgroupv2, openRC logging'
# OpenRC parallel - causes too much chaos
#sed -i -e 's/^[\s#]*rc_parallel=.*/rc_parallel="YES"/' /etc/rc.conf
#echo 'enable parallel openRC'
# Setup syslog-ng json logging
cp /lib/zdt/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
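
After the next boot the switch can be verified; a quick check (sketch):

mount | grep cgroup2    # unified hierarchy mounted on /sys/fs/cgroup
ls /var/log/rc.log      # OpenRC service output, written by default once rc_logger="YES" is active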

View File

@@ -1,16 +0,0 @@
#!/sbin/openrc-run
# vim:set ts=8 noet ft=sh:
description="ZDT stateful /var"
depend() {
    after mdev
    before syslog-ng
}

start() {
    ebegin "Looking for suitable /var"
    echo "fake it"
    eend $?
}