feat: NoCloud metal and VM configs, Alpine 3.19 OS WIP

Stefan Reimer 2024-03-20 11:04:12 +00:00
parent bfba223c17
commit f289ad4d07
15 changed files with 566 additions and 246 deletions

View File

@ -1,7 +1,11 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
# https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/runtime/aws-neuronx-dkms/index.html#neuron-driver-release-notes
#
# Todo: needs fix of https://github.com/aws-neuron/aws-neuron-sdk/issues/843
#
pkgname=aws-neuron-driver
pkgver=2.10.11.0
pkgver=2.15.9.0
pkgrel=0
pkgdesc="Linux Kernel module for AWS Neuron INF instances"
url="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/index.html#"
@ -10,8 +14,6 @@ license="GPL-2.0"
makedepends="bash xz linux-headers linux-virt-dev"
options="!check"
# https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/neuron-driver.html#neuron-driver-release-notes
# apt-get download --print-uris aws-neuron-dkms | cut -d' ' -f1
source="$pkgname-$pkgver.deb::https://apt.repos.neuron.amazonaws.com/pool/main/a/aws-neuronx-dkms/aws-neuronx-dkms_"$pkgver"_amd64.deb"
unpack() {
@ -42,5 +44,5 @@ package() {
}
sha512sums="
0fdbc1ebd12044be77714affd427c198f72ce04f0236a100e49642fbdb143a4e6c1156f4555ac0fe8baa6bea09420408bbb1cfd2857f29d54e615b22193afd0d aws-neuron-driver-2.10.11.0.deb
e0c6261a51ce847eb5b0d11c68345ae95ff45a9fecfd1d9a98f327436d369b48f7d4a7c38ffcf7a686b8d319a4ecdc5afd1e4bf946157f72d406daf8164207b7 aws-neuron-driver-2.15.9.0.deb
"

View File

@ -0,0 +1,43 @@
From dca56cf4d28bbbb1d3be029ce9a6710cb3f6cd2f Mon Sep 17 00:00:00 2001
From: Laszlo Ersek <lersek@redhat.com>
Date: Thu, 4 Jun 2020 13:34:12 +0200
Subject: BaseTools: do not build BrotliCompress (RH only)
Notes about the RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] ->
RHEL-8.5/20210520-e1999b264f1f [edk2-stable202105] rebase:
- no change
Notes about the RHEL-8.2/20190904-37eef91017ad [edk2-stable201908] ->
RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] rebase:
- New patch.
BrotliCompress is not used for building ArmVirtPkg or OvmfPkg platforms.
It depends on one of the upstream Brotli git submodules that we removed
earlier in this rebase series. (See patch "remove upstream edk2's Brotli
submodules (RH only)".)
Do not attempt to build BrotliCompress.
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit db8ccca337e2c5722c1d408d2541cf653d3371a2)
---
BaseTools/Source/C/GNUmakefile | 1 -
1 file changed, 1 deletion(-)
diff --git a/BaseTools/Source/C/GNUmakefile b/BaseTools/Source/C/GNUmakefile
index 8c191e0c38..3eae824a1c 100644
--- a/BaseTools/Source/C/GNUmakefile
+++ b/BaseTools/Source/C/GNUmakefile
@@ -48,7 +48,6 @@ all: makerootdir subdirs
LIBRARIES = Common
VFRAUTOGEN = VfrCompile/VfrLexer.h
APPLICATIONS = \
- BrotliCompress \
VfrCompile \
EfiRom \
GenFfs \
--
2.27.0

View File

@ -0,0 +1,49 @@
From 9729dd1d6b83961d531e29777d0cc4a610b108be Mon Sep 17 00:00:00 2001
From: Laszlo Ersek <lersek@redhat.com>
Date: Thu, 4 Jun 2020 13:39:08 +0200
Subject: MdeModulePkg: remove package-private Brotli include path (RH only)
Notes about the RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] ->
RHEL-8.5/20210520-e1999b264f1f [edk2-stable202105] rebase:
- no change
Notes about the RHEL-8.2/20190904-37eef91017ad [edk2-stable201908] ->
RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] rebase:
- New patch.
Originating from upstream commit 58802e02c41b
("MdeModulePkg/BrotliCustomDecompressLib: Make brotli a submodule",
2020-04-16), "MdeModulePkg/MdeModulePkg.dec" contains a package-internal
include path into a Brotli submodule.
The edk2 build system requires such include paths to resolve successfully,
regardless of the firmware platform being built. Because
BrotliCustomDecompressLib is not consumed by any OvmfPkg or ArmVirtPkg
platforms, and we've removed the submodule earlier in this patch set,
remove the include path too.
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit e05e0de713c4a2b8adb6ff9809611f222bfe50ed)
---
MdeModulePkg/MdeModulePkg.dec | 3 ---
1 file changed, 3 deletions(-)
diff --git a/MdeModulePkg/MdeModulePkg.dec b/MdeModulePkg/MdeModulePkg.dec
index 8d38383915..ba2d0290e7 100644
--- a/MdeModulePkg/MdeModulePkg.dec
+++ b/MdeModulePkg/MdeModulePkg.dec
@@ -24,9 +24,6 @@
[Includes]
Include
-[Includes.Common.Private]
- Library/BrotliCustomDecompressLib/brotli/c/include
-
[LibraryClasses]
## @libraryclass Defines a set of methods to reset whole system.
ResetSystemLib|Include/Library/ResetSystemLib.h
--
2.27.0

kubezero/edk2/APKBUILD
View File

@ -0,0 +1,178 @@
# Contributor: Timo Teräs <timo.teras@iki.fi>
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=edk2
pkgver=0.0.202308
_realver=edk2-stable${pkgver##*.}
_sslver=3.0.9
_sfver=3e
pkgrel=0
pkgdesc="EFI Development Kit II"
url="https://github.com/tianocore/tianocore.github.io/wiki/EDK-II/"
arch="x86_64 aarch64"
license="BSD-2-Clause-Patent"
makedepends="bash python3 iasl nasm util-linux-dev util-linux-misc"
options="!archcheck !check" # has no checks
subpackages="$pkgname-pyc"
_mipisyst_commit=370b5944c046bab043dd8b133727b2135af7747a
source="$pkgname-$pkgver.tar.gz::https://github.com/tianocore/edk2/archive/$_realver.tar.gz
mipisyst-$_mipisyst_commit.tar.gz::https://github.com/MIPI-Alliance/public-mipi-sys-t/archive/$_mipisyst_commit.tar.gz
https://www.openssl.org/source/openssl-$_sslver.tar.gz
http://www.jhauser.us/arithmetic/SoftFloat-$_sfver.zip
build-hack.patch
0008-BaseTools-do-not-build-BrotliCompress-RH-only.patch
0009-MdeModulePkg-remove-package-private-Brotli-include-p.patch
"
builddir="$srcdir/$pkgname-$_realver"
case "$CARCH" in
x86)
TARGET_ARCH=IA32
PLATFORM=OvmfPkg/OvmfPkgIa32X64.dsc
;;
x86_64)
TARGET_ARCH=X64
PLATFORM="OvmfPkg/OvmfPkgX64.dsc OvmfPkg/OvmfXen.dsc OvmfPkg/CloudHv/CloudHvX64.dsc"
subpackages="$subpackages ovmf:_ovmf:noarch ovmf-xen:_xen:noarch cloudhv:_cloudhv:noarch"
;;
aarch64)
TARGET_ARCH=AARCH64
PLATFORM=ArmVirtPkg/ArmVirtQemu.dsc
subpackages="$subpackages aavmf::noarch"
;;
esac
TOOLCHAIN=GCC5
RELEASE=RELEASE
prepare() {
# unix line endings for the files to be patched
sed -e 's/\r$//' -i BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp \
BaseTools/Source/C/VolInfo/VolInfo.c
rm -rf CryptoPkg/Library/OpensslLib/openssl
ln -s "$srcdir"/openssl-$_sslver CryptoPkg/Library/OpensslLib/openssl
rm -rf ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
ln -s "$srcdir"/SoftFloat-$_sfver \
ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
rm -rf MdePkg/Library/MipiSysTLib/mipisyst
ln -s "$srcdir"/public-mipi-sys-t-$_mipisyst_commit \
MdePkg/Library/MipiSysTLib/mipisyst
default_prepare
}
build() {
export PYTHON_COMMAND=python3
export WORKSPACE=$PWD
export PACKAGES_PATH=$PWD
export EDK_TOOLS_PATH=$PWD/BaseTools/
export PATH=$PWD/BaseTools/BinWrappers/PosixLike/:$PATH
# parallel build fails
unset MAKEFLAGS
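# the edk2 "build" tool below manages its own parallelism via -n ${JOBS:-2}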
bash -c ". edksetup.sh"
make -C BaseTools
for _p in $PLATFORM; do
msg "Building Plaform Files $_p"
command build -b $RELEASE \
-a $TARGET_ARCH \
-t $TOOLCHAIN \
-p $_p \
-n ${JOBS:-2} \
-DSECURE_BOOT_ENABLE=TRUE \
-DTPM2_ENABLE=TRUE
done
}
package() {
mkdir -p "$pkgdir"/usr/bin \
"$pkgdir"/usr/share/$pkgname/Conf \
"$pkgdir"/usr/share/$pkgname/Scripts
install BaseTools/Source/C/bin/* BaseTools/BinWrappers/PosixLike/LzmaF86Compress \
"$pkgdir"/usr/bin
install BaseTools/BuildEnv "$pkgdir"/usr/share/$pkgname/
install BaseTools/Conf/*.template "$pkgdir"/usr/share/$pkgname/Conf
install BaseTools/Scripts/GccBase.lds "$pkgdir"/usr/share/$pkgname/Scripts
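# ship the BaseTools Python tools and generate thin /usr/bin wrappers that set PYTHONPATH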
for i in $(find BaseTools/Source/Python -type d -maxdepth 1); do
local mod=${i##*/}
test -f "$i/$mod.py" || continue
cp -R BaseTools/Source/Python/"$mod" "$pkgdir"/usr/share/edk2/Python/
cat <<- EOF > "$pkgdir"/usr/bin/"$mod".py
#!/bin/sh
export PYTHONPATH=/usr/share/edk2/Python
exec $PYTHON_COMMAND /usr/share/edk2/Python/$mod/$mod.py "\$@"
EOF
chmod +x "$pkgdir"/usr/bin/"$mod".py
done
}
_ovmf() {
pkgdesc="Open Virtual Machine Firmware (OVMF) BIOS"
license="BSD MIT"
for fw in "$builddir"/Build/OvmfX64/"$RELEASE"_"$TOOLCHAIN"/FV/*.fd; do
install -D $fw "$subpkgdir"/usr/share/OVMF/${fw##*/}
done
# don't ship memfd for now to save space
rm -f "$subpkgdir"/usr/share/OVMF/MEMFD.fd
install -d "$subpkgdir"/usr/share/ovmf
ln -sf ../OVMF/OVMF.fd "$subpkgdir"/usr/share/ovmf/bios.bin
}
_xen() {
pkgdesc="Open Virtual Machine Firmware (OVMF) - Xen build"
license="BSD MIT"
install -D "$builddir"/Build/OvmfXen/"$RELEASE"_"$TOOLCHAIN"/FV/OVMF.fd \
"$subpkgdir"/usr/lib/xen/boot/ovmf.bin
}
_cloudhv() {
pkgdesc="EDK2 EFI Firmware - Cloud-Hypervisor build"
license="BSD MIT"
install -D "$builddir"/Build/CloudHvX64/"$RELEASE"_"$TOOLCHAIN"/FV/CLOUDHV.fd \
"$subpkgdir"/usr/share/cloudhv/CLOUDHV.fd
}
aavmf() {
pkgdesc="ARM (aarch64) Virtual Machine Firmware EFI"
license="BSD MIT"
dd if=/dev/zero \
of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_CODE.fd \
bs=1M seek=64 count=0
dd if="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/QEMU_EFI.fd \
of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_CODE.fd \
conv=notrunc
dd if=/dev/zero \
of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_VARS.fd \
bs=1M seek=64 count=0
for fw in "$builddir"/Build/*/"$RELEASE"_"$TOOLCHAIN"/FV/*.fd; do
install -D $fw "$subpkgdir"/usr/share/AAVMF/${fw##*/}
done
}
pyc() {
default_pyc
local IFS=$'\n'
amove $(find usr/share/edk2/Python -type d -name __pycache__)
}
sha512sums="
668411dc64a4a69afd145221c599fffc3797de26e801dda7d9b7ed92f755ff4fda4635dbc21c821f527e56eb71c4ad98c1fb079112a56d6b6eea5ff4d010e3cf edk2-0.0.202308.tar.gz
de6888577ceab7ab6915d792f3c48248cfa53357ccd310fc7f7eae4d25a932de8c7c23e5b898c9ebf61cf86cb538277273f2eb131a628b3bf0d46c9a3b9b6686 mipisyst-370b5944c046bab043dd8b133727b2135af7747a.tar.gz
86c99146b37236419b110db77dd3ac3992e6bed78c258f0cc3434ca233460b4e17c0ac81d7058547fe9cb72a9fd80ee56d4b4916bb731dbe2bbcf1c3d46bf31a openssl-3.0.9.tar.gz
3fedcd0060affb2d8fc7995894133cfed6a495c8717df0d30c89885223c38749f25743598383736036332dad6353c6a3f027f5a94a696660f7c4b607e33e534c SoftFloat-3e.zip
a7d4ab2c82b62ba01c86e59f53bd3896d661c9bfbb9db9598734155b66d5fe03eca4a2a9993a14d3bf555992c6d01ba5d7a15868ff9ec6ed98b8a9b3895bb7df build-hack.patch
ecbfc1ec3b732580c33c477191b71553247af1a68f1754bd363d179e0f5aabde93e3c5ec7f2574f9a9ffefef34e75787a2a87b1057b02cd206e8f0618a252871 0008-BaseTools-do-not-build-BrotliCompress-RH-only.patch
ecad98ff84ab307bda751c8a9a321e064ef880dc66b4d107e66aedbc4e14d00eed76770437e25fa9153dc30803f5cbbf1299329f56865a3b75d2c19f6615e68b 0009-MdeModulePkg-remove-package-private-Brotli-include-p.patch
"

View File

@ -0,0 +1,13 @@
VfrCompile segfaults with fortify enabled. It's probably broken.
diff -ru a/edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp b/edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp
--- edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp 2016-11-16 10:01:14.000000000 +0200
+++ edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp 2016-11-16 14:47:30.211978076 +0200
@@ -13,6 +13,7 @@
**/
+#define _FORTIFY_SOURCE 0
#include "stdio.h"
#include "stdlib.h"
#include "CommonLib.h"

View File

@ -1,14 +1,19 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
# Issues:
# - https://github.com/NVIDIA/open-gpu-kernel-modules/issues/468
# https://github.com/NVIDIA/open-gpu-kernel-modules/pull/609/files
# remove coreutils from makedepends
pkgname=nvidia-open-gpu
#pkgver=535.86.05
pkgver=525.125.06
pkgver=550.54.15
pkgrel=0
pkgdesc="NVIDIA Linux open GPU kernel modules"
url="https://github.com/NVIDIA/open-gpu-kernel-modules"
arch="x86_64"
license="MIT OR GPL-2.0"
makedepends="bash linux-headers linux-virt-dev"
makedepends="bash linux-headers linux-virt-dev coreutils"
options="!check"
source="nvidia-$pkgver.tar.gz::https://github.com/NVIDIA/open-gpu-kernel-modules/archive/refs/tags/$pkgver.tar.gz
@ -21,7 +26,7 @@ build() {
# Hack: when running the build inside a container, uname -r returns the host kernel
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-virt))
make KERNEL_UNAME=$KERNEL_VERSION
make KERNEL_UNAME=$KERNEL_VERSION || bash
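# WIP: "|| bash" drops into an interactive shell if the module build fails, presumably as a debug aid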
}
package() {
@ -36,7 +41,7 @@ package() {
mkdir -p "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel
for m in $modules; do
gzip -9 -c kernel-open/$m > "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel/$m.gz
gzip -9 -c kernel-open/$m > "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel/$m.gz
done
# Add some udev rules to automate node handling
@ -45,7 +50,7 @@ package() {
}
sha512sums="
4cedcf56e87c93354bc56d168de64b30866cf0b8fba2d2861ac60893b43f8140fa29626c4825af8250c420f9228fd1b64c93750cc50dd210040b4e7c4927e90a nvidia-525.125.06.tar.gz
54645a2c196a480e6da6740dd84784725fd81974bd59581dbcc21746244bd1d13910040dbea18cb0c40a41f6c586adb205d432ba452793bf430a3b721cca5f61 nvidia-550.54.15.tar.gz
b16b86ded8601ff802477e2b191c5728290014f90bb85ad6ec0e5b7e84f8004c467f5b6c66b80dc5d205fb70a3900ac286764a3829ca3ad3b8a3a5fd0b73a702 91-nvidia.rules
8335bd69c482da1f67b5cddd31a0b40d01b5c627aeca137b40ac7776cb3e7475767bec808a972ed739c26914207aca264324c41496f6fb579d910c8477f7cc1c create-nvidia-uvm-dev-node.sh
"

View File

@ -9,7 +9,7 @@ arch="noarch"
license="AGPL-3.0"
depends="logrotate syslog-ng neofetch monit file tiny-cloud dhcpcd"
options="!check"
subpackages="$pkgname-openrc $pkgname-aws"
subpackages="$pkgname-openrc $pkgname-aws $pkgname-nocloud"
install="$pkgname.post-install"
source="
@ -17,6 +17,8 @@ source="
boot.sh
cloudbender-early.init
cloudbender.init
cloud-aws.sh
cloud-nocloud.sh
zdt-sysctl.conf
https://raw.githubusercontent.com/pixelb/ps_mem/v3.14/ps_mem.py
syslog-ng.conf
@ -79,8 +81,12 @@ package() {
}
aws() {
# Basic AWS tools
mkdir -p "$subpkgdir"
# aws libs
install -Dm755 "$srcdir/cloud-aws.sh" "$pkgdir/usr/lib/cloudbender/cloud/aws.sh"
# other tools
install -Dm755 "$srcdir"/route53.py "$subpkgdir"/usr/sbin/route53.py
install -Dm755 "$srcdir"/uniq_hostname.py "$subpkgdir"/usr/sbin/uniq_hostname.py
install -Dm755 "$srcdir"/get_iam_sshkeys.py "$subpkgdir"/usr/sbin/get_iam_sshkeys.py
@ -90,20 +96,29 @@ aws() {
install -Dm755 "$srcdir"/monit_alert.sh.aws "$pkgdir"/usr/bin/monit_alert.sh
}
nocloud() {
mkdir -p "$subpkgdir"
# nocloud libs
install -Dm755 "$srcdir/cloud-nocloud.sh" "$pkgdir/usr/lib/cloudbender/cloud/nocloud.sh"
}
sha512sums="
2ddef702aae2783335c8b2836daa00a279d253c33b27170a0979d283d06d7ac666750fa026d2d2eed5759e7d6fd54ea898971fabe1e343ee1d09ffed42cf6355 common.sh
7f6a69a77d6a4a3c34928609108b7939cd43a892d72fb14bebc1d935cd66eda3bd625d15eebb4d6026715b36b12919fcaf863ed5f65ffdc0e2de9fc1b969cb3e boot.sh
c73970604c225199596f932fee3093d0cc9364f90b12f5490eac17643d12e65b4f662aae994ad9e3ebdbd4ee691e41a068fc988513377d6def0697fcd76285e2 common.sh
cf8b75a81bb35e853761d21b15b5b109f15350c54daaf66d2912541a20f758c3ca237d58932e5608d2d3867fe15a07ebd694fd1c313a8290d15afc2b27a575dd boot.sh
eb7d5b6f92f500dbaba04a915cdd8d66e90456ca86bed86b3a9243f0c25577a9aa42c2ba28c3cad9dda6e6f2d14363411d78eff35656c7c60a6a8646f43dcba5 cloudbender-early.init
336a211e6708432f185c911d0c990209c5af79f289d5cc331e0542e258e0309616e1386efd660d5439928562feaf3559970f66e950f9ce6e5aaf20c334596143 cloudbender.init
cac71c605324ad8e60b72f54b8c39ee0924205fcd1f072af9df92b0e8216bcde887ffec677eb2f0eacce3df430f31d5b5609e997d85f14389ee099fbde3c478f cloudbender.init
482438e6d443777636fd8f8f7b3d887c5664243d9547a47a755cbb3f56fac3a145be34e9ef6ce622bf0dcb28f5dda1a53c8448f8dbfb632210cc52a3b786b18c cloud-aws.sh
3a84b728d4169b92356f1da52922c6110efd5bdc2df90b64abe59f89a5de57cc85a81936bdead0cae5071c1ba1735bda1bd866018b5c3f7fd4ef155d0606ac2d cloud-nocloud.sh
06102e56c847637f705d0b29b05b07fbbb2bda9ba69f0a7fe1d716126d3b1c7922fb0df159199809908fa0dc143209775edb1dd5976faa84244dbcaa45f00364 zdt-sysctl.conf
76e6a4f309f31bfa07de2d3b1faebe5670722752e18157b69d6e868cbe9e85eda393aed0728b0347a01a810eee442844c78259f86ff71e3136a013f4cbfaaea4 ps_mem.py
44b2dcf90709a51e4d804d4bb22eb866aa678089647b33b253a48fe29861e4ae85312b23f8a7ab8a20ed184bd6f341e9b919f3d1586f1c0d9c350b8206b29e04 syslog-ng.conf
b86dec8c059642309b2f583191457b7fac7264b75dc5f4a06ad641de6b76589c0571b8b72b51519516ba7e68a128fe2da29b4a2a6dc77c252204675c51b2d128 syslog-ng.conf
484bdcf001b71ce5feed26935db437c613c059790b99f3f5a3e788b129f3e22ba096843585309993446a88c0ab5d60fd0fa530ef3cfb6de1fd34ffc828172329 syslog-ng.logrotate.conf
e86eed7dd2f4507b04050b869927b471e8de26bc7d97e7064850478323380a0580a92de302509901ea531d6e3fa79afcbf24997ef13cd0496bb3ee719ad674ee syslog-ng.apparmor
f8c052c7ec12c71937c7b8bc05d8374c588f345e303b30eda9c8612dff8f8f34a87a433648a3e9b85b278196ece198533b29680a303ff6478171d43f8e095189 dhcpcd-mtu.hook
e00a8f296c76446fe1241bf804c0108f47a2676f377a413ee9fede0943362a6582cad30fe13edd93f3d0daab0e2d7696553fb9458dca62adc05572dce339021a monitrc
c955dabe692c0a4a2fa2b09ab9096f6b14e83064b34ae8d22697096daf6551f00b590d837787d66ea1d0030a7cc30bef583cc4c936c980465663e73aec5fa2dc monit_alert.sh.aws
346b0170ada6cc1207ffb7b8ef138a1570a63c7df4d57618aa4b6b6c0d2df2197b0f5b23578ec83c641ee5e724865ac06985222e125809c990467426a0851b72 neofetch.conf
2c02a1d454881dd7197548286c6cf24c1453dd9d726f3e5445703c12414853b0e12205e5b6a0c3ae09b76097d2bdfcfd6e1bc9a122dd9f66c6d6d03ab41f748a neofetch.conf
532b8e2eb04942ab20bdc36b5dea1c60239fcbfcb85706123f3e05c18d65c938b85e9072d964ae5793177625a8db47b532db1f5bd5ed5ecbb70d5a331666ff54 zdt-ascii.txt
c3e72cd92936b03f2b9eab5e97e9a12fcddcdf2c943342e42e7702e2d2407e00859c62dc9b4de3378688d2f05458aa5c104272af7ab13e53a62f1676d1a9a1b4 profile
816049360aa442f9e9aa4d6525795913cfe3dc7c6c14dc4ccad59c0880500f9d42f198edc442fe036bc84ba2690d9c5bc8ae622341d8276b3f14947db6b879b1 route53.py

View File

@ -26,7 +26,7 @@ setup_var() {
case "$CLOUD" in
aws)
# on AWS look for sdx/xvdx
# on AWS look for sdx/xvdx
if [ "$d" = "/dev/sdx" -o "$d" = "/dev/xvdx" ]; then
# check volume for existing filesystem
type=$(file -Lbs $d)
@ -43,6 +43,10 @@ setup_var() {
log -i -t early info "mounted $d at /var"
fi
;;
nocloud)
# Todo: should we try to mount a special tagged block device as /var ?
return 0
;;
*)
ewarn "Unsupported cloud: $CLOUD"
return 1

View File

@ -0,0 +1,205 @@
#!/bin/bash
# Todo: This should go into a yaml file
query_imds() {
MAC=$(imds meta-data/mac)
AVAILABILITY_ZONE=$(imds meta-data/placement/availability-zone)
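# the region is the availability zone minus its trailing letter, e.g. eu-central-1a -> eu-central-1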
REGION=$(echo ${AVAILABILITY_ZONE} | sed "s/[a-z]$//")
INSTANCE_ID=$(imds meta-data/instance-id)
cat <<EOF >> /var/lib/cloud/meta-data
AVAILABILITY_ZONE=$AVAILABILITY_ZONE
REGION=$REGION
INSTANCE_ID=$INSTANCE_ID
IP_ADDRESS=$(imds meta-data/local-ipv4)
PUBLIC_IP_ADDRESS=$(imds meta-data/public-ipv4 || true)
DEFAULT_GW_INTERFACE=$(ip -o route get 8.8.8.8 | awk '{print $5}')
MAC=$MAC
VPC_CIDR_RANGE=$(imds meta-data/network/interfaces/macs/${MAC}/vpc-ipv4-cidr-block)
SUBNET=$(imds meta-data/network/interfaces/macs/${MAC}/subnet-ipv4-cidr-block)
_META_HOSTNAME=$(imds meta-data/hostname)
DOMAIN_NAME=\${_META_HOSTNAME#*.}
AWS_ACCOUNT_ID=$(imds meta-data/network/interfaces/macs/${MAC}/owner-id)
INSTANCE_LIFE_CYCLE=$(imds meta-data/instance-life-cycle)
INSTANCE_TYPE=$(imds meta-data/instance-type)
EOF
}
# Todo: This should go into a yaml file
get_tags() {
# via metadata AWS restricts tags to NOT have " " or "/" ;-(
# Replace all /:.- with _ for valid variable names
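# e.g. a (made-up) tag key "zdt:cloudbender.version" ends up as ZDT_CLOUDBENDER_VERSION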
for key in $(imds meta-data/tags/instance); do
value="$(imds meta-data/tags/instance/$key)"
key=$(echo ${key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
done
#while read _key value; do
# key=$(echo ${_key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
# echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
#done < <(aws ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" --query 'Tags[*].[Key,Value]' --region $REGION --output text)
}
# extract user-data args and cloud meta-data into /var/lib/cloud/meta-data
get_meta_data() {
if [ ! -f /var/lib/cloud/meta-data ]; then
echo '#!/bin/bash' > /var/lib/cloud/meta-data
query_imds
get_tags
fi
if [ ! -f /etc/cloudbender.conf ]; then
bash /var/lib/cloud/user-data extract_parameters
fi
}
import_meta_data() {
. /etc/cloudbender.conf
. /var/lib/cloud/meta-data
export AWS_DEFAULT_REGION=$REGION
export AWS_DEFAULT_OUTPUT=text
# Enable LaunchHooks unless DEBUG is set
is_enabled $ZDT_CLOUDBENDER_DEBUG || LAUNCH_HOOK="CloudBenderLaunchHook"
# Workaround for current CFN ASG_<parameter> hack
_key=$(echo $AWS_CLOUDFORMATION_LOGICAL_ID | tr '[:lower:]' '[:upper:]')
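# e.g. with a (made-up) logical ID "Worker", WORKER_CUSTOMHOSTNAME and WORKER_VOLUMES take precedence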
[ -n "$(eval echo \$${_key}_CUSTOMHOSTNAME)" ] && CUSTOMHOSTNAME="$(eval echo \$${_key}_CUSTOMHOSTNAME)"
[ -n "$(eval echo \$${_key}_VOLUMES)" ] && VOLUMES="$(eval echo \$${_key}_VOLUMES)"
return 0
}
# various early volume functions
attach_ebs() {
local volId="$1"
local device="$2"
local tries=30
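# poll up to 30 times, 5s apart: attach once the volume is "available", force-detach it first if it is still attached elsewhere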
while true; do
_json="$(aws ec2 describe-volumes --volume-ids $volId --region $REGION --output json)"
rc=$?; [ $rc -ne 0 ] && return $rc
vol_status=$(echo "$_json" | jq -r .Volumes[].State)
attachedId=$(echo "$_json" | jq -r .Volumes[].Attachments[].InstanceId)
[ "$attachedId" = "$INSTANCE_ID" ] && break
if [ "$vol_status" = "available" ]; then
aws ec2 attach-volume --volume-id "$volId" --instance-id "$INSTANCE_ID" --region "$REGION" --device "$device" > /dev/null
rc=$?; [ $rc -ne 0 ] && return $rc
break
fi
# if attached but not to us -> detach
if [ "$vol_status" = "in-use" ]; then
aws ec2 detach-volume --volume-id "$volId" --region "$REGION" --force
rc=$?; [ $rc -ne 0 ] && return $rc
fi
((tries=tries-1))
[ $tries -eq 0 ] && return 1
sleep 5
done
}
asg_heartbeat() {
[ -n "$LAUNCH_HOOK" ] && aws autoscaling record-lifecycle-action-heartbeat --instance-id $INSTANCE_ID --lifecycle-hook-name $LAUNCH_HOOK --auto-scaling-group-name $AWS_AUTOSCALING_GROUPNAME || true
}
setup_sns_alarms() {
# store SNS message json template
cat <<EOF > /var/lib/cloud/sns_alarm.json
{
"Source": "CloudBender",
"AWSAccountId": "$AWS_ACCOUNT_ID",
"Region": "$REGION",
"Artifact": "$ARTIFACT",
"Asg": "$AWS_AUTOSCALING_GROUPNAME",
"Instance": "$INSTANCE_ID",
"ip": "$IP_ADDRESS"
}
EOF
cat <<'EOF' > /var/lib/cloud/sns_alarm.sh
#!/bin/bash
SUBJECT=$1
MSG=$2
LEVEL=${3:-Info}
ATTACHMENT=${4:-""}
EMOJI=${5:-""}
EOF
if [ -n "$ALARMSNSARN" ]; then
cat <<EOF >> /var/lib/cloud/sns_alarm.sh
jq -M --arg subject "\$SUBJECT" --arg level "\$LEVEL" --arg msg "\$MSG" --arg attachment "\$ATTACHMENT" --arg emoji "\$EMOJI" --arg hostname "\$HOSTNAME" '.Subject = \$subject | .Level = \$level | .Message = \$msg | .Attachment = \$attachment | .Emoji = \$emoji | .Hostname = \$hostname' < /var/lib/cloud/sns_alarm.json | sed -e 's/\\\\\\\\/\\\\/g' > /tmp/sns.json
aws sns publish --region ${REGION} --target-arn $ALARMSNSARN --message file:///tmp/sns.json
EOF
fi
chmod +x /var/lib/cloud/sns_alarm.sh
}
# associate EIP
# return 0 if we attached an EIP
# return 1 if the public IP did NOT change or on any other error
associate_eip() {
local instance_id=$1
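# strip an optional /32 suffix and keep only a plain dotted-quad IPv4 address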
local eip=$(echo $2 | sed -e 's/\/32//' | grep -E -o "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)") || true
local current_instance
if [ -n "$eip" ]; then
if [ "$eip" != "0.0.0.0" ]; then
read eip_alloc_id eip_assoc_id current_instance < <(aws ec2 describe-addresses --public-ips $eip --query 'Addresses[*].[AllocationId,AssociationId,InstanceId]' || true)
# If we already own and have the EIP attached -> done
[ "$instance_id" == "$current_instance" ] && return
if [ ! -z "$eip_alloc_id" ]; then
if [[ "$eip_assoc_id" =~ ^eipassoc- ]]; then
log -t user-data info "EIP $eip already associated via Association ID ${eip_assoc_id}. Disassociating."
retry 3 10 aws ec2 disassociate-address --association-id $eip_assoc_id
fi
log -t user-data info "Associating Elastic IP $eip via Allocation ID $eip_alloc_id with Instance $instance_id"
aws ec2 associate-address --no-allow-reassociation --instance-id $instance_id --allocation-id $eip_alloc_id
return
else
log -t user-data warn "Elastic IP $eip address not found."
fi
else
log -t user-data info "0.0.0.0 requested, keeping AWS assigned IP."
fi
else
log -t user-data debug "Invalid or no ElasticIP defined. Skip"
fi
return 1
}
# Disable the source/dest check so the instance can forward traffic not addressed to itself
disable_source_dest_check() {
aws ec2 modify-instance-attribute --instance-id ${INSTANCE_ID} --source-dest-check "{\"Value\": false}"
}
# Register this instance in the given route tables
register_routes() {
local rtb_id_list="$1"
local route_cidr="$2"
for cidr in ${route_cidr//,/ }; do
if [ "$cidr" != "$VPC_CIDR_RANGE" ]; then
for rt in ${rtb_id_list//,/ }; do
if [[ "$rt" =~ ^rtb-[a-f0-9]*$ ]]; then
aws ec2 create-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID} || \
aws ec2 replace-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID}
else
log -t user-data warn "Invalid Route Table ID: $rt"
fi
done
fi
done
}

View File

@ -0,0 +1,9 @@
#!/bin/bash
get_meta_data() {
SSHPORT=$(imds meta-data/cloudbender/sshPort)
}
import_meta_data() {
echo Noop
}

View File

@ -2,7 +2,7 @@
# vim:set ts=8 noet ft=sh:
#
description="CloudBender - setup meta_data, mount additional volumes, send shutdown messages"
description="CloudBender - main phase"
depend() {
need net
@ -27,12 +27,14 @@ start() {
# add optional ssh keys, eg. via IAM for AWS
configure_sshd
set_hostname $CUSTOMHOSTNAME
if [ "$CLOUD" == "aws" ]; then
set_hostname $CUSTOMHOSTNAME
# if fixed hostname use persistent sshd keys
[ -n "$CUSTOMHOSTNAME" ] && persistent_sshd_hostkeys "/_ssh/${ARTIFACT}/${CONGLOMERATE}/${HOSTNAME}"
# if fixed hostname use persistent sshd keys
[ -n "$CUSTOMHOSTNAME" ] && persistent_sshd_hostkeys "/_ssh/${ARTIFACT}/${CONGLOMERATE}/${HOSTNAME}"
associate_eip $INSTANCE_ID $ELASTICIP && PUBLIC_IP_ADDRESS=$ELASTICIP
associate_eip $INSTANCE_ID $ELASTICIP && PUBLIC_IP_ADDRESS=$ELASTICIP
fi
register_service_dns

View File

@ -1,14 +1,6 @@
# We build on top of tiny-cloud
. /lib/tiny-cloud/common
IMDS_ENDPOINT="169.254.169.254"
. /lib/tiny-cloud/cloud/"$CLOUD"/imds
_imds() {
wget --quiet --timeout 1 --output-document - \
--header "$(_imds_header)" \
"http://$IMDS_ENDPOINT/$IMDS_URI/$1$IMDS_QUERY"
}
. /usr/lib/cloudbender/cloud/"$CLOUD".sh
# boolean flags
is_enabled() {
@ -21,77 +13,6 @@ is_enabled() {
return 1
}
# Todo: This should go into a yaml file
query_imds() {
MAC=$(_imds meta-data/mac)
AVAILABILITY_ZONE=$(_imds meta-data/placement/availability-zone)
REGION=$(echo ${AVAILABILITY_ZONE} | sed "s/[a-z]$//")
INSTANCE_ID=$(_imds meta-data/instance-id)
cat <<EOF >> /var/lib/cloud/meta-data
AVAILABILITY_ZONE=$AVAILABILITY_ZONE
REGION=$REGION
INSTANCE_ID=$INSTANCE_ID
IP_ADDRESS=$(_imds meta-data/local-ipv4)
PUBLIC_IP_ADDRESS=$(_imds meta-data/public-ipv4 || true)
DEFAULT_GW_INTERFACE=$(ip -o route get 8.8.8.8 | awk '{print $5}')
MAC=$MAC
VPC_CIDR_RANGE=$(_imds meta-data/network/interfaces/macs/${MAC}/vpc-ipv4-cidr-block)
SUBNET=$(_imds meta-data/network/interfaces/macs/${MAC}/subnet-ipv4-cidr-block)
_META_HOSTNAME=$(_imds meta-data/hostname)
DOMAIN_NAME=\${_META_HOSTNAME#*.}
AWS_ACCOUNT_ID=$(_imds meta-data/network/interfaces/macs/${MAC}/owner-id)
INSTANCE_LIFE_CYCLE=$(_imds meta-data/instance-life-cycle)
INSTANCE_TYPE=$(_imds meta-data/instance-type)
EOF
}
# Todo: This should go into a yaml file
get_tags() {
# via metadata AWS restricts tags to NOT have " " or "/" ;-(
# Replace all /:.- with _ for valid variable names
for key in $(_imds meta-data/tags/instance); do
value="$(_imds meta-data/tags/instance/$key)"
key=$(echo ${key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
done
#while read _key value; do
# key=$(echo ${_key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
# echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
#done < <(aws ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" --query 'Tags[*].[Key,Value]' --region $REGION --output text)
}
# extract user-data args and cloud meta-data into /var/lib/cloud/meta-data
get_meta_data() {
if [ ! -f /var/lib/cloud/meta-data ]; then
echo '#!/bin/bash' > /var/lib/cloud/meta-data
query_imds
get_tags
fi
if [ ! -f /etc/cloudbender.conf ]; then
bash /var/lib/cloud/user-data extract_parameters
fi
}
import_meta_data() {
. /etc/cloudbender.conf
. /var/lib/cloud/meta-data
export AWS_DEFAULT_REGION=$REGION
export AWS_DEFAULT_OUTPUT=text
# Enabled LaunchHooks if not DEBUG
is_enabled $ZDT_CLOUDBENDER_DEBUG || LAUNCH_HOOK="CloudBenderLaunchHook"
# Workaround for current CFN ASG_<parameter> hack
_key=$(echo $AWS_CLOUDFORMATION_LOGICAL_ID | tr '[:lower:]' '[:upper:]')
[ -n "$(eval echo \$${_key}_CUSTOMHOSTNAME)" ] && CUSTOMHOSTNAME="$(eval echo \$${_key}_CUSTOMHOSTNAME)"
[ -n "$(eval echo \$${_key}_VOLUMES)" ] && VOLUMES="$(eval echo \$${_key}_VOLUMES)"
return 0
}
# setup_instance, various OS tweaks impossible to do via AMI baking
setup_instance() {
@ -99,7 +20,7 @@ setup_instance() {
[ -f /etc/machine-id ] || uuidgen > /etc/machine-id
# add and mount bpf file system
add_once /etc/fstab "bpffs /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0"
add_once /etc/fstab "bpffs /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0"
mount -a
# Ensure certain mounts are shared to run containers later, eg. cilium, falco
@ -126,9 +47,9 @@ setup_instance() {
esac
}
################
################
# IAM SSH KEYS #
################
################
configure_sshd() {
# Change Listen port
local port=${SSHPORT:-"22"}
@ -149,6 +70,9 @@ configure_sshd() {
einfo "added $group to SSH admin keys"
fi
;;
nocloud)
return 0
;;
*)
ewarn "Unsupported Cloud: $CLOUD"
# return 1
@ -230,38 +154,6 @@ set_hostname() {
fi
}
# various early volume functions
attach_ebs() {
local volId="$1"
local device="$2"
local tries=30
while true; do
_json="$(aws ec2 describe-volumes --volume-ids $volId --region $REGION --output json)"
rc=$?; [ $rc -ne 0 ] && return $rc
vol_status=$(echo "$_json" | jq -r .Volumes[].State)
attachedId=$(echo "$_json" | jq -r .Volumes[].Attachments[].InstanceId)
[ "$attachedId" = "$INSTANCE_ID" ] && break
if [ "$vol_status" = "available" ]; then
aws ec2 attach-volume --volume-id "$volId" --instance-id "$INSTANCE_ID" --region "$REGION" --device "$device" > /dev/null
rc=$?; [ $rc -ne 0 ] && return $rc
break
fi
# if attached but not to us -> detach
if [ "$vol_status" = "in-use" ]; then
aws ec2 detach-volume --volume-id "$volId" --region "$REGION" --force
rc=$?; [ $rc -ne 0 ] && return $rc
fi
((tries=tries-1))
[ $tries -eq 0 ] && return 1
sleep 5
done
}
_parse_volume() {
# Todo: proper checks once all is yaml
@ -365,10 +257,6 @@ init_passphrase() {
{ xxd -l16 -p /dev/random > $_PPFILE; chmod 600 $_PPFILE; put_secret $_URL "$(cat $_PPFILE)"; }
}
asg_heartbeat() {
[ -n "$LAUNCH_HOOK" ] && aws autoscaling record-lifecycle-action-heartbeat --instance-id $INSTANCE_ID --lifecycle-hook-name $LAUNCH_HOOK --auto-scaling-group-name $AWS_AUTOSCALING_GROUPNAME || true
}
# upload various useful logs to s3 if configured
upload_debug_logs(){
[ -z $ZDT_CLOUDBENDER_DEBUG_REMOTELOGS ] && return 0
@ -392,38 +280,6 @@ upload_debug_logs(){
return 0
}
setup_sns_alarms() {
# store SNS message json template
cat <<EOF > /var/lib/cloud/sns_alarm.json
{
"Source": "CloudBender",
"AWSAccountId": "$AWS_ACCOUNT_ID",
"Region": "$REGION",
"Artifact": "$ARTIFACT",
"Asg": "$AWS_AUTOSCALING_GROUPNAME",
"Instance": "$INSTANCE_ID",
"ip": "$IP_ADDRESS"
}
EOF
cat <<'EOF' > /var/lib/cloud/sns_alarm.sh
#!/bin/bash
SUBJECT=$1
MSG=$2
LEVEL=${3:-Info}
ATTACHMENT=${4:-""}
EMOJI=${5:-""}
EOF
if [ -n "$ALARMSNSARN" ]; then
cat <<EOF >> /var/lib/cloud/sns_alarm.sh
jq -M --arg subject "\$SUBJECT" --arg level "\$LEVEL" --arg msg "\$MSG" --arg attachment "\$ATTACHMENT" --arg emoji "\$EMOJI" --arg hostname "\$HOSTNAME" '.Subject = \$subject | .Level = \$level | .Message = \$msg | .Attachment = \$attachment | .Emoji = \$emoji | .Hostname = \$hostname' < /var/lib/cloud/sns_alarm.json | sed -e 's/\\\\\\\\/\\\\/g' > /tmp/sns.json
aws sns publish --region ${REGION} --target-arn $ALARMSNSARN --message file:///tmp/sns.json
EOF
fi
chmod +x /var/lib/cloud/sns_alarm.sh
}
exit_trap() {
set +e
@ -479,7 +335,7 @@ exit_trap() {
# timestamp being done
end_uptime=$(awk '{print $1}' < /proc/uptime)
log -t user-data info "Exiting user-data. Duration: $(echo "$end_uptime-$start_uptime" | bc) seconds"
log -t user-data info "Exiting user-data. $end_uptime seconds after boot. Duration: $(echo "$end_uptime-$start_uptime" | bc)"
# if we ran into error, either upload debug files or poweroff
if [ $ERR_CODE -ne 0 ]; then
@ -602,73 +458,6 @@ EOF
}
# associate EIP
# return 0 if we attached an EIP
# return 1 if we the public IP did NOT change or other error
associate_eip() {
local instance_id=$1
local eip=$(echo $2 | sed -e 's/\/32//' | grep -E -o "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)") || true
local current_instance
if [ -n "$eip" ]; then
if [ "$eip" != "0.0.0.0" ]; then
read eip_alloc_id eip_assoc_id current_instance < <(aws ec2 describe-addresses --public-ips $eip --query 'Addresses[*].[AllocationId,AssociationId,InstanceId]' || true)
# If we already own and have the EIP attached -> done
[ "$instance_id" == "$current_instance" ] && return
if [ ! -z "$eip_alloc_id" ]; then
if [[ "$eip_assoc_id" =~ ^eipassoc- ]]; then
log -t user-data info "EIP $eip already associated via Association ID ${eip_assoc_id}. Disassociating."
retry 3 10 aws ec2 disassociate-address --association-id $eip_assoc_id
fi
log -t user-data info "Associating Elastic IP $eip via Allocation ID $eip_alloc_id with Instance $instance_id"
aws ec2 associate-address --no-allow-reassociation --instance-id $instance_id --allocation-id $eip_alloc_id
return
else
log -t user-data warn "Elastic IP $eip address not found."
fi
else
log -t user-data info "0.0.0.0 requested, keeping AWS assigned IP."
fi
else
log -t user-data debug "Invalid or no ElasticIP defined. Skip"
fi
return 1
}
# Accept incoming traffic for everything
disable_source_dest_check() {
aws ec2 modify-instance-attribute --instance-id ${INSTANCE_ID} --source-dest-check "{\"Value\": false}"
}
# Register ourself at route tables
register_routes() {
local rtb_id_list="$1"
local route_cidr="$2"
for cidr in ${route_cidr//,/ }; do
if [ "$cidr" != "$VPC_CIDR_RANGE" ]; then
for rt in ${rtb_id_list//,/ }; do
if [[ "$rt" =~ ^rtb-[a-f0-9]*$ ]]; then
aws ec2 create-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID} || \
aws ec2 replace-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID}
else
log -t user-data warn "Invalid Route Table ID: $rt"
fi
done
fi
done
}
setup_prometheus() {
rc-update add node-exporter default
rc-service node-exporter start
@ -711,7 +500,7 @@ EOF
Send_options true
Require_ack_response true
EOF
LOG_FILES=$LOGGING_FILES
## TODO:

View File

@ -1,5 +1,5 @@
print_info() {
echo -e "\n"
#echo -e "\n"
prin "$(color 1)Welcome to Alpine - ZeroDownTime edition"
echo

View File

@ -6,7 +6,7 @@
options { chain_hostnames(off); flush_lines(0); use_dns(no); use_fqdn(no);
dns_cache(no); owner("root"); group("adm"); perm(0640);
stats(freq(43200)); bad_hostname("^gconfd$"); frac-digits(6);
stats(freq(43200)); bad_hostname("^gconfd$"); frac-digits(9); keep-timestamp(no);
};
source s_sys { system(); internal();};

View File

@ -1,5 +1,7 @@
#!/bin/sh
. /lib/tiny-cloud/common
# Enable SSH keepalive
sed -i -e 's/^[\s#]*TCPKeepAlive\s.*/TCPKeepAlive yes/' -e 's/^[\s#]*ClientAliveInterval\s.*/ClientAliveInterval 60/' /etc/ssh/sshd_config
echo 'enabled SSH keep alives'
@ -9,6 +11,10 @@ sed -i -e 's/^[\s#]*rc_cgroup_mode=.*/rc_cgroup_mode="unified"/' /etc/rc.conf
sed -i -e 's/^[\s#]*rc_logger=.*/rc_logger="YES"/' /etc/rc.conf
echo 'enabled cgroupv2, openRC logging'
# speed up dhcpcd and chronyd
add_once /etc/dhcpcd.conf noarp >/dev/null
sed -i -e 's/^[\s#]*FAST_STARTUP=.*/FAST_STARTUP=yes/' /etc/conf.d/chronyd
# OpenRC parallel - causes too much chaos
#sed -i -e 's/^[\s#]*rc_parallel=.*/rc_parallel="YES"/' /etc/rc.conf
#echo 'enable parallel openRC'
@ -25,7 +31,7 @@ cp /lib/zdt/syslog-ng.apparmor /etc/apparmor.d/local/sbin.syslog-ng
echo 'syslog-ng: all to /var/log/messages as json, rotate hourly'
# use init to spawn monit
echo ":2345:respawn:/usr/bin/monit -Ic /etc/monitrc.zdt" >> /etc/inittab
add_once /etc/inittab ":2345:respawn:/usr/bin/monit -Ic /etc/monitrc.zdt" >/dev/null
echo 'Enable monit via inittab'
# QoL - color prompt even for doas bash