Compare commits

...

8 Commits

55 changed files with 1950 additions and 5609 deletions

View File

@ -1,13 +1,11 @@
FROM alpine:3.18
ARG ALPINE="v3.18"
FROM alpine:3.19
ARG ALPINE="v3.19"
ARG BUILDUSER=alpine
RUN echo "http://dl-cdn.alpinelinux.org/alpine/${ALPINE}/main" > /etc/apk/repositories && \
echo "http://dl-cdn.alpinelinux.org/alpine/${ALPINE}/community" >> /etc/apk/repositories && \
echo "@edge-main http://dl-cdn.alpinelinux.org/alpine/edge/main" >> /etc/apk/repositories && \
echo "@edge-community http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories && \
echo "@kubezero https://cdn.zero-downtime.net/alpine/${ALPINE}/kubezero" >> /etc/apk/repositories && \
wget -q -O /etc/apk/keys/stefan@zero-downtime.net-61bb6bfb.rsa.pub https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub
echo "@edge-community http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories
RUN apk -U --no-cache upgrade && \
apk --no-cache add \
@ -23,8 +21,10 @@ RUN adduser -D $BUILDUSER && \
echo "permit nopass :abuild" > /etc/doas.d/doas.conf && \
install -d -g abuild -m 775 /var/cache/distfiles && \
install -d -g abuild -m 775 /packages && \
echo -e "$BUILDUSER:1:999\n$BUILDUSER:1001:64535" > /etc/subuid && \
echo -e "$BUILDUSER:1:999\n$BUILDUSER:1001:64535" > /etc/subgid
echo -e "$BUILDUSER:1001:64535" > /etc/subuid && \
echo -e "$BUILDUSER:1001:64535" > /etc/subgid && \
echo "@kubezero https://cdn.zero-downtime.net/alpine/${ALPINE}/kubezero" >> /etc/apk/repositories && \
wget -q -O /etc/apk/keys/stefan@zero-downtime.net-61bb6bfb.rsa.pub https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub
COPY abuilder aarch64-toolchain.sh /usr/bin/
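
The builder image above now tracks Alpine 3.19 and adds the KubeZero repository and signing key only after the build user and subuid/subgid ranges are set up. A minimal sketch of building and entering the image locally, assuming the Dockerfile sits in the current directory (the image tag and volume mount are illustrative, not taken from this diff):

podman build -t kubezero-abuild:v3.19 .
# drop into the builder with the package output directory mounted
podman run --rm -it -v "$PWD/packages:/packages" kubezero-abuild:v3.19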

View File

@ -4,7 +4,7 @@ REGION := us-east-1
include .ci/podman.mk
BUILDER := v3.18.4
BUILDER := v3.19.1
PKG := '*'
CF_DIST := E11OFTOA3L8IVY

View File

@ -11,7 +11,7 @@ if [ "$1" = 'aarch64-toolchain' ]; then
else
# Set ENV for cross compile for aarch64
if [ "$2" = "cross-arm64" ]; then
ALPINE="v3.18"
ALPINE="v3.19"
TARGET_ARCH=aarch64
SUDO_APK=abuild-apk
APORTS=/home/alpine/aports
@ -38,7 +38,7 @@ else
# If checksum is OK, build package
APKBUILD=$pkg abuild verify && rc=$? || rc=$?
if [ $rc -eq 0 ]; then
CHOST=$TARGET_ARCH APKBUILD=$pkg abuild -r
APKBUILD=$pkg abuild -r
else
APKBUILD=$pkg abuild checksum
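
The cross-compile branch of the abuilder script now targets v3.19 and drops the explicit CHOST override, leaving target selection to abuild's own environment handling. For orientation, a minimal sketch of the verify/build/checksum flow the script wraps, with the package glob purely illustrative:

for pkg in */APKBUILD; do
    if APKBUILD=$pkg abuild verify; then   # source checksums match
        APKBUILD=$pkg abuild -r            # build, installing missing makedepends
    else
        APKBUILD=$pkg abuild checksum      # regenerate checksums for review
    fi
done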

View File

@ -1,7 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=aws-iam-authenticator
pkgver=0.6.10
pkgver=0.6.14
pkgrel=0
pkgdesc="AWS aws-iam-authenticator"
url="https://github.com/kubernetes-sigs/aws-iam-authenticator"
@ -20,5 +20,5 @@ package() {
}
sha512sums="
2b5da6dfbec1f5483ead8da280de8dd719b71157a9bfa4751c015dbc77a4f4c01a59486015cd2231ffb4232a0bf4a35ef843007605dd0b9fffd51ca0208f8fda aws-iam-authenticator-0.6.10.tar.gz
26a6b394fbe767910f605a356032338a4ec254b81cd470796e3137e3595fef338bd213dee8d956c8d23e16f5508741e78664cd0f8b1acd97321d2fb5b7b723af aws-iam-authenticator-0.6.14.tar.gz
"

View File

@ -1,7 +1,11 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
# https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/runtime/aws-neuronx-dkms/index.html#neuron-driver-release-notes
#
# Todo: needs fix of https://github.com/aws-neuron/aws-neuron-sdk/issues/843
#
pkgname=aws-neuron-driver
pkgver=2.10.11.0
pkgver=2.16.7.0
pkgrel=0
pkgdesc="Linux Kernel module for AWS Neuron INF instances"
url="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/index.html#"
@ -10,8 +14,6 @@ license="GPL-2.0"
makedepends="bash xz linux-headers linux-virt-dev"
options="!check"
# https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/neuron-driver.html#neuron-driver-release-notes
# apt-get download --print-uris aws-neuron-dkms | cut -d' ' -f1
source="$pkgname-$pkgver.deb::https://apt.repos.neuron.amazonaws.com/pool/main/a/aws-neuronx-dkms/aws-neuronx-dkms_"$pkgver"_amd64.deb"
unpack() {
@ -28,6 +30,9 @@ build() {
# Hack: when running the build inside a container, uname -r returns the host kernel
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-virt))
unset CFLAGS CPPFLAGS CXXFLAGS
unset LDFLAGS
make KERNEL_SRC_DIR=/lib/modules/$KERNEL_VERSION/build
}
@ -42,5 +47,5 @@ package() {
}
sha512sums="
0fdbc1ebd12044be77714affd427c198f72ce04f0236a100e49642fbdb143a4e6c1156f4555ac0fe8baa6bea09420408bbb1cfd2857f29d54e615b22193afd0d aws-neuron-driver-2.10.11.0.deb
968eb60bcd6826fa7dba827c29eda0033c626e016c7a57389a821f64c966d076a85da885f772e937098885853fe50765013a1368aab8b0ca85d732c34e60a26e aws-neuron-driver-2.16.7.0.deb
"

View File

@ -3,7 +3,7 @@
# Contributor: TBK <alpine@jjtc.eu>
# Maintainer: ungleich <foss@ungleich.ch>
pkgname=cri-o
pkgver=1.26.4
pkgver=1.28.4
pkgrel=0
pkgdesc="OCI-based implementation of Kubernetes Container Runtime Interface"
url="https://github.com/cri-o/cri-o/"
@ -14,8 +14,8 @@ license="Apache-2.0"
options="net chmod-clean !check"
depends="
cni-plugins
conntrack-tools
conmon
conntrack-tools
containers-common
iproute2
iptables
@ -33,6 +33,7 @@ makedepends="
libselinux-dev
lvm2-dev
ostree-dev
sqlite-dev
tzdata
"
checkdepends="bats cri-tools jq parallel sudo conmon"
@ -40,9 +41,10 @@ subpackages="
$pkgname-doc
$pkgname-bash-completion
$pkgname-zsh-completion
$pkgname-fish-completion
$pkgname-openrc
"
#$pkgname-fish-completion
source="
$pkgname-$pkgver.tar.gz::https://github.com/cri-o/cri-o/archive/v$pkgver/cri-o-$pkgver.tar.gz
crio.conf
@ -70,7 +72,7 @@ export GOBIN="$GOPATH/bin"
build() {
# https://github.com/cri-o/cri-o/blob/master/install.md#build-tags
make BUILDTAGS="seccomp selinux apparmor containers_image_openpgp containers_image_ostree_stub"
make BUILDTAGS="libsqlite3 seccomp selinux apparmor containers_image_openpgp containers_image_ostree_stub"
}
check() {
@ -78,11 +80,17 @@ check() {
}
package() {
make DESTDIR="$pkgdir" PREFIX=/usr CRICTL_CONFIG_DIR="/etc/crio" OCIUMOUNTINSTALLDIR="/etc/crio" install
make \
DESTDIR="$pkgdir" \
PREFIX=/usr \
CRICTL_CONFIG_DIR="/etc/crio" \
OCIUMOUNTINSTALLDIR="/etc/crio" \
FISHINSTALLDIR=/usr/share/fish/vendor_completions.d \
install.bin-nobuild install.man-nobuild install.completions install.config-nobuild
# We want it in etc so apk does not overwrite it
mkdir -p "$pkgdir"/usr/share/oci-umount/oci-umount.d/
ln -sf /etc/crio/crio-umount.conf "$pkgdir"/usr/share/oci-umount/oci-umount.d/crio-umount.conf
ln -sf ../../../../etc/crio/crio-umount.conf "$pkgdir"/usr/share/oci-umount/oci-umount.d/crio-umount.conf
# The CNI plugins are recommended to be installed as examples
install -Dm644 contrib/cni/*.conflist -t "$pkgdir"/usr/share/doc/cri-o/examples/cni/
@ -95,13 +103,13 @@ package() {
}
sha512sums="
99bf6b438da236491fcc33ddaa28aeb381fc40c04138918be98fca1117132c5616598e4d758a6852071a67e4884895494b091c9206490a964a559723f77b84e7 cri-o-1.26.4.tar.gz
8d27211a4baad86d5251faa396a23d78d2962de894124be851172d6e85fbf3c0da57ec08f70840c7d8526dc6daa93999485a8d92a1d2c33b374eff84b1e063ae cri-o-1.28.4.tar.gz
1f60719677295c9c5c615eb25d9159bde0af68a132eee67747f57fe76642d457c98c896c6189f85637d7b4ac24ba55fd9eaeb1699f43c3c5077b645f72a479fb crio.conf
cfc4c144931400023e6642fa0b9880f0e3c09c187542905ca56044cedafb5e1f1d49708e4352233abee4e02181155c02fc9688bf93202fc8d80dfc1ffc90699b crio.initd
e9149cc2ddd24328c5290d3aea895c01e2798e066897535384f615a556496acdd52a603a0f4ac3c4c70bd5c363592f23c8b4d1987bf738300112fc62e1def555 crio.initd
1115228546a696eeebeb6d4b3e5c3152af0c99a2559097fc5829d8b416d979c457b4b1789e0120054babf57f585d3f63cbe49949d40417ae7aab613184bf4516 crio.logrotated
0a567dfa431ab1e53f2a351689be8d588a60cc5fcdbda403ec4f8b6ab9b1c18ad425f6c47f9a5ab1491e3a61a269dc4efa6a59e91e7521fa2b6bb165074aa8e0 cni-plugins-path.patch
f9577aa7b1c90c6809010e9e406e65092251b6e82f6a0adbc3633290aa35f2a21895e1a8b6ba4b6375dcad3e02629b49a34ab16387e1c36eeb32c8f4dac74706 makefile-fix-install.patch
1c1bfa5feeb0c5ddc92271a5ef80edc38d56afa1574ffc124605d5bb227a407b55dd5268df6cebc6720768ac31245e08b7950e5ab2b7f14ba934c94f1e325f86 fix-test.patch
78c150f87027de489289596371dce0465159ced0758776b445deb58990e099de9c654406183c9da3cc909878b24d28db62121b7056cd180a6f2820e79e165cc6 remove-systemd-files.patch
b0fdaf2280968a69e05ef72288bbf6fc03787616c6b6fca1e4398f9849167f4773e5e6e72bf1738d1fff2a84e97aa00f23aabcd50898ba8ed130969f50363006 fix-test.patch
ae7e4a43f18076f19f3ae37d7302bfdf7a3befadf33e46bc9b1b14d50b605e8ba0d06d479568c24e8bf68f17c80ae48798068b2a46c3bcab565a5d225779f30e remove-systemd-files.patch
79e1a7c6183ba56f55d923e9d738be945564494042bc011d31e9195f66c268d702ee5c86711d4b46618285fc1b10b59ea55c321390feca770cfc7de334e103bd crictl.yaml
"

View File

@ -16,6 +16,7 @@ start_stop_daemon_args="-N 1 \
depend() {
need net
use dns
}
checkconfig() {

View File

@ -21,7 +21,7 @@ index 8beb6f06..80193413 100644
+ skip "need systemd cgroup manager"
+ fi
+
CONTAINER_CGROUP_MANAGER="systemd" CONTAINER_DROP_INFRA_CTR=false CONTAINER_MANAGE_NS_LIFECYCLE=false CONTAINER_CONMON_CGROUP="customcrioconmon.slice" start_crio
CONTAINER_CGROUP_MANAGER="systemd" CONTAINER_DROP_INFRA_CTR=false CONTAINER_CONMON_CGROUP="customcrioconmon.slice" start_crio
jq ' .linux.cgroup_parent = "Burstablecriotest123.slice"' \
@@ -77,6 +85,10 @@ EOF
@ -48,20 +48,20 @@ index 04492172..abae521e 100755
if [[ "${DEBUG_ARGS}" == "malformed-result" ]]; then
cat <<-EOF
diff --git a/test/helpers.bash b/test/helpers.bash
diff --git a/test/common.sh b/test/common.sh
index f7f8e1f2..45b7dd58 100644
--- a/test/helpers.bash
+++ b/test/helpers.bash
@@ -38,7 +38,7 @@ CONTAINER_UID_MAPPINGS=${CONTAINER_UID_MAPPINGS:-}
CONTAINER_GID_MAPPINGS=${CONTAINER_GID_MAPPINGS:-}
OVERRIDE_OPTIONS=${OVERRIDE_OPTIONS:-}
# CNI path
-CONTAINER_CNI_PLUGIN_DIR=${CONTAINER_CNI_PLUGIN_DIR:-/opt/cni/bin}
+CONTAINER_CNI_PLUGIN_DIR=${CONTAINER_CNI_PLUGIN_DIR:-/usr/libexec/cni}
--- a/test/common.sh
+++ b/test/common.sh
@@ -41,7 +41,7 @@ # CNI path
if command -v host-local >/dev/null; then
CONTAINER_CNI_PLUGIN_DIR=${CONTAINER_CNI_PLUGIN_DIR:-$(dirname "$(readlink "$(command -v host-local)")")}
else
- CONTAINER_CNI_PLUGIN_DIR=${CONTAINER_CNI_PLUGIN_DIR:-/opt/cni/bin}
+ CONTAINER_CNI_PLUGIN_DIR=${CONTAINER_CNI_PLUGIN_DIR:-/usr/libexec/cni}
fi
# Runtime
CONTAINER_DEFAULT_RUNTIME=${CONTAINER_DEFAULT_RUNTIME:-runc}
RUNTIME_BINARY_PATH=$(command -v "$CONTAINER_DEFAULT_RUNTIME")
@@ -70,7 +70,7 @@ CHECKCRIU_BINARY=${CHECKCRIU_BINARY:-${CRIO_ROOT}/test/checkcriu/checkcriu}
@@ -74,7 +74,7 @@ CHECKCRIU_BINARY=${CHECKCRIU_BINARY:-${CRIO_ROOT}/test/checkcriu/checkcriu}
# The default log directory where all logs will go unless directly specified by the kubelet
DEFAULT_LOG_PATH=${DEFAULT_LOG_PATH:-/var/log/crio/pods}
# Cgroup manager to be used

View File

@ -6,8 +6,8 @@ index 19f8052..135385c 100644
sed -i '/# INCLUDE/q' scripts/get
cat contrib/bundle/install-paths contrib/bundle/install >> scripts/get
-install: .gopathok install.bin install.man install.completions install.systemd install.config
+install: .gopathok install.bin install.man install.completions install.config
-install: install.bin install.man install.completions install.systemd install.config
+install: install.bin install.man install.completions install.config
install.bin-nobuild:
install ${SELINUXOPT} -D -m 755 bin/crio $(BINDIR)/crio

View File

@ -1,11 +1,11 @@
# Contributor: Francesco Colista <fcolista@alpinelinux.org>
# Maintainer: Francesco Colista <fcolista@alpinelinux.org>
pkgname=cri-tools
pkgver=1.26.1
pkgrel=1
pkgver=1.28.0
pkgrel=0
pkgdesc="CLI tool for Kubelet Container Runtime Interface (CRI)"
url="https://github.com/kubernetes-sigs/cri-tools"
arch="x86_64 aarch64 ppc64le s390x armv7 x86"
arch="all !armhf"
license="Apache-2.0"
makedepends="go"
options="!check" # no check available
@ -27,5 +27,5 @@ package() {
}
sha512sums="
1900b5d22a20ab1f01c13832be4dcf1e9845b64afb3cdcb6169752bbb20a6e69dcbb6ccc8d31b9d4bf091bf81aa04b9979544586763ea985499f229e7ab2a39d cri-tools-1.26.1.tar.gz
222d3785dc7e8485538b4745766494be02d359347eb1337c9dd04839e19269d768922ff04f07d1fb72291c3554ecf91b382307253a288c9376079135a625cc0c cri-tools-1.28.0.tar.gz
"

View File

@ -1,7 +1,7 @@
# Contributor: Christian Kampka <christian@kampka.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=docker-registry
pkgver=2.8.2_git20230519
pkgver=2.8.3
pkgrel=0
pkgdesc="An implementation of the Docker Registry HTTP API V2 for use with docker 1.6+"
url="https://github.com/distribution/distribution"
@ -57,7 +57,7 @@ package() {
}
sha512sums="
8ceb8b994085bc6522e8a203785bd670977117988d391023148a4153e3c150ad7c17fb98de863c4c2300714022444dc5141a75a2899b8b0f04cbbdc17794b5c7 docker-registry-2.8.2_git20230519.tar.gz
8ceb8b994085bc6522e8a203785bd670977117988d391023148a4153e3c150ad7c17fb98de863c4c2300714022444dc5141a75a2899b8b0f04cbbdc17794b5c7 docker-registry-2.8.3.tar.gz
96100a4de311afa19d293a3b8a63105e1fcdf49258aa8b1752befd389e6b4a2b1f70711341ea011b450d4468bd37dbd07a393ffab3b9aa1b2213cf0fdd915904 docker-registry.initd
5a38f4d3f0ee5cd00c0a5ced744eb5b29b839da5921adea26c5de3eb88b6b2626a7ba29b1ab931e5f8fbfafbed8c94cb972a58737ec0c0a69cf515c32139e387 config-example.patch
"

View File

@ -1,7 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=ecr-credential-provider
pkgver=1.26.1
pkgver=1.28.1
pkgrel=0
pkgdesc="AWS Kubernetes ecr-credential-provider"
url="https://github.com/kubernetes/cloud-provider-aws"
@ -15,7 +15,7 @@ builddir="$srcdir/cloud-provider-aws-$pkgver"
build() {
unset LDFLAGS # the default abuild LDFLAGS break the go linker
make GOFLAGS="-buildmode=pie -v" GOLDFLAGS="-extldflags=-static -w -s" ecr-credential-provider
make VERSION="v$pkgver" GOFLAGS="-buildmode=pie -v" GOLDFLAGS="-extldflags=-static -w -s" ecr-credential-provider
}
package() {
@ -24,5 +24,5 @@ package() {
}
sha512sums="
59ec934a93b94290b0dce830a53301957842d8d45118471bb6eaa142b06dc37ed7f32e4c4a83f1f5341b0dda6745cfa7d8ebbac6d31378e3288857808f2aef71 ecr-credential-provider-1.26.1.tar.gz
b9adc389be9301dc4be36c6bf546f354b9f2895cbad13d28d074dbab77f9aecec8d5fd02590d21c2a4acc91b559371adfe9702898c7880d92aea6657b315a539 ecr-credential-provider-1.28.1.tar.gz
"

View File

@ -0,0 +1,43 @@
From dca56cf4d28bbbb1d3be029ce9a6710cb3f6cd2f Mon Sep 17 00:00:00 2001
From: Laszlo Ersek <lersek@redhat.com>
Date: Thu, 4 Jun 2020 13:34:12 +0200
Subject: BaseTools: do not build BrotliCompress (RH only)
Notes about the RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] ->
RHEL-8.5/20210520-e1999b264f1f [edk2-stable202105] rebase:
- no change
Notes about the RHEL-8.2/20190904-37eef91017ad [edk2-stable201908] ->
RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] rebase:
- New patch.
BrotliCompress is not used for building ArmVirtPkg or OvmfPkg platforms.
It depends on one of the upstream Brotli git submodules that we removed
earlier in this rebase series. (See patch "remove upstream edk2's Brotli
submodules (RH only").
Do not attempt to build BrotliCompress.
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit db8ccca337e2c5722c1d408d2541cf653d3371a2)
---
BaseTools/Source/C/GNUmakefile | 1 -
1 file changed, 1 deletion(-)
diff --git a/BaseTools/Source/C/GNUmakefile b/BaseTools/Source/C/GNUmakefile
index 8c191e0c38..3eae824a1c 100644
--- a/BaseTools/Source/C/GNUmakefile
+++ b/BaseTools/Source/C/GNUmakefile
@@ -48,7 +48,6 @@ all: makerootdir subdirs
LIBRARIES = Common
VFRAUTOGEN = VfrCompile/VfrLexer.h
APPLICATIONS = \
- BrotliCompress \
VfrCompile \
EfiRom \
GenFfs \
--
2.27.0

View File

@ -0,0 +1,49 @@
From 9729dd1d6b83961d531e29777d0cc4a610b108be Mon Sep 17 00:00:00 2001
From: Laszlo Ersek <lersek@redhat.com>
Date: Thu, 4 Jun 2020 13:39:08 +0200
Subject: MdeModulePkg: remove package-private Brotli include path (RH only)
Notes about the RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] ->
RHEL-8.5/20210520-e1999b264f1f [edk2-stable202105] rebase:
- no change
Notes about the RHEL-8.2/20190904-37eef91017ad [edk2-stable201908] ->
RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] rebase:
- New patch.
Originating from upstream commit 58802e02c41b
("MdeModulePkg/BrotliCustomDecompressLib: Make brotli a submodule",
2020-04-16), "MdeModulePkg/MdeModulePkg.dec" contains a package-internal
include path into a Brotli submodule.
The edk2 build system requires such include paths to resolve successfully,
regardless of the firmware platform being built. Because
BrotliCustomDecompressLib is not consumed by any OvmfPkg or ArmVirtPkg
platforms, and we've removed the submodule earlier in this patch set,
remove the include path too.
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit e05e0de713c4a2b8adb6ff9809611f222bfe50ed)
---
MdeModulePkg/MdeModulePkg.dec | 3 ---
1 file changed, 3 deletions(-)
diff --git a/MdeModulePkg/MdeModulePkg.dec b/MdeModulePkg/MdeModulePkg.dec
index 8d38383915..ba2d0290e7 100644
--- a/MdeModulePkg/MdeModulePkg.dec
+++ b/MdeModulePkg/MdeModulePkg.dec
@@ -24,9 +24,6 @@
[Includes]
Include
-[Includes.Common.Private]
- Library/BrotliCustomDecompressLib/brotli/c/include
-
[LibraryClasses]
## @libraryclass Defines a set of methods to reset whole system.
ResetSystemLib|Include/Library/ResetSystemLib.h
--
2.27.0

kubezero/edk2/APKBUILD (new file, 178 lines)
View File

@ -0,0 +1,178 @@
# Contributor: Timo Teräs <timo.teras@iki.fi>
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=edk2
pkgver=0.0.202308
_realver=edk2-stable${pkgver##*.}
_sslver=3.0.9
_sfver=3e
pkgrel=0
pkgdesc="EFI Development Kit II"
url="https://github.com/tianocore/tianocore.github.io/wiki/EDK-II/"
arch="x86_64 aarch64"
license="BSD-2-Clause-Patent"
makedepends="bash python3 iasl nasm util-linux-dev util-linux-misc"
options="!archcheck !check" # has no checks
subpackages="$pkgname-pyc"
_mipisyst_commit=370b5944c046bab043dd8b133727b2135af7747a
source="$pkgname-$pkgver.tar.gz::https://github.com/tianocore/edk2/archive/$_realver.tar.gz
mipisyst-$_mipisyst_commit.tar.gz::https://github.com/MIPI-Alliance/public-mipi-sys-t/archive/$_mipisyst_commit.tar.gz
https://www.openssl.org/source/openssl-$_sslver.tar.gz
http://www.jhauser.us/arithmetic/SoftFloat-$_sfver.zip
build-hack.patch
0008-BaseTools-do-not-build-BrotliCompress-RH-only.patch
0009-MdeModulePkg-remove-package-private-Brotli-include-p.patch
"
builddir="$srcdir/$pkgname-$_realver"
case "$CARCH" in
x86)
TARGET_ARCH=IA32
PLATFORM=OvmfPkg/OvmfPkgIa32X64.dsc
;;
x86_64)
TARGET_ARCH=X64
PLATFORM="OvmfPkg/OvmfPkgX64.dsc OvmfPkg/OvmfXen.dsc OvmfPkg/CloudHv/CloudHvX64.dsc"
subpackages="$subpackages ovmf:_ovmf:noarch ovmf-xen:_xen:noarch cloudhv:_cloudhv:noarch"
;;
aarch64)
TARGET_ARCH=AARCH64
PLATFORM=ArmVirtPkg/ArmVirtQemu.dsc
subpackages="$subpackages aavmf::noarch"
;;
esac
TOOLCHAIN=GCC5
RELEASE=RELEASE
prepare() {
# unix line endings for the files to be patched
sed -e 's/\r$//' -i BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp \
BaseTools/Source/C/VolInfo/VolInfo.c
rm -rf CryptoPkg/Library/OpensslLib/openssl
ln -s "$srcdir"/openssl-$_sslver CryptoPkg/Library/OpensslLib/openssl
rm -rf ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
ln -s "$srcdir"/SoftFloat-$_sfver \
ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
rm -rf MdePkg/Library/MipiSysTLib/mipisyst
ln -s "$srcdir"/public-mipi-sys-t-$_mipisyst_commit \
MdePkg/Library/MipiSysTLib/mipisyst
default_prepare
}
build() {
export PYTHON_COMMAND=python3
export WORKSPACE=$PWD
export PACKAGES_PATH=$PWD
export EDK_TOOLS_PATH=$PWD/BaseTools/
export PATH=$PWD/BaseTools/BinWrappers/PosixLike/:$PATH
# parallel build fails
unset MAKEFLAGS
bash -c ". edksetup.sh"
make -C BaseTools
for _p in $PLATFORM; do
msg "Building Plaform Files $_p"
command build -b $RELEASE \
-a $TARGET_ARCH \
-t $TOOLCHAIN \
-p $_p \
-n ${JOBS:-2} \
-DSECURE_BOOT_ENABLE=TRUE \
-DTPM2_ENABLE=TRUE
done
}
package() {
mkdir -p "$pkgdir"/usr/bin \
"$pkgdir"/usr/share/$pkgname/Conf \
"$pkgdir"/usr/share/$pkgname/Scripts
install BaseTools/Source/C/bin/* BaseTools/BinWrappers/PosixLike/LzmaF86Compress \
"$pkgdir"/usr/bin
install BaseTools/BuildEnv "$pkgdir"/usr/share/$pkgname/
install BaseTools/Conf/*.template "$pkgdir"/usr/share/$pkgname/Conf
install BaseTools/Scripts/GccBase.lds "$pkgdir"/usr/share/$pkgname/Scripts
for i in $(find BaseTools/Source/Python -type d -maxdepth 1); do
local mod=${i##*/}
test -f "$i/$mod.py" || continue
cp -R BaseTools/Source/Python/"$mod" "$pkgdir"/usr/share/edk2/Python/
cat <<- EOF > "$pkgdir"/usr/bin/"$mod".py
#!/bin/sh
export PYTHONPATH=/usr/share/edk2/Python
exec $PYTHON_COMMAND /usr/share/edk2/Python/$mod/$mod.py "\$@"
EOF
chmod +x "$pkgdir"/usr/bin/"$mod".py
done
}
_ovmf() {
pkgdesc="Open Virtual Machine Firmware (OVMF) BIOS"
license="BSD MIT"
for fw in "$builddir"/Build/OvmfX64/"$RELEASE"_"$TOOLCHAIN"/FV/*.fd; do
install -D $fw "$subpkgdir"/usr/share/OVMF/${fw##*/}
done
# dont ship memfd for now to save space
rm -f "$subpkgdir"/usr/share/OVMF/MEMFD.fd
install -d "$subpkgdir"/usr/share/ovmf
ln -sf ../OVMF/OVMF.fd "$subpkgdir"/usr/share/ovmf/bios.bin
}
_xen() {
pkgdesc="Open Virtual Machine Firmware (OVMF) - Xen build"
license="BSD MIT"
install -D "$builddir"/Build/OvmfXen/"$RELEASE"_"$TOOLCHAIN"/FV/OVMF.fd \
"$subpkgdir"/usr/lib/xen/boot/ovmf.bin
}
_cloudhv() {
pkgdesc="EDK2 EFI Firmware - Cloud-Hypervisor build"
license="BSD MIT"
install -D "$builddir"/Build/CloudHvX64/"$RELEASE"_"$TOOLCHAIN"/FV/CLOUDHV.fd \
"$subpkgdir"/usr/share/cloudhv/CLOUDHV.fd
}
aavmf() {
pkgdesc="ARM (aarch64) Virtual Machine Firmware EFI"
license="BSD MIT"
dd if=/dev/zero \
of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_CODE.fd \
bs=1M seek=64 count=0
dd if="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/QEMU_EFI.fd \
of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_CODE.fd \
conv=notrunc
dd if=/dev/zero \
of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_VARS.fd \
bs=1M seek=64 count=0
for fw in "$builddir"/Build/*/"$RELEASE"_"$TOOLCHAIN"/FV/*.fd; do
install -D $fw "$subpkgdir"/usr/share/AAVMF/${fw##*/}
done
}
pyc() {
default_pyc
local IFS=$'\n'
amove $(find usr/share/edk2/Python -type d -name __pycache__)
}
sha512sums="
668411dc64a4a69afd145221c599fffc3797de26e801dda7d9b7ed92f755ff4fda4635dbc21c821f527e56eb71c4ad98c1fb079112a56d6b6eea5ff4d010e3cf edk2-0.0.202308.tar.gz
de6888577ceab7ab6915d792f3c48248cfa53357ccd310fc7f7eae4d25a932de8c7c23e5b898c9ebf61cf86cb538277273f2eb131a628b3bf0d46c9a3b9b6686 mipisyst-370b5944c046bab043dd8b133727b2135af7747a.tar.gz
86c99146b37236419b110db77dd3ac3992e6bed78c258f0cc3434ca233460b4e17c0ac81d7058547fe9cb72a9fd80ee56d4b4916bb731dbe2bbcf1c3d46bf31a openssl-3.0.9.tar.gz
3fedcd0060affb2d8fc7995894133cfed6a495c8717df0d30c89885223c38749f25743598383736036332dad6353c6a3f027f5a94a696660f7c4b607e33e534c SoftFloat-3e.zip
a7d4ab2c82b62ba01c86e59f53bd3896d661c9bfbb9db9598734155b66d5fe03eca4a2a9993a14d3bf555992c6d01ba5d7a15868ff9ec6ed98b8a9b3895bb7df build-hack.patch
ecbfc1ec3b732580c33c477191b71553247af1a68f1754bd363d179e0f5aabde93e3c5ec7f2574f9a9ffefef34e75787a2a87b1057b02cd206e8f0618a252871 0008-BaseTools-do-not-build-BrotliCompress-RH-only.patch
ecad98ff84ab307bda751c8a9a321e064ef880dc66b4d107e66aedbc4e14d00eed76770437e25fa9153dc30803f5cbbf1299329f56865a3b75d2c19f6615e68b 0009-MdeModulePkg-remove-package-private-Brotli-include-p.patch
"

View File

@ -0,0 +1,13 @@
VfrCompile seg.faults with fortify enabled. It's probably broken.
diff -ru a/edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp b/edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp
--- edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp 2016-11-16 10:01:14.000000000 +0200
+++ edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp 2016-11-16 14:47:30.211978076 +0200
@@ -13,6 +13,7 @@
**/
+#define _FORTIFY_SOURCE 0
#include "stdio.h"
#include "stdlib.h"
#include "CommonLib.h"

View File

@ -22,5 +22,5 @@ package() {
}
sha512sums="
97abd4e5a0078112a048037512b041bcefb9e660131403e9c87bf5fc8b632eb17ab66d20a477a2ef4808f54ae29941d74bd61390143e5781058d7bbd4333dd78 etcdhelper-0.1.0.tar.gz
d1f3d239899a2392d11c45ea49b3bfc18255c00933e677f02eab1f0f59a940722fb40de1842a8a4253aabf066508be028443adb8920e82673342ba50130556ca etcdhelper-0.1.0.tar.gz
"

View File

@ -1,13 +1,16 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=falco-kernel
pkgver=0.35.1
_flavor=lts
_extra_flavors=virt
pkgver=0.37.1
pkgrel=0
pkgname=falco-kernel-$_flavor
pkgdesc="Falco kernel module"
url="https://github.com/falcosecurity/falco"
arch="x86_64 aarch64"
license="AGPL-3.0"
makedepends="cmake linux-virt-dev linux-headers"
makedepends="cmake linux-$_flavor-dev linux-headers"
# protobuf-dev jq-dev openssl-dev curl-dev c-ares-dev grpc-dev yaml-dev yaml-cpp-dev jsoncpp-dev re2-dev"
# perl autoconf elfutils-dev libtool argp-standalone musl-fts-dev musl-libintl musl-obstack-dev"
options="!check"
@ -17,34 +20,52 @@ source="
"
builddir="$srcdir/falco-$pkgver"
prepare() {
[[ -d build ]] || mkdir build
}
for f in $_extra_flavors; do
makedepends="$makedepends linux-$f-dev"
subpackages="$subpackages falco-kernel-$f:_extra"
done
build() {
# Hack: when running the build inside a container, uname -r returns the host kernel
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-virt))
for flavor in $_flavor $_extra_flavors; do
mkdir -p $srcdir/falco-$pkgver/build-$flavor
cd build
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DFALCO_VERSION=$pkgver \
-DCMAKE_INSTALL_PREFIX=/usr \
-DUSE_BUNDLED_DEPS=On \
-DMUSL_OPTIMIZED_BUILD=On
# Hack: when running the build inside a container, uname -r returns the host kernel
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-"$flavor"))
KERNELDIR=/lib/modules/$KERNEL_VERSION/build make driver
cd $srcdir/falco-$pkgver/build-$flavor
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DFALCO_VERSION=$pkgver \
-DCMAKE_INSTALL_PREFIX=/usr \
-DUSE_BUNDLED_DEPS=On \
-DMUSL_OPTIMIZED_BUILD=On
KERNELDIR=/lib/modules/$KERNEL_VERSION/build make driver
done
}
_package() {
local flavor=$1
local _out=$2
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-"$flavor"))
depends="linux-$flavor~$(echo $KERNEL_VERSION | sed -e 's/-.*$//')"
cd $srcdir/falco-$pkgver/build-$flavor
mkdir -p "$_out"/lib/modules/$KERNEL_VERSION/kernel
gzip -9 -c driver/falco.ko > "$_out"/lib/modules/$KERNEL_VERSION/kernel/falco.ko.gz
}
package() {
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-virt))
depends="linux-virt~$(echo $KERNEL_VERSION | sed -e 's/-.*$//')"
_package $_flavor $pkgdir
}
cd $srcdir/falco-$pkgver/build
mkdir -p "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel
gzip -9 -c driver/falco.ko > "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel/falco.ko.gz
_extra() {
flavor=${subpkgname##*-}
_package $flavor $subpkgdir
}
sha512sums="
dc648d9b0a625a02320ff0235bbf4f4940e7ba40c684a8a1f972d34f0a3447b4a34e665d7fbc0ee1ec9a014f65f81a304dc76b4ec804fc7b4e448f330b9474af falco-0.35.1.tar.gz
257d526c4d3eadbe2c79852221fdb8076f94e421aa66753628770ae7384137b4672064cbe1ba0a4d88d14e8a7d08e2521d5bd82a312c4b1442d8ea6fbbbb2f28 falco-0.37.1.tar.gz
"

View File

@ -1,17 +1,26 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=falco
pkgver=0.35.1
pkgver=0.37.1
pkgrel=0
pkgdesc="Falco is the open source solution for runtime security for hosts, containers, Kubernetes and the cloud"
url="https://github.com/falcosecurity/falco"
arch="x86_64 aarch64"
license="AGPL-3.0"
# These deps are for BUNDLED_DEPS=On
makedepends="cmake linux-headers bash perl autoconf elfutils-dev libtool argp-standalone musl-fts-dev musl-libintl musl-obstack-dev
protobuf-dev jq-dev openssl-dev curl-dev c-ares-dev grpc-dev yaml-dev yaml-cpp-dev jsoncpp-dev re2-dev"
makedepends="cmake linux-headers bash perl autoconf elfutils-dev libtool argp-standalone
musl-fts-dev
musl-libintl
musl-legacy-error
musl-obstack-dev "
# protobuf-dev
# c-ares-dev
# openssl-dev
# curl-dev
# grpc-dev
# yaml-cpp-dev
# "
options="!check"
depends="falco-kernel~$pkgver"
#depends="falco-kernel~$pkgver"
# Original config
# https://raw.githubusercontent.com/falcosecurity/rules/main/rules/falco_rules.yaml
@ -19,29 +28,33 @@ depends="falco-kernel~$pkgver"
source="
$pkgname-$pkgver.tar.gz::https://github.com/falcosecurity/falco/archive/refs/tags/$pkgver.tar.gz
alpine.patch
falco.patch
rules.patch
"
prepare() {
[[ -d build ]] || mkdir build
# Disable static binaries
patch -i $srcdir/alpine.patch
}
build() {
cd build
cmake .. \
cmake \
-DCPACK_GENERATOR=TGZ \
-DCMAKE_BUILD_TYPE=Release \
-DFALCO_VERSION=$pkgver \
-DCMAKE_INSTALL_PREFIX=/usr \
-DUSE_BUNDLED_DEPS=Off \
-DBUILD_DRIVER=Off \
-DFALCO_ETC_DIR=/etc/falco \
-DUSE_BUNDLED_DEPS=On \
-DMINIMAL_BUILD=On \
-DUSE_DYNAMIC_LIBELF=Off \
-DMUSL_OPTIMIZED_BUILD=On \
-DBUILD_DRIVER=Off \
-DBUILD_BPF=Off \
-DBUILD_LIBSCAP_MODERN_BPF=Off \
..
make falco falcoctl
make falco || bash
}
package() {
@ -50,8 +63,8 @@ package() {
# patch falco config
cd $pkgdir/etc/falco
patch -i $srcdir/falco.patch
patch -i $srcdir/rules.patch
patch --no-backup-if-mismatch -i $srcdir/falco.patch
patch --no-backup-if-mismatch -i $srcdir/rules.patch
# We dont build anything on targets so remove sources
rm -rf $pkgdir/usr/src
@ -60,8 +73,7 @@ package() {
}
sha512sums="
dc648d9b0a625a02320ff0235bbf4f4940e7ba40c684a8a1f972d34f0a3447b4a34e665d7fbc0ee1ec9a014f65f81a304dc76b4ec804fc7b4e448f330b9474af falco-0.35.1.tar.gz
257d526c4d3eadbe2c79852221fdb8076f94e421aa66753628770ae7384137b4672064cbe1ba0a4d88d14e8a7d08e2521d5bd82a312c4b1442d8ea6fbbbb2f28 falco-0.37.1.tar.gz
b152fcf6cd81895efa37797ab7ff1aac7350b5f51f2648aa9e3cce9d5ece55791ddf82c396e9da216293e2379a785a294cc972f28a91162dc5bc88ab09e1ab08 falco.patch
d8f71ca7c6d854a866826b3f2f5630b6f30448f794c4c5a56a9ea656ee03c3645a1cf7663b5e79d3ea63d4fab8bd44f91a80b1752c8239c8310efa08b495f2e2 rules.patch
a067c340269b50354d8eff5cdcf1d60799819b8c20f2f4615af71746eb430d7db320062aa033b75822d4cb3fc2bc73f69d3a0b3ddaba5610155b630e28af6105 alpine.patch
487b8b64d2399fd7b706be29e3722983bcdfde3ab5cf0f78b2e9fe1055a4ad958976f591e739491e25a06d7cdf6894c1e153e892a87b83c7a962e23c9a104528 rules.patch
"

View File

@ -5,7 +5,7 @@
if(MUSL_OPTIMIZED_BUILD)
- set(MUSL_FLAGS "-static -Os -fPIE -pie")
+ set(MUSL_FLAGS "-Os -fPIE -pie")
+ set(MUSL_FLAGS "-fPIE -pie")
add_definitions(-DMUSL_OPTIMIZED)
endif()

File diff suppressed because it is too large.

View File

@ -1,60 +1,29 @@
--- falco_rules.yaml 2023-07-05 11:42:09.732973942 +0000
+++ zdt_falco_rules.yaml 2023-07-05 13:30:14.184038126 +0000
@@ -270,7 +270,7 @@
--- falco_rules.yaml 2023-11-07 16:26:40.171716913 +0000
+++ zdt_falco_rules.yaml 2023-11-07 16:30:24.912804117 +0000
@@ -171,7 +171,7 @@
# A canonical set of processes that run other programs with different
# privileges or as a different user.
- list: userexec_binaries
- items: [sudo, su, suexec, critical-stack, dzdo]
+ items: [doas, sudo, su, suexec, critical-stack, dzdo]
- list: known_setuid_binaries
items: [
@@ -2298,27 +2298,28 @@
- macro: user_known_non_sudo_setuid_conditions
condition: user.name=root
- list: user_mgmt_binaries
items: [login_binaries, passwd_binaries, shadowutils_binaries]
@@ -200,7 +200,7 @@
]
+# Disabled for now due to busybox noise
# sshd, mail programs attempt to setuid to root even when running as non-root. Excluding here to avoid meaningless FPs
-- rule: Non sudo setuid
- desc: >
- an attempt to change users by calling setuid. sudo/su are excluded. users "root" and "nobody"
- suing to itself are also excluded, as setuid calls typically involve dropping privileges.
- condition: >
- evt.type=setuid and evt.dir=>
- and (known_user_in_container or not container)
- and not (user.name=root or user.uid=0)
- and not somebody_becoming_themselves
- and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries,
- nomachine_binaries)
- and not proc.name startswith "runc:"
- and not java_running_sdjagent
- and not nrpe_becoming_nagios
- and not user_known_non_sudo_setuid_conditions
- output: >
- Unexpected setuid call by non-sudo, non-root program (user=%user.name user_loginuid=%user.loginuid cur_uid=%user.uid parent=%proc.pname
- command=%proc.cmdline pid=%proc.pid uid=%evt.arg.uid container_id=%container.id image=%container.image.repository)
- priority: NOTICE
- tags: [host, container, users, mitre_privilege_escalation, T1548.001]
+#- rule: Non sudo setuid
+# desc: >
+# an attempt to change users by calling setuid. sudo/su are excluded. users "root" and "nobody"
+# suing to itself are also excluded, as setuid calls typically involve dropping privileges.
+# condition: >
+# evt.type=setuid and evt.dir=>
+# and (known_user_in_container or not container)
+# and not (user.name=root or user.uid=0)
+# and not somebody_becoming_themselves
+# and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries,
+# nomachine_binaries)
+# and not proc.name startswith "runc:"
+# and not java_running_sdjagent
+# and not nrpe_becoming_nagios
+# and not user_known_non_sudo_setuid_conditions
+# output: >
+# Unexpected setuid call by non-sudo, non-root program (user=%user.name user_loginuid=%user.loginuid cur_uid=%user.uid parent=%proc.pname
+# command=%proc.cmdline pid=%proc.pid uid=%evt.arg.uid container_id=%container.id image=%container.image.repository)
+# priority: NOTICE
+# tags: [host, container, users, mitre_privilege_escalation, T1548.001]
- list: sensitive_file_names
- items: [/etc/shadow, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf]
+ items: [/etc/shadow, /etc/doas.d/doas.conf, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf]
- macro: user_known_user_management_activities
condition: (never_true)
- list: sensitive_directory_names
items: [/, /etc, /etc/, /root, /root/]
@@ -208,7 +208,7 @@
- macro: sensitive_files
condition: >
((fd.name startswith /etc and fd.name in (sensitive_file_names)) or
- fd.directory in (/etc/sudoers.d, /etc/pam.d))
+ fd.directory in (/etc/sudoers.d, /etc/pam.d, /etc/doas.d))
# Indicates that the process is new. Currently detected using time
# since process was started, using a threshold of 5 seconds.

File diff suppressed because it is too large.

View File

@ -0,0 +1,37 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=falcoctl
pkgver=0.7.3
pkgrel=0
pkgdesc="The official CLI tool for working with Falco and its ecosystem components."
url="https://github.com/falcosecurity/falcoctl"
arch="x86_64 aarch64"
license="AGPL-3.0"
makedepends="bash go"
options="!check"
source="
$pkgname-$pkgver.tar.gz::https://github.com/falcosecurity/falcoctl/archive/refs/tags/v$pkgver.tar.gz
"
export GOCACHE="${GOCACHE:-"$srcdir/go-cache"}"
export GOTMPDIR="${GOTMPDIR:-"$srcdir"}"
export GOMODCACHE="${GOMODCACHE:-"$srcdir/go"}"
export GOBIN="$GOPATH/bin"
build() {
make GOFLAGS="-buildmode=pie -v" GOLDFLAGS="-extldflags=-static -w -s" falcoctl
# cleanup 444 files
go clean -modcache
}
package() {
mkdir -p "$pkgdir/usr/bin"
install -Dm755 falcoctl "$pkgdir/usr/bin/falcoctl"
}
sha512sums="
61e539322c91125569c432ea1fc98c84b928795089829a062e6b5c74c7d1223cd71e557b7a8972ba7c6d1b534d1b87da254ee01e12c14038ced5a8f85a22a623 falcoctl-0.7.3.tar.gz
"

View File

@ -1,7 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=fluent-bit
pkgver=2.1.10
pkgver=2.2.2
pkgrel=0
pkgdesc="Fast and Lightweight Log processor and forwarder"
url="https://fluentbit.io/"
@ -101,9 +101,9 @@ package() {
}
sha512sums="
55caefa81cdeaf293b727829383c6eaa75bc2f8b8c61ebe15e1478c66033921fde6e50c39fc8c39a7d2d93d03892f709daf4d1b6caacf586133de5268de10299 fluent-bit-2.1.10.tar.gz
681c1db0256d0b50d986194597b700f790726a1394b3ad92c92a26c95d04bf2b65203e94ef2aeb0f0b3403870748ec0ebbec2cd49548857fbadc5c745581452f fluent-bit-2.2.2.tar.gz
f6431397c80a036980b5377b51e38aec25dfceeb8dbe4cd54dce1f6e77d669d9f8daf983fcc96d25332385888f1809ced5e8ab0e8ccfcd93d19494036e3dc949 fluent-bit.confd
e17bad6abd597da620fdb930e3f18612a828dd956abf87ce850e2660b83db4d9ab7d373ab3a9bf1d07f605b5077998234ce4774007c0197cfbfdad465ca6b47a fluent-bit.initd
8ba6c8e84dee90176f9b4375fb2c6444fa5d32fa601d9bcf3ea7960fec87f1ef664f175caf08bd0b052843e971efdbf08e2a5cd180ad9a8f23ff2c5cb233814f fluent-bit.initd
6bd7d8b4da93a17f29b6ea1e0286ea226d0e376024284741110936779b3229bd8d6cd03ffbdc5d3b4842294e7f32a888de0dd16b0851b65d91b062ca58530ea0 chunkio-static-lib-fts.patch
e3308a8377fb8ba496415b7a31e9e022e5aa9965d27a0c33ea5166a29049b72cb364bbcdf9d8611ef3407b0968f9bd4adff12cdb39728bbebd382710e5bc75d0 exclude-luajit.patch
d61f30344af997f126486fa5b34cd3fbfe88bfc9aea394a8c60d0206f4db8db998eadf637a3a581b89512411c1e7980c414e236e455d5e2b889d20a556ee6577 xsi-strerror.patch

View File

@ -7,5 +7,5 @@ command_args="$fluentbit_opts"
depend() {
need net
after firewall cloudbender
after firewall
}

View File

@ -15,7 +15,7 @@ triggers="$pkgname-bin.trigger=/lib:/usr/lib:/usr/glibc-compat/lib:/lib64"
options="!check lib64"
package() {
conflicts="libc6-compat"
conflicts="gcompat"
mkdir -p "$pkgdir/lib" "$pkgdir/lib64" "$pkgdir/usr/glibc-compat/lib/locale" "$pkgdir"/usr/glibc-compat/lib64 "$pkgdir"/etc
cp -a "$srcdir"/usr "$pkgdir"
cp "$srcdir"/ld.so.conf "$pkgdir"/usr/glibc-compat/etc/ld.so.conf

View File

@ -5,12 +5,11 @@
# Contributor: Dave <dj.2dixx@gmail.com>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=kubernetes
pkgver=1.26.8
pkgver=1.28.9
pkgrel=0
pkgdesc="Container Cluster Manager"
url="https://kubernetes.io/"
# ppc64le: failed to build
arch="x86_64 aarch64 armv7 x86"
arch="all !armhf !riscv64"
license="Apache-2.0"
options="!check chmod-clean net" # Tests hang
@ -72,18 +71,16 @@ _services="kube-apiserver kube-controller-manager kube-proxy kube-scheduler"
export GOCACHE="${GOCACHE:-"$srcdir/go-cache"}"
export GOTMPDIR="${GOTMPDIR:-"$srcdir"}"
export GOMODCACHE="${GOMODCACHE:-"$srcdir/go"}"
export FORCE_HOST_GO="y"
build() {
hack/update-codegen.sh
for _pkgs in $_agent $_cli $_services ; do
make -j1 GOFLAGS="-buildmode=pie -v -tags=providerless" GOLDFLAGS="-extldflags=-static" WHAT=cmd/$_pkgs
make -j1 GOFLAGS="$GOFLAGS -buildmode=pie -v -tags=providerless" GOLDFLAGS="-extldflags=-static" WHAT=cmd/$_pkgs
done
}
package() {
for bin in $_agent $_cli $_services; do
install -Dm755 _output/local/bin/linux/*/$bin "$pkgdir"/usr/bin/$bin
install -Dm755 _output/local/go/bin/$bin "$pkgdir"/usr/bin/$bin
done
mkdir -p "$pkgdir"/etc/kubernetes
}
@ -208,7 +205,7 @@ _do_zshcomp() {
}
sha512sums="
38649d4c8a85e236a8ceffe5bba5146cf1a4eb9191534707dd39443303f99d830e95dc4e9be0febfb2a8bd4d0b57f13b5cb883b51fea57306f1f2ceff2052d69 kubernetes-1.26.8.tar.gz
cb10da770f8bb035c98b2c02b9ff202194ae69983d7c4d5052b03f5f5522e57f70a88105039265e1892039c566cfd7d043fcb44ad958823be0f5bee352f864a0 kubernetes-1.28.9.tar.gz
5427c2e653504cfd5b0bcaf195d4734ee40947ddfebc9f155cd96dddccfc27692c29d94af4ac99f1018925b52995c593b584c5d7a82df2f185ebce1a9e463c40 make-e2e_node-run-over-distro-bins.patch
94d07edfe7ca52b12e85dd9e29f4c9edcd144abc8d120fb71e2a0507f064afd4bac5dde30da7673a35bdd842b79a4770a03a1f3946bfae361c01dd4dc4903c64 make-test-cmd-run-over-hyperkube-based-kubectl.patch
e690daff2adb1013c92124f32e71f8ed9a18c611ae6ae5fcb5ce9674768dbf9d911a05d7e4028488cda886e63b82e8ac0606d14389a05844c1b5538a33dd09d1 kube-apiserver.initd
@ -223,7 +220,7 @@ d7e022ee22da191bda7382f87cb293d9c9d115a3df0c2054bf918279eb866f99c6d5c21e4c98eae8
561bef5633ba4b9021720624443d9c279a561e5fabea76e5d0fbee2e7ad8999029a2511a45895fbec8448026212a3c5b4c197b248a6afa7f8bd945f705524ea7 kube-scheduler.initd
af88b382ab75657d0ff13c3f8f6d924cef9f2df7807a9a27daa63495981801bc4b607998f65c0758c11a7e070e43c24f7184ba7720711109c74b1c4d57919e34 kube-scheduler.confd
3692da349dd6ed0f5acc09d7b95ac562ffecb103e2270bebdfe4a7808d48dada9d2debff262d85b11c47f9ca3f0c20000712d03629ed813ff08a3e02d69267e6 kube-scheduler.logrotated
7cb03bde52820c3ce8b10df1a16cf0b46b39d185e01b4d312400f70bba5875992ec71166539d3820cf59ddbabeb48dec7ae8185820646fae3f851c4cd144fe69 kubelet.initd
372cdf2fbb24a229ed7b3450b54197c006928cb8d2fd756f2713e1e6961849c7aaa35b20b14fb75d1a12ef1e35258048738aa22b5f9783af8fa0a31dfd1b5bbd kubelet.initd
44eb973de8ee8e0c5a77d76ab0e105fe0ae892be1ff86c238a5449b43f83cab6f844575b6c3218f08c5ff077e9f828f5aef72425c1d77546cce2e0136e8a8da8 kubelet.confd
941f4a7579dcf78da2d323ac69195e95eba6600e6fcefe9231447f11c9867a7aa57b4189ee1fefb10eab19c89665ea2e7696b539c92e99fbcde905d2ff85be58 kubelet.logrotated
"

View File

@ -24,5 +24,6 @@ pidfile="${KUBELET_PIDFILE:-/run/${RC_SVCNAME}.pid}"
depend() {
after net cloudbender
need cgroups crio
need cgroups
want containerd crio
}

View File

@ -1,21 +1,24 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=kubezero
pkgver=1.26
pkgver=1.28.9
_crio=1.28.4
_ecr=1.28.1
pkgrel=0
pkgdesc="KubeZero release package"
url="https://git.zero-downtime.net/ZeroDownTime/alpine-overlay/src/branch/master/kubezero/kubezero"
arch="noarch"
arch="x86_64"
license="AGPL-3.0"
depends="
podman
xz
cri-tools
cri-o~$pkgver
cri-o~$_crio
kubelet~$pkgver
kubectl~$pkgver
ecr-credential-provider~$pkgver
aws-iam-authenticator~0.6.10
ecr-credential-provider~$_ecr
aws-iam-authenticator~0.6.14
"
options="!check"
#install="$pkgname.post-install"
@ -24,20 +27,26 @@ subpackages="
$pkgname-imagecache
"
IMAGES="
quay.io/cilium/cilium:v1.15.3
ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9.3
"
#multus_version="4.0.2"
source="
shared-sys-fs.start
evictLocalNode.sh
credential-provider.yaml
kubelet.monit
crio.monit
crio.conf
"
#multus-"$multus_version".tar.gz::https://github.com/k8snetworkplumbingwg/multus-cni/releases/download/v"$multus_version"/multus-cni_"$multus_version"_linux_amd64.tar.gz
IMAGES="
quay.io/cilium/cilium:v1.13.5
ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9.3
"
# get multus and cilium binaries and drop them in /usr/libexec/cni
build() {
# pre loaded images
for i in $IMAGES; do
IMAGE_NAME=$(echo $i | sed -e 's/.*\///' -e 's/:.*//')
podman --storage-driver vfs pull $i
@ -52,10 +61,15 @@ package() {
mkdir -p $pkgdir/etc/kubernetes/manifests
install -Dm644 "$srcdir"/credential-provider.yaml "$pkgdir/etc/kubernetes/credential-provider.yaml"
install -Dm644 "$srcdir"/kubelet.monit "$pkgdir/etc/monit.d/kubelet.conf"
# crio settings
install -Dm644 "$srcdir"/crio.conf "$pkgdir/etc/crio/crio.conf.d/01-kubezero.conf"
# monit
install -Dm644 "$srcdir"/kubelet.monit "$pkgdir/etc/monit.d/kubelet.conf"
install -Dm644 "$srcdir"/crio.monit "$pkgdir/etc/monit.d/crio.conf"
# multus
#install -Dm755 "$srcdir"/multus-cni_"$multus_version"_linux_amd64/multus $pkgdir/usr/libexec/cni/multus
}
# Preload container images all nodes need to speed up boot time and reduce data transfer
@ -71,7 +85,8 @@ imagecache() {
sha512sums="
ecb33fc3a0ffc378723624858002f9f5e180e851b55b98ab6611ecc6a73d4719bc7de240f87683fc58de8bf577059e6f19b417655b5301ef8c32deff67a29dff shared-sys-fs.start
fce1013f7b1bfa8ee526de62e642a37fda3168889723e873d3fb69e257f4caa1423b5a14b9343b12a87f3b6f93c7d3861b854efda67ef2d6a42a5ca8cf3d1593 evictLocalNode.sh
716ec3404d7016bce57d663f750a18db3ede07c1ba7a2908f9f01f41c5ca8fe4e7232ded27bc2bccd705b11ae5cd26574322a8eacefcf8c102bba0f8e4995e59 credential-provider.yaml
92499ec9a8b3634c42b16c01d27f1c1bb650bcc074a2c8d9d16cfe2ea08942948989c6aae79bd2df562ff17df11bbc329e0971f15c4e64f944457825dee7aa79 credential-provider.yaml
8b81eb0fb66e6a739965db6af6a31c443e8f612c06146bd51107372abd833b527423299ee11b27e011f46cfbee11415234b3fa0dea695dbbb06711e0ad58f08d kubelet.monit
e801df9ede6065395db75154735ca9368882d4225452a33f2b54b98cd0c4f3ceb730762d8745c6aea350a3a50a1df0c79ab46f422f94e9a40e621528e9d82055 crio.monit
064fc245b7ffd67834a2f5fd13cb0bcb5f4a5caf79b8113b3669bf1d0e1a4af2042e69f8f496991de76d621fd01bc7e67de37c59f034584d12622c6af96376ff crio.conf
"

View File

@ -1,4 +1,4 @@
apiVersion: kubelet.config.k8s.io/v1alpha1
apiVersion: kubelet.config.k8s.io/v1
kind: CredentialProviderConfig
providers:
- name: ecr-credential-provider
@ -9,4 +9,4 @@ providers:
- "*.dkr.ecr.us-iso-east-1.c2s.ic.gov"
- "*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov"
defaultCacheDuration: "12h"
apiVersion: credentialprovider.kubelet.k8s.io/v1alpha1
apiVersion: credentialprovider.kubelet.k8s.io/v1
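
With the provider config now on the GA kubelet.config.k8s.io/v1 and credentialprovider.kubelet.k8s.io/v1 APIs, the kubelet consumes it through its standard flags. An illustrative fragment only; the flag names are the upstream kubelet ones, while the plugin directory is an assumption, not something shown in this diff:

# appended to the kubelet command line, e.g. via its confd file
command_args="$command_args \
    --image-credential-provider-config=/etc/kubernetes/credential-provider.yaml \
    --image-credential-provider-bin-dir=/usr/libexec/kubernetes/kubelet-plugins/credential-provider"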

View File

@ -0,0 +1,4 @@
check process crio pidfile /run/crio.pid
start program = "/sbin/rc-service crio start"
stop program = "/sbin/rc-service crio stop"
restart program = "/sbin/rc-service crio restart"

View File

@ -0,0 +1,64 @@
--- a/execinfo.c.orig
+++ b/execinfo.c
@@ -69,7 +69,8 @@
char **
backtrace_symbols(void *const *buffer, int size)
{
- int i, clen, alen, offset;
+ size_t clen, alen;
+ int i, offset;
char **rval;
char *cp;
Dl_info info;
@@ -78,7 +79,6 @@
rval = malloc(clen);
if (rval == NULL)
return NULL;
- (char **)cp = &(rval[size]);
for (i = 0; i < size; i++) {
if (dladdr(buffer[i], &info) != 0) {
if (info.dli_sname == NULL)
@@ -92,14 +92,14 @@
2 + /* " <" */
strlen(info.dli_sname) + /* "function" */
1 + /* "+" */
- D10(offset) + /* "offset */
+ 10 + /* "offset */
5 + /* "> at " */
strlen(info.dli_fname) + /* "filename" */
1; /* "\0" */
rval = realloc_safe(rval, clen + alen);
if (rval == NULL)
return NULL;
- snprintf(cp, alen, "%p <%s+%d> at %s",
+ snprintf((char *) rval + clen, alen, "%p <%s+%d> at %s",
buffer[i], info.dli_sname, offset, info.dli_fname);
} else {
alen = 2 + /* "0x" */
@@ -108,12 +108,15 @@
rval = realloc_safe(rval, clen + alen);
if (rval == NULL)
return NULL;
- snprintf(cp, alen, "%p", buffer[i]);
+ snprintf((char *) rval + clen, alen, "%p", buffer[i]);
}
- rval[i] = cp;
- cp += alen;
+ rval[i] = (char *) clen;
+ clen += alen;
}
+ for (i = 0; i < size; i++)
+ rval[i] += (long) rval;
+
return rval;
}
@@ -155,6 +158,6 @@
return;
snprintf(buf, len, "%p\n", buffer[i]);
}
- write(fd, buf, len - 1);
+ write(fd, buf, strlen(buf));
}
}

View File

@ -0,0 +1,24 @@
--- a/execinfo.c.orig
+++ b/execinfo.c
@@ -26,6 +26,7 @@
* $Id: execinfo.c,v 1.3 2004/07/19 05:21:09 sobomax Exp $
*/
+#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>
#include <dlfcn.h>
--- a/stacktraverse.c.orig
+++ b/stacktraverse.c
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE
#include <stddef.h>
#include "stacktraverse.h"
--- a/test.c.orig
+++ b/test.c
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

View File

@ -0,0 +1,44 @@
--- a/Makefile.orig
+++ b/Makefile
@@ -23,24 +23,25 @@
# SUCH DAMAGE.
#
# $Id: Makefile,v 1.3 2004/07/19 05:19:55 sobomax Exp $
+#
+# Linux Makefile by Matt Smith <mcs@darkregion.net>, 2011/01/04
-LIB= execinfo
+CC=cc
+AR=ar
+EXECINFO_CFLAGS=$(CFLAGS) -O2 -pipe -fno-strict-aliasing -std=gnu99 -fstack-protector -c
+EXECINFO_LDFLAGS=$(LDFLAGS)
-SRCS= stacktraverse.c stacktraverse.h execinfo.c execinfo.h
+all: static dynamic
-INCS= execinfo.h
+static:
+ $(CC) $(EXECINFO_CFLAGS) $(EXECINFO_LDFLAGS) stacktraverse.c
+ $(CC) $(EXECINFO_CFLAGS) $(EXECINFO_LDFLAGS) execinfo.c
+ $(AR) rcs libexecinfo.a stacktraverse.o execinfo.o
-SHLIB_MAJOR= 1
-SHLIB_MINOR= 0
+dynamic:
+ $(CC) -fpic -DPIC $(EXECINFO_CFLAGS) $(EXECINFO_LDFLAGS) stacktraverse.c -o stacktraverse.So
+ $(CC) -fpic -DPIC $(EXECINFO_CFLAGS) $(EXECINFO_LDFLAGS) execinfo.c -o execinfo.So
+ $(CC) -shared -Wl,-soname,libexecinfo.so.1 -o libexecinfo.so.1 stacktraverse.So execinfo.So
-NOPROFILE= yes
-
-DPADD= ${LIBM}
-LDADD= -lm
-
-#WARNS?= 4
-
-#stacktraverse.c: gen.py
-# ./gen.py > stacktraverse.c
-
-.include <bsd.lib.mk>
+clean:
+ rm -rf *.o *.So *.a *.so

View File

@ -0,0 +1,50 @@
# Contributor: Philipp Andronov <filipp.andronov@gmail.com>
# Maintainer: Matt Smith <mcs@darkregion.net>
pkgname=libexecinfo
pkgver=1.1
pkgrel=1
pkgdesc="A quick-n-dirty BSD licensed clone of the GNU libc backtrace facility."
options="!check" # No testsuite
url="https://www.freshports.org/devel/libexecinfo"
arch="all"
license="BSD-2-Clause"
subpackages="$pkgname-static ${pkgname}-dev"
source="http://distcache.freebsd.org/local-distfiles/itetcu/$pkgname-$pkgver.tar.bz2
10-execinfo.patch
20-define-gnu-source.patch
30-linux-makefile.patch
"
build() {
cd "$builddir"
export CFLAGS="${CFLAGS} -fno-omit-frame-pointer"
make
}
package() {
cd "$builddir"
install -D -m755 "$builddir"/execinfo.h \
"$pkgdir"/usr/include/execinfo.h
install -D -m755 "$builddir"/stacktraverse.h \
"$pkgdir"/usr/include/stacktraverse.h
install -D -m755 "$builddir"/libexecinfo.a \
"$pkgdir"/usr/lib/libexecinfo.a
install -D -m755 "$builddir"/libexecinfo.so.1 \
"$pkgdir"/usr/lib/libexecinfo.so.1
ln -s /usr/lib/libexecinfo.so.1 \
"$pkgdir"/usr/lib/libexecinfo.so
}
static() {
depends=""
pkgdesc="$pkgdesc (static library)"
mkdir -p "$subpkgdir"/usr/lib
mv "$pkgdir"/usr/lib/*.a "$subpkgdir"/usr/lib
}
sha512sums="51fea7910ef6873061a25c22434ce4da724e9d8e37616a069ad0a58c0463755be4c6c7da88cd747484c2f3373909d7be4678b32a4bd91b6d9e0f74526094e92c libexecinfo-1.1.tar.bz2
cd35c9046d88b39b05bc36faffb1e71ae3b2140632da7da37f374fff671d4ad812eebd0581011ff9e3b25d3cb4bc962cf35957074f713817b3b617511425af1a 10-execinfo.patch
c961b2f86cba291e8c69a507e3880354ad7369fd04c8948d54c4db0578fe30cca8f4250742cb63d1ab6e5875988f04c4729256197030369404f0e925f299a66c 20-define-gnu-source.patch
13d3df88a6dabd78ee2cf50092511f5a10f0e5ff3d81dbacb182fcf85ceb0c13a5f0252397b4eb0ac57f8d8bd3fc3af6c05865d6398cbc1517f347210c5750da 30-linux-makefile.patch"

View File

@ -1,7 +1,8 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
# https://github.com/NVIDIA/nvidia-container-toolkit
pkgname=nvidia-container-toolkit
pkgver=1.13.5
pkgver=1.15.0
pkgrel=0
pkgdesc="NVIDIA Container toolkit incl. cri hooks"
url="https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/overview.html"
@ -12,17 +13,17 @@ depends="glibc-bin nvidia-drivers"
options="!check !tracedeps"
_nv_ver="$pkgver"-1
_libcap=2.25-2
_libcap=2.44-1
_libseccomp=2.3.3-4
source="https://nvidia.github.io/libnvidia-container/stable/debian10/amd64/libnvidia-container1_"$_nv_ver"_amd64.deb
https://nvidia.github.io/libnvidia-container/stable/debian10/amd64/libnvidia-container-tools_"$_nv_ver"_amd64.deb
https://nvidia.github.io/libnvidia-container/stable/debian10/amd64/nvidia-container-toolkit_"$_nv_ver"_amd64.deb
http://deb.debian.org/debian/pool/main/libc/libcap2/libcap2_"$_libcap"_amd64.deb
http://deb.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_"$_libseccomp"_amd64.deb
config.toml
oci-nvidia-hook.json
"
source="https://nvidia.github.io/libnvidia-container/stable/deb/amd64/libnvidia-container1_"$_nv_ver"_amd64.deb
https://nvidia.github.io/libnvidia-container/stable/deb/amd64/libnvidia-container-tools_"$_nv_ver"_amd64.deb
https://nvidia.github.io/libnvidia-container/stable/deb/amd64/nvidia-container-toolkit_"$_nv_ver"_amd64.deb
http://deb.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_"$_libseccomp"_amd64.deb
http://deb.debian.org/debian/pool/main/libc/libcap2/libcap2_"$_libcap"_amd64.deb
config.toml
oci-nvidia-hook.json
"
build() {
return 0
@ -51,7 +52,6 @@ package() {
ar -x "$srcdir"/libcap2_"$_libcap"_amd64.deb && tar xfJ data.tar.xz
mv lib/x86_64-linux-gnu/libcap.so.* "$pkgdir"/usr/glibc-compat/lib
rm -rf control.tar.xz data.tar.xz debian-binary usr
# libseccomp
ar -x "$srcdir"/libseccomp2_"$_libseccomp"_amd64.deb && tar xfJ data.tar.xz
mv usr/lib/x86_64-linux-gnu/libseccomp.so.* "$pkgdir"/usr/glibc-compat/lib
@ -68,11 +68,11 @@ package() {
}
sha512sums="
903155c63c7af83dbd431ba3e5bc0d8ca74cce38996bf944b80520b5838f9765bbc0cbe201122d8ccc21cbd01dd4c4e47d2b451bdab7fadc99a8d75b941fda67 libnvidia-container1_1.13.5-1_amd64.deb
2d4cbbdd80db2730b1ed9db8d4b36c5212ce5361350dcdfbc5795dac887136cecd40c13843e61350bad12b103cd1550030c76de35a2cbbca2a6df3850b6b68ca libnvidia-container-tools_1.13.5-1_amd64.deb
8614c2b436dab3886df6a2328b3753c27704dd3a78f0abe5c333c57fb4ee8deebb6fc03051931b3794bf152d947b721c160acf6614e5145b39bb7162d1ef45d8 nvidia-container-toolkit_1.13.5-1_amd64.deb
694a3ec64ef3056d5874ff03b889b868c294bccb16506468fdf1c289fe3aaadc2da25a5934de653af9633a5d993d2bb21491d84b3b2e2529e6b31d92c78a2228 libcap2_2.25-2_amd64.deb
36adc14f49b3827ba5b86fdf75b2eb91fd2b2621e9be3a02c2f7f94b2c30e47b9f9d7482ae4f788cee804b12b359e4dc597878171f6e68f7287c18b3d3dfdf8d libnvidia-container1_1.15.0-1_amd64.deb
686a642649104379710056740dd0e9004a20809729231d88557c85844fe83ea5b279fe6ac49e890bcc8727b050c285f4d1b2cba584b12be1158c5b7af48d27a3 libnvidia-container-tools_1.15.0-1_amd64.deb
f8507ca4d5b4678c6afaa271b5930f856d33d2ab92ed70fbd2a5047eb6fe7635f55758047117119f8656270d96667ddb154bb95074d58a34ad37ffb7832ce951 nvidia-container-toolkit_1.15.0-1_amd64.deb
5a4eaa96e6e774948889909d618a8ed44a82f649cbba11622dc7b4478098bea006995d5a5a60ca026a57b76ad866d1e2c6caebd154a26eb6bd7e15291b558057 libseccomp2_2.3.3-4_amd64.deb
cc9109cdcf51dc40db732e10ac3eda7e4ac73299ad51d2ec619d7f4cff3f0311be0937530d2175e5486c393bc9e91c709072094fad510573785739afaad831f1 libcap2_2.44-1_amd64.deb
040ac2e3f58549dc09e5bce0d694e4be2f6aae736014bf0ee90042646562d5f1ef1f5990eb9f2c2a2fdf504587b82f4aa0eb99d04c5d3e407670e4012e3edd4e config.toml
0f150ea59b2372bf3ef60e657142b19f46500d1c70cb179d37ce117d6b03e86427dbf356873affb7639e082a07f852a922ae3aea4a8f8885640e43675c4e4add oci-nvidia-hook.json
"

View File

@ -0,0 +1 @@
libpsx.so.2.66

View File

@ -1,8 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=nvidia-drivers
#pkgver=535.54.03
pkgver=525.125.06
pkgver=550.76
pkgrel=0
pkgdesc="NVIDIA Driver"
url="https://www.nvidia.com/download/index.aspx"
@ -56,5 +55,5 @@ package() {
}
sha512sums="
a5f13b633d111d9dc928e8522cd916a2b756fccbf2dc532649762a3f9bdc5503bd57c9c698da8205c49e82720b45789413a1afc26be77d741f823b49ae2f333d NVIDIA-Linux-x86_64-525.125.06.run
a3804501b220d4acbda9633b92c4515bb14d0b5233f3ffd5e173290d310efdb1ed9a9602f727c117c1d0746d596c1125c51cc3e1fde65c79905e60e1d35f50ec NVIDIA-Linux-x86_64-550.76.run
"

View File

@ -1,14 +1,19 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
# Issues:
# - https://github.com/NVIDIA/open-gpu-kernel-modules/issues/468
# https://github.com/NVIDIA/open-gpu-kernel-modules/pull/609/files
# remove coreutils from makedepends
pkgname=nvidia-open-gpu
#pkgver=535.86.05
pkgver=525.125.06
pkgver=550.76
pkgrel=0
pkgdesc="NVIDIA Linux open GPU kernel modules"
url="https://github.com/NVIDIA/open-gpu-kernel-modules"
arch="x86_64"
license="MIT OR GPL-2.0"
makedepends="bash linux-headers linux-virt-dev"
makedepends="bash linux-headers linux-virt-dev coreutils"
options="!check"
source="nvidia-$pkgver.tar.gz::https://github.com/NVIDIA/open-gpu-kernel-modules/archive/refs/tags/$pkgver.tar.gz
@ -21,6 +26,9 @@ build() {
# Hack: when running the build inside a container, uname -r returns the host kernel
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-virt))
unset CFLAGS CPPFLAGS CXXFLAGS
unset LDFLAGS
make KERNEL_UNAME=$KERNEL_VERSION
}
@ -36,7 +44,7 @@ package() {
mkdir -p "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel
for m in $modules; do
gzip -9 -c kernel-open/$m > "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel/$m.gz
gzip -9 -c kernel-open/$m > "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel/$m.gz
done
# Add some udev rules to automate node handling
@ -45,7 +53,7 @@ package() {
}
sha512sums="
4cedcf56e87c93354bc56d168de64b30866cf0b8fba2d2861ac60893b43f8140fa29626c4825af8250c420f9228fd1b64c93750cc50dd210040b4e7c4927e90a nvidia-525.125.06.tar.gz
5126d3b8e3f0635b5b044db4faf0d483e70bb43418bbd21325bb175aaca948e19bd81038fbef9118a95387da65ff0ff3d1592fc54c0d6815a2448b32024468ac nvidia-550.76.tar.gz
b16b86ded8601ff802477e2b191c5728290014f90bb85ad6ec0e5b7e84f8004c467f5b6c66b80dc5d205fb70a3900ac286764a3829ca3ad3b8a3a5fd0b73a702 91-nvidia.rules
8335bd69c482da1f67b5cddd31a0b40d01b5c627aeca137b40ac7776cb3e7475767bec808a972ed739c26914207aca264324c41496f6fb579d910c8477f7cc1c create-nvidia-uvm-dev-node.sh
"

View File

@ -1,7 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=zdt-base
pkgver=0.3.18
pkgver=0.3.19
pkgrel=0
pkgdesc="ZeroDownTime Alpine additions and customizations"
url="https://git.zero-downtime.net/ZeroDownTime/alpine-overlay/src/branch/master/kubezero/zdt-base"
@ -9,7 +9,7 @@ arch="noarch"
license="AGPL-3.0"
depends="logrotate syslog-ng neofetch monit file tiny-cloud dhcpcd"
options="!check"
subpackages="$pkgname-openrc $pkgname-aws"
subpackages="$pkgname-openrc $pkgname-aws $pkgname-nocloud"
install="$pkgname.post-install"
source="
@ -17,11 +17,15 @@ source="
boot.sh
cloudbender-early.init
cloudbender.init
cloud-aws.sh
cloud-nocloud.sh
zdt-sysctl.conf
https://raw.githubusercontent.com/pixelb/ps_mem/v3.14/ps_mem.py
syslog-ng.conf
syslog-ng.logrotate.conf
syslog-ng.apparmor
cloudbender.stop
cloudbender.start
dhcpcd-mtu.hook
monitrc
monit_alert.sh.aws
@ -31,6 +35,7 @@ source="
route53.py
get_iam_sshkeys.py
uniq_hostname.py
write_parameters.py
"
build() {
@ -55,9 +60,14 @@ package() {
# early init script to eg. mount var, cannot use any network !
install -Dm755 "$srcdir/cloudbender-early.init" "$pkgdir/etc/init.d/cloudbender-early"
# various tasks during boot
# various tasks during first boot
install -Dm755 "$srcdir/cloudbender.init" "$pkgdir/etc/init.d/cloudbender"
# local boot & shutdown
install -Dm755 "$srcdir/cloudbender.start" "$pkgdir/etc/local.d/cloudbender.start"
install -Dm755 "$srcdir/cloudbender.stop" "$pkgdir/etc/local.d/cloudbender.stop"
# syslog-ng configs, json all into messages
install -Dm644 "$srcdir"/syslog-ng.conf "$pkgdir"/lib/zdt/syslog-ng.conf
install -Dm644 "$srcdir"/syslog-ng.logrotate.conf "$pkgdir"/lib/zdt/syslog-ng.logrotate.conf
@ -78,33 +88,50 @@ package() {
}
aws() {
# Basic AWS tools
mkdir -p "$subpkgdir"
# aws libs
install -Dm755 "$srcdir/cloud-aws.sh" "$pkgdir/usr/lib/cloudbender/cloud/aws.sh"
# other tools
install -Dm755 "$srcdir"/route53.py "$subpkgdir"/usr/sbin/route53.py
install -Dm755 "$srcdir"/uniq_hostname.py "$subpkgdir"/usr/sbin/uniq_hostname.py
install -Dm755 "$srcdir"/get_iam_sshkeys.py "$subpkgdir"/usr/sbin/get_iam_sshkeys.py
install -Dm755 "$srcdir"/write_parameters.py "$subpkgdir"/usr/sbin/write_parameters.py
# Cloudbender SNS integration
install -Dm755 "$srcdir"/monit_alert.sh.aws "$pkgdir"/usr/bin/monit_alert.sh
}
nocloud() {
mkdir -p "$subpkgdir"
# nocloud libs
install -Dm755 "$srcdir/cloud-nocloud.sh" "$pkgdir/usr/lib/cloudbender/cloud/nocloud.sh"
}
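Note: with the new -nocloud subpackage the flavour is chosen at install time; a hedged example:

    apk add zdt-base zdt-base-aws        # AWS instances: route53.py, get_iam_sshkeys.py, write_parameters.py, SNS alert hook
    apk add zdt-base zdt-base-nocloud    # bare-metal / nocloud instances: only the nocloud cloud library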
sha512sums="
a870cc7657757770fb573a0fb5df61887d1b9d2a6a57b3ee8be93a7dfb34df6a1d489cd5572ab273dfe896b97faad7e7479571f993a3e13cfefe24c4720bcbf4 common.sh
7f6a69a77d6a4a3c34928609108b7939cd43a892d72fb14bebc1d935cd66eda3bd625d15eebb4d6026715b36b12919fcaf863ed5f65ffdc0e2de9fc1b969cb3e boot.sh
ee19dcc0b46bdff8581c2661cda69fd8a3fa2de4dd30d96a4ce438b2536043a9f0bc57a6b0d4056e2715a2663a89bc1b07ec33798d5430a2046a65069a327cda cloudbender-early.init
df610d896c6b2821925df8d65ab44a0008b31e5b738172076234ae7645e8ef7e25d710c43f9b3999fb3f0303ccd81b57327c2e7694e1fc3f790abdbc77e0a097 cloudbender.init
b9479835d8667fa99f8b1b140f969f0464a9bb3c60c7d19b57e306cfe82357d453932791e446caded71fddd379161ae8328367f1ee75ae3afc1b85e12294b621 zdt-sysctl.conf
c1808572d074e1a91e0efc3c31462f6035159338843e51fbccca5102b2923506ce60ba9e1ef00b2fbb134da7a33f55af364e1bff15c272eb7f4ebc6035f33887 common.sh
cf8b75a81bb35e853761d21b15b5b109f15350c54daaf66d2912541a20f758c3ca237d58932e5608d2d3867fe15a07ebd694fd1c313a8290d15afc2b27a575dd boot.sh
eb7d5b6f92f500dbaba04a915cdd8d66e90456ca86bed86b3a9243f0c25577a9aa42c2ba28c3cad9dda6e6f2d14363411d78eff35656c7c60a6a8646f43dcba5 cloudbender-early.init
cac71c605324ad8e60b72f54b8c39ee0924205fcd1f072af9df92b0e8216bcde887ffec677eb2f0eacce3df430f31d5b5609e997d85f14389ee099fbde3c478f cloudbender.init
482438e6d443777636fd8f8f7b3d887c5664243d9547a47a755cbb3f56fac3a145be34e9ef6ce622bf0dcb28f5dda1a53c8448f8dbfb632210cc52a3b786b18c cloud-aws.sh
3a84b728d4169b92356f1da52922c6110efd5bdc2df90b64abe59f89a5de57cc85a81936bdead0cae5071c1ba1735bda1bd866018b5c3f7fd4ef155d0606ac2d cloud-nocloud.sh
06102e56c847637f705d0b29b05b07fbbb2bda9ba69f0a7fe1d716126d3b1c7922fb0df159199809908fa0dc143209775edb1dd5976faa84244dbcaa45f00364 zdt-sysctl.conf
76e6a4f309f31bfa07de2d3b1faebe5670722752e18157b69d6e868cbe9e85eda393aed0728b0347a01a810eee442844c78259f86ff71e3136a013f4cbfaaea4 ps_mem.py
5376f4bf8356ce9249c45e78085073245181e8742c7b4be47c71dcd97a611ae125a7dfd3060502bdd591560af070334f89fe60dbc09c008926149c538ab0560a syslog-ng.conf
b86dec8c059642309b2f583191457b7fac7264b75dc5f4a06ad641de6b76589c0571b8b72b51519516ba7e68a128fe2da29b4a2a6dc77c252204675c51b2d128 syslog-ng.conf
484bdcf001b71ce5feed26935db437c613c059790b99f3f5a3e788b129f3e22ba096843585309993446a88c0ab5d60fd0fa530ef3cfb6de1fd34ffc828172329 syslog-ng.logrotate.conf
e86eed7dd2f4507b04050b869927b471e8de26bc7d97e7064850478323380a0580a92de302509901ea531d6e3fa79afcbf24997ef13cd0496bb3ee719ad674ee syslog-ng.apparmor
cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e cloudbender.stop
b93cec571afe5128ab4d7c3998b3dc48753897f37169a111f606a48d1982e6ffce52a4ac9568a6a062f621148fb652049b84926a40a62d89be3786e6836261e6 cloudbender.start
f8c052c7ec12c71937c7b8bc05d8374c588f345e303b30eda9c8612dff8f8f34a87a433648a3e9b85b278196ece198533b29680a303ff6478171d43f8e095189 dhcpcd-mtu.hook
e00a8f296c76446fe1241bf804c0108f47a2676f377a413ee9fede0943362a6582cad30fe13edd93f3d0daab0e2d7696553fb9458dca62adc05572dce339021a monitrc
c955dabe692c0a4a2fa2b09ab9096f6b14e83064b34ae8d22697096daf6551f00b590d837787d66ea1d0030a7cc30bef583cc4c936c980465663e73aec5fa2dc monit_alert.sh.aws
346b0170ada6cc1207ffb7b8ef138a1570a63c7df4d57618aa4b6b6c0d2df2197b0f5b23578ec83c641ee5e724865ac06985222e125809c990467426a0851b72 neofetch.conf
2c02a1d454881dd7197548286c6cf24c1453dd9d726f3e5445703c12414853b0e12205e5b6a0c3ae09b76097d2bdfcfd6e1bc9a122dd9f66c6d6d03ab41f748a neofetch.conf
532b8e2eb04942ab20bdc36b5dea1c60239fcbfcb85706123f3e05c18d65c938b85e9072d964ae5793177625a8db47b532db1f5bd5ed5ecbb70d5a331666ff54 zdt-ascii.txt
c3e72cd92936b03f2b9eab5e97e9a12fcddcdf2c943342e42e7702e2d2407e00859c62dc9b4de3378688d2f05458aa5c104272af7ab13e53a62f1676d1a9a1b4 profile
816049360aa442f9e9aa4d6525795913cfe3dc7c6c14dc4ccad59c0880500f9d42f198edc442fe036bc84ba2690d9c5bc8ae622341d8276b3f14947db6b879b1 route53.py
7da28446762a36a6737c5b30becbce78775bd943b4d0c5ef938a50f49b4f51f66708434aa79004c19d16c56c83f54c8d6d68e1502ebc250c73f8aae12bed83c0 get_iam_sshkeys.py
ae1941fc45e61fa8d211f5ef7eff2dd01510a6d364c4302cab267812321a10e7434ecc8d8c9263d8671ce5604d04d6531601bf42886a55fb6aec7f321651e1dc uniq_hostname.py
ee4264337d86ad99ba6cf9ec3017986c804ac208c0beb5fc8651345bd277bb6de03e7c3a8c1b751767647be48f9d45ac47a7d14cf040d9c827780984394e826d write_parameters.py
"

View File

@ -26,7 +26,7 @@ setup_var() {
case "$CLOUD" in
aws)
# on AWS look for sdx/xvdx
if [ "$d" = "/dev/sdx" -o "$d" = "/dev/xvdx" ]; then
# check volume for existing filesystem
type=$(file -Lbs $d)
@ -43,6 +43,10 @@ setup_var() {
log -i -t early info "mounted $d at /var"
fi
;;
nocloud)
# Todo: should we try to mount a special tagged block device as /var ?
return 0
;;
*)
ewarn "Unsupported cloud: $CLOUD"
return 1

View File

@ -0,0 +1,205 @@
#!/bin/bash
# Todo: This should go into a yaml file
query_imds() {
MAC=$(imds meta-data/mac)
AVAILABILITY_ZONE=$(imds meta-data/placement/availability-zone)
REGION=$(echo ${AVAILABILITY_ZONE} | sed "s/[a-z]$//")
INSTANCE_ID=$(imds meta-data/instance-id)
cat <<EOF >> /var/lib/cloud/meta-data
AVAILABILITY_ZONE=$AVAILABILITY_ZONE
REGION=$REGION
INSTANCE_ID=$INSTANCE_ID
IP_ADDRESS=$(imds meta-data/local-ipv4)
PUBLIC_IP_ADDRESS=$(imds meta-data/public-ipv4 || true)
DEFAULT_GW_INTERFACE=$(ip -o route get 8.8.8.8 | awk '{print $5}')
MAC=$MAC
VPC_CIDR_RANGE=$(imds meta-data/network/interfaces/macs/${MAC}/vpc-ipv4-cidr-block)
SUBNET=$(imds meta-data/network/interfaces/macs/${MAC}/subnet-ipv4-cidr-block)
_META_HOSTNAME=$(imds meta-data/hostname)
DOMAIN_NAME=\${_META_HOSTNAME#*.}
AWS_ACCOUNT_ID=$(imds meta-data/network/interfaces/macs/${MAC}/owner-id)
INSTANCE_LIFE_CYCLE=$(imds meta-data/instance-life-cycle)
INSTANCE_TYPE=$(imds meta-data/instance-type)
EOF
}
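Note: the resulting /var/lib/cloud/meta-data is a plain, sourceable shell fragment; an abbreviated illustration with made-up values (DOMAIN_NAME is intentionally written unexpanded and only resolved when the file is sourced):

    #!/bin/bash
    AVAILABILITY_ZONE=us-east-1a
    REGION=us-east-1
    INSTANCE_ID=i-0123456789abcdef0
    IP_ADDRESS=10.0.1.23
    DEFAULT_GW_INTERFACE=eth0
    _META_HOSTNAME=ip-10-0-1-23.ec2.internal
    DOMAIN_NAME=${_META_HOSTNAME#*.}
    INSTANCE_TYPE=m6i.large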
# Todo: This should go into a yaml file
get_tags() {
# via metadata AWS restricts tags to NOT have " " or "/" ;-(
# Replace all /:.- with _ for valid variable names
for key in $(imds meta-data/tags/instance); do
value="$(imds meta-data/tags/instance/$key)"
key=$(echo ${key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
done
#while read _key value; do
# key=$(echo ${_key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
# echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
#done < <(aws ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" --query 'Tags[*].[Key,Value]' --region $REGION --output text)
}
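Note: the key rewrite turns an instance tag into a plain shell variable; a hedged example of the transformation:

    # a tag "zdt:environment.prod-1" with value "kube" would be appended as:
    ZDT_ENVIRONMENT_PROD_1="kube"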
# extract user-data args and cloud meta-data into /var/lib/cloud/meta-data
get_meta_data() {
if [ ! -f /var/lib/cloud/meta-data ]; then
echo '#!/bin/bash' > /var/lib/cloud/meta-data
query_imds
get_tags
fi
if [ ! -f /etc/cloudbender.conf ]; then
bash /var/lib/cloud/user-data extract_parameters
fi
}
import_meta_data() {
. /etc/cloudbender.conf
. /var/lib/cloud/meta-data
export AWS_DEFAULT_REGION=$REGION
export AWS_DEFAULT_OUTPUT=text
# Enable the LaunchHook if not in DEBUG mode
is_enabled $ZDT_CLOUDBENDER_DEBUG || LAUNCH_HOOK="CloudBenderLaunchHook"
# Workaround for current CFN ASG_<parameter> hack
_key=$(echo $AWS_CLOUDFORMATION_LOGICAL_ID | tr '[:lower:]' '[:upper:]')
[ -n "$(eval echo \$${_key}_CUSTOMHOSTNAME)" ] && CUSTOMHOSTNAME="$(eval echo \$${_key}_CUSTOMHOSTNAME)"
[ -n "$(eval echo \$${_key}_VOLUMES)" ] && VOLUMES="$(eval echo \$${_key}_VOLUMES)"
return 0
}
# various early volume functions
attach_ebs() {
local volId="$1"
local device="$2"
local tries=30
while true; do
_json="$(aws ec2 describe-volumes --volume-ids $volId --region $REGION --output json)"
rc=$?; [ $rc -ne 0 ] && return $rc
vol_status=$(echo "$_json" | jq -r .Volumes[].State)
attachedId=$(echo "$_json" | jq -r .Volumes[].Attachments[].InstanceId)
[ "$attachedId" = "$INSTANCE_ID" ] && break
if [ "$vol_status" = "available" ]; then
aws ec2 attach-volume --volume-id "$volId" --instance-id "$INSTANCE_ID" --region "$REGION" --device "$device" > /dev/null
rc=$?; [ $rc -ne 0 ] && return $rc
break
fi
# if attached but not to us -> detach
if [ "$vol_status" = "in-use" ]; then
aws ec2 detach-volume --volume-id "$volId" --region "$REGION" --force
rc=$?; [ $rc -ne 0 ] && return $rc
fi
((tries=tries-1))
[ $tries -eq 0 ] && return 1
sleep 5
done
}
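Note: a hedged usage sketch with a made-up volume id and device name:

    attach_ebs vol-0123456789abcdef0 /dev/sdf || log -t user-data warn "could not attach data volume"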
asg_heartbeat() {
[ -n "$LAUNCH_HOOK" ] && aws autoscaling record-lifecycle-action-heartbeat --instance-id $INSTANCE_ID --lifecycle-hook-name $LAUNCH_HOOK --auto-scaling-group-name $AWS_AUTOSCALING_GROUPNAME || true
}
setup_sns_alarms() {
# store SNS message json template
cat <<EOF > /var/lib/cloud/sns_alarm.json
{
"Source": "CloudBender",
"AWSAccountId": "$AWS_ACCOUNT_ID",
"Region": "$REGION",
"Artifact": "$ARTIFACT",
"Asg": "$AWS_AUTOSCALING_GROUPNAME",
"Instance": "$INSTANCE_ID",
"ip": "$IP_ADDRESS"
}
EOF
cat <<'EOF' > /var/lib/cloud/sns_alarm.sh
#!/bin/bash
SUBJECT=$1
MSG=$2
LEVEL=${3:-Info}
ATTACHMENT=${4:-""}
EMOJI=${5:-""}
EOF
if [ -n "$ALARMSNSARN" ]; then
cat <<EOF >> /var/lib/cloud/sns_alarm.sh
jq -M --arg subject "\$SUBJECT" --arg level "\$LEVEL" --arg msg "\$MSG" --arg attachment "\$ATTACHMENT" --arg emoji "\$EMOJI" --arg hostname "\$HOSTNAME" '.Subject = \$subject | .Level = \$level | .Message = \$msg | .Attachment = \$attachment | .Emoji = \$emoji | .Hostname = \$hostname' < /var/lib/cloud/sns_alarm.json | sed -e 's/\\\\\\\\/\\\\/g' > /tmp/sns.json
aws sns publish --region ${REGION} --target-arn $ALARMSNSARN --message file:///tmp/sns.json
EOF
fi
chmod +x /var/lib/cloud/sns_alarm.sh
}
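Note: once written, the generated helper can be invoked from anywhere on the instance; a hedged example following the argument order above (subject, message, level, attachment), with a made-up log path:

    /var/lib/cloud/sns_alarm.sh "Backup failed" "rsync exited non-zero" Warning "$(tail -n 20 /var/log/backup.log)"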
# associate EIP
# return 0 if we attached an EIP
# return 1 if the public IP did NOT change or on any other error
associate_eip() {
local instance_id=$1
local eip=$(echo $2 | sed -e 's/\/32//' | grep -E -o "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)") || true
local current_instance
if [ -n "$eip" ]; then
if [ "$eip" != "0.0.0.0" ]; then
read eip_alloc_id eip_assoc_id current_instance < <(aws ec2 describe-addresses --public-ips $eip --query 'Addresses[*].[AllocationId,AssociationId,InstanceId]' || true)
# If we already own and have the EIP attached -> done
[ "$instance_id" == "$current_instance" ] && return
if [ ! -z "$eip_alloc_id" ]; then
if [[ "$eip_assoc_id" =~ ^eipassoc- ]]; then
log -t user-data info "EIP $eip already associated via Association ID ${eip_assoc_id}. Disassociating."
retry 3 10 aws ec2 disassociate-address --association-id $eip_assoc_id
fi
log -t user-data info "Associating Elastic IP $eip via Allocation ID $eip_alloc_id with Instance $instance_id"
aws ec2 associate-address --no-allow-reassociation --instance-id $instance_id --allocation-id $eip_alloc_id
return
else
log -t user-data warn "Elastic IP $eip address not found."
fi
else
log -t user-data info "0.0.0.0 requested, keeping AWS assigned IP."
fi
else
log -t user-data debug "Invalid or no ElasticIP defined. Skip"
fi
return 1
}
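Note: a hedged usage sketch; the ELASTICIP parameter may carry a /32 suffix, which is stripped, and 0.0.0.0 explicitly keeps the AWS-assigned address:

    associate_eip "$INSTANCE_ID" "203.0.113.10/32" && PUBLIC_IP_ADDRESS="203.0.113.10"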
# Disable the EC2 source/destination check so the instance accepts traffic not addressed to itself
disable_source_dest_check() {
aws ec2 modify-instance-attribute --instance-id ${INSTANCE_ID} --source-dest-check "{\"Value\": false}"
}
# Register ourselves in the given route tables
register_routes() {
local rtb_id_list="$1"
local route_cidr="$2"
for cidr in ${route_cidr//,/ }; do
if [ "$cidr" != "$VPC_CIDR_RANGE" ]; then
for rt in ${rtb_id_list//,/ }; do
if [[ "$rt" =~ ^rtb-[a-f0-9]*$ ]]; then
aws ec2 create-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID} || \
aws ec2 replace-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID}
else
log -t user-data warn "Invalid Route Table ID: $rt"
fi
done
fi
done
}
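Note: a hedged usage sketch with made-up route table ids, eg. for a NAT or VPN instance announcing extra CIDRs:

    disable_source_dest_check
    register_routes "rtb-0123456789abcdef0,rtb-0fedcba9876543210" "0.0.0.0/0,192.168.0.0/16"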

View File

@ -0,0 +1,9 @@
#!/bin/bash
get_meta_data() {
SSHPORT=$(imds meta-data/cloudbender/sshPort)
}
import_meta_data() {
echo Noop
}

View File

@ -1,7 +1,8 @@
#!/sbin/openrc-run
# vim:set ts=8 noet ft=sh:
description="CloudBender early tasks (no network / metadata available yet)"
# no network / metadata available yet
description="CloudBender early tasks"
depend() {
need fsck root

View File

@ -2,7 +2,7 @@
# vim:set ts=8 noet ft=sh:
#
description="CloudBender - setup meta_data, mount additional volumes, send shutdown messages"
description="CloudBender - main phase"
depend() {
need net
@ -13,6 +13,8 @@ depend() {
start() {
source /usr/lib/cloudbender/common.sh
ebegin "CloudBender"
get_meta_data
import_meta_data
@ -25,15 +27,18 @@ start() {
# add optional ssh keys, eg. via IAM for AWS
configure_sshd
if [ "$CLOUD" == "aws" ]; then
set_hostname $CUSTOMHOSTNAME
# if fixed hostname use persistent sshd keys
[ -n "$CUSTOMHOSTNAME" ] && persistent_sshd_hostkeys "/_ssh/${ARTIFACT}/${CONGLOMERATE}/${HOSTNAME}"
associate_eip $INSTANCE_ID $ELASTICIP && PUBLIC_IP_ADDRESS=$ELASTICIP
fi
register_service_dns
is_enabled $PROMETHEUS_ENABLED && setup_prometheus $PROMETHEUS_ALLOW
is_enabled $LOGGING_ENABLED && setup_fluentbit $LOGGING_HOST
# cleanup previous reboot logs
@ -50,7 +55,7 @@ stop() {
unmount_volumes "$VOLUMES"
[ -n "$DEBUG" ] && [ -r /tmp/shutdown.log ] && SHUTDOWNLOG="$(cat /tmp/shutdown.log)"
is_enabled $ZDT_CLOUDBENDER_DEBUG && [ -r /tmp/shutdown.log ] && SHUTDOWNLOG="$(cat /tmp/shutdown.log)"
[ -n "$RC_REBOOT" ] && ACTION="rebooting" || ACTION="terminated"
[ -z "$DISABLE_SCALING_EVENTS" ] && /var/lib/cloud/sns_alarm.sh "Instance $ACTION" "" Info "$SHUTDOWNLOG"

View File

@ -0,0 +1,10 @@
# make mounts shared so containers, eg. cilium or falco, can be run later
# should be handled in openrc, see: https://github.com/OpenRC/openrc/pull/526/files
mount --make-rshared /
# Enable THP incl. defrag but very conservatively
# see: https://go.dev/doc/gc-guide#Linux_transparent_huge_pages
echo "madvise" > /sys/kernel/mm/transparent_hugepage/enabled
echo "defer+madvise" > /sys/kernel/mm/transparent_hugepage/defrag
echo "0" > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none

View File

View File

@ -1,90 +1,18 @@
# Built on top of tiny-cloud
. /lib/tiny-cloud/common
. /usr/lib/cloudbender/cloud/"$CLOUD".sh
IMDS_ENDPOINT="169.254.169.254"
. /lib/tiny-cloud/cloud/"$CLOUD"/imds
# boolean flags
is_enabled() {
local flag=$(echo "$1" | tr '[:upper:]' '[:lower:]')
_imds() {
wget --quiet --timeout 1 --output-document - \
--header "$(_imds_header)" \
"http://$IMDS_ENDPOINT/$IMDS_URI/$1$IMDS_QUERY"
[ "$flag" == 1 -o "$flag" == "true" ] && return 0
[ "$flag" == 0 -o "$flag" == "false" -o "$flag" == "none" -o -z "$flag" ] && return 1
log -t user-data warn "Unknown value for boolean option: $flag - assuming False"
return 1
}
# Todo: This should go into a yaml file
query_imds() {
MAC=$(_imds meta-data/mac)
AVAILABILITY_ZONE=$(_imds meta-data/placement/availability-zone)
REGION=$(echo ${AVAILABILITY_ZONE} | sed "s/[a-z]$//")
INSTANCE_ID=$(_imds meta-data/instance-id)
cat <<EOF >> /var/lib/cloud/meta-data
AVAILABILITY_ZONE=$AVAILABILITY_ZONE
REGION=$REGION
INSTANCE_ID=$INSTANCE_ID
IP_ADDRESS=$(_imds meta-data/local-ipv4)
PUBLIC_IP_ADDRESS=$(_imds meta-data/public-ipv4 || true)
DEFAULT_GW_INTERFACE=$(ip -o route get 8.8.8.8 | awk '{print $5}')
MAC=$MAC
VPC_CIDR_RANGE=$(_imds meta-data/network/interfaces/macs/${MAC}/vpc-ipv4-cidr-block)
SUBNET=$(_imds meta-data/network/interfaces/macs/${MAC}/subnet-ipv4-cidr-block)
_META_HOSTNAME=$(_imds meta-data/hostname)
DOMAIN_NAME=\${_META_HOSTNAME#*.}
AWS_ACCOUNT_ID=$(_imds meta-data/network/interfaces/macs/${MAC}/owner-id)
INSTANCE_LIFE_CYCLE=$(_imds meta-data/instance-life-cycle)
INSTANCE_TYPE=$(_imds meta-data/instance-type)
EOF
}
# Todo: This should go into a yaml file
get_tags() {
# via metadata AWS restricts tags to NOT have " " or "/" ;-(
# Replace all /:.- with _ for valid variable names
for key in $(_imds meta-data/tags/instance); do
value="$(_imds meta-data/tags/instance/$key)"
key=$(echo ${key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
done
#while read _key value; do
# key=$(echo ${_key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
# echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
#done < <(aws ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" --query 'Tags[*].[Key,Value]' --region $REGION --output text)
}
# extract user-data args and cloud meta-data into /var/lib/cloud/meta-data
get_meta_data() {
if [ ! -f /var/lib/cloud/meta-data ]; then
echo '#!/bin/bash' > /var/lib/cloud/meta-data
query_imds
get_tags
fi
if [ ! -f /etc/cloudbender.conf ]; then
bash /var/lib/cloud/user-data extract_parameters
fi
}
import_meta_data() {
. /etc/cloudbender.conf
. /var/lib/cloud/meta-data
export AWS_DEFAULT_REGION=$REGION
export AWS_DEFAULT_OUTPUT=text
# some basic logic
if [ "$DEBUG" == "None" -o "$DEBUG" == "False" ]; then
unset DEBUG
LAUNCH_HOOK="CloudBenderLaunchHook"
fi
# Workaround for current CFN ASG_<parameter> hack
_key=$(echo $AWS_CLOUDFORMATION_LOGICAL_ID | tr '[:lower:]' '[:upper:]')
[ -n "$(eval echo \$${_key}_CUSTOMHOSTNAME)" ] && CUSTOMHOSTNAME="$(eval echo \$${_key}_CUSTOMHOSTNAME)"
[ -n "$(eval echo \$${_key}_VOLUMES)" ] && VOLUMES="$(eval echo \$${_key}_VOLUMES)"
return 0
}
# setup_instance, various OS tweaks impossible to do via AMI baking
setup_instance() {
@ -92,24 +20,19 @@ setup_instance() {
[ -f /etc/machine-id ] || uuidgen > /etc/machine-id
# add and mount bpf file system
add_once /etc/fstab "bpffs /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0"
mount -a
# Ensure certain mounts are shared to run containers later, eg. cilium, falco
mount --make-shared /sys/fs/cgroup
mount --make-shared /sys/fs/bpf
mount --make-shared /sys
add_once /etc/hosts "${IP_ADDRESS} ${_META_HOSTNAME} ${HOSTNAME}"
# Set system wide default region for boto3
echo "export AWS_DEFAULT_REGION=$REGION" > /etc/profile.d/aws.sh
# workaround: dhcpcd / openresolv omit the search domain when it equals the domain, which breaks DNS resolution of short names, eg. etcd and kube-apiserver
add_once /etc/resolv.conf "search $DOMAIN_NAME"
case "$CLOUD" in
aws)
# Set system wide default region for boto3
echo "export AWS_DEFAULT_REGION=$REGION" > /etc/profile.d/aws.sh
setup_sns_alarms
;;
*)
@ -119,9 +42,9 @@ setup_instance() {
esac
}
################
# IAM SSH KEYS #
################
configure_sshd() {
# Change Listen port
local port=${SSHPORT:-"22"}
@ -139,9 +62,12 @@ configure_sshd() {
sed -i -e 's,^[\s#]*AuthorizedKeysCommand\s.*,AuthorizedKeysCommand /usr/sbin/get_iam_sshkeys.py --user %u --group '$group' --iamRole "'$role'",' /etc/ssh/sshd_config
sed -i -e 's,^[\s#]*AuthorizedKeysCommandUser\s.*,AuthorizedKeysCommandUser nobody,' /etc/ssh/sshd_config
ebegin "added $group to SSH admin keys"
einfo "added $group to SSH admin keys"
fi
;;
nocloud)
return 0
;;
*)
ewarn "Unsupported Cloud: $CLOUD"
# return 1
@ -223,38 +149,6 @@ set_hostname() {
fi
}
# various early volume functions
attach_ebs() {
local volId="$1"
local device="$2"
local tries=30
while true; do
_json="$(aws ec2 describe-volumes --volume-ids $volId --region $REGION --output json)"
rc=$?; [ $rc -ne 0 ] && return $rc
vol_status=$(echo "$_json" | jq -r .Volumes[].State)
attachedId=$(echo "$_json" | jq -r .Volumes[].Attachments[].InstanceId)
[ "$attachedId" = "$INSTANCE_ID" ] && break
if [ "$vol_status" = "available" ]; then
aws ec2 attach-volume --volume-id "$volId" --instance-id "$INSTANCE_ID" --region "$REGION" --device "$device" > /dev/null
rc=$?; [ $rc -ne 0 ] && return $rc
break
fi
# if attached but not to us -> detach
if [ "$vol_status" = "in-use" ]; then
aws ec2 detach-volume --volume-id "$volId" --region "$REGION" --force
rc=$?; [ $rc -ne 0 ] && return $rc
fi
((tries=tries-1))
[ $tries -eq 0 ] && return 1
sleep 5
done
}
_parse_volume() {
# Todo: proper checks once all is yaml
@ -294,7 +188,7 @@ mount_volumes() {
mkdir -p $volPath
mount -t xfs -o noatime $volDevice $volPath
ebegin "mounting $volDevice at $volPath"
einfo "mounting $volDevice at $volPath"
done
}
@ -311,17 +205,6 @@ unmount_volumes() {
# msg used for sns event, last one wins
msg() { MSG="$@"; log -t user-data info "$@"; }
# boolean flags
is_enabled() {
local flag=$(echo "$1" | tr '[:upper:]' '[:lower:]')
[ "$flag" == 1 -o "$flag" == "true" ] && return 0
[ "$flag" == 0 -o "$flag" == "false" -o -z "$flag" ] && return 1
log -t user-data warn "Unknown value for boolean option: $flag - assuming False"
return 1
}
# Generic retry command wrapper, incl. timeout of 30s
# $1 = number of tries; 0 = forever
# $2 = number of seconds to sleep between tries
@ -369,42 +252,29 @@ init_passphrase() {
{ xxd -l16 -p /dev/random > $_PPFILE; chmod 600 $_PPFILE; put_secret $_URL "$(cat $_PPFILE)"; }
}
asg_heartbeat() {
[ -n "$LAUNCH_HOOK" ] && aws autoscaling record-lifecycle-action-heartbeat --instance-id $INSTANCE_ID --lifecycle-hook-name $LAUNCH_HOOK --auto-scaling-group-name $AWS_AUTOSCALING_GROUPNAME || true
# upload various useful logs to s3 if configured
upload_debug_logs() {
[ -z "$ZDT_CLOUDBENDER_DEBUG_REMOTELOGS" ] && return 0
local s3Url="$ZDT_CLOUDBENDER_DEBUG_REMOTELOGS/$INSTANCE_ID/$(date +'%Y%m%d-%H%M%Z')"
local _tmp=$(mktemp -d)
ps -ef > ${_tmp}/process.list
cp /var/log/messages \
/var/log/rc.log \
/var/log/user-data.log \
/etc/cloudbender.conf \
/var/lib/cloud/meta-data \
/var/log/kubelet/kubelet.log \
/var/log/crio/crio.log \
$_tmp
tar cfz /tmp/debuglogs.tgz -C $_tmp .
aws s3 cp /tmp/debuglogs.tgz $s3Url/debuglogs.tgz
return 0
}
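Note: a hedged sketch of retrieving such a bundle for inspection, with a made-up bucket and prefix matching the $s3Url layout above:

    aws s3 ls s3://zdt-debug/i-0123456789abcdef0/
    aws s3 cp s3://zdt-debug/i-0123456789abcdef0/20240410-1203UTC/debuglogs.tgz .
    tar xzf debuglogs.tgz && less messages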
setup_sns_alarms() {
# store SNS message json template
cat <<EOF > /var/lib/cloud/sns_alarm.json
{
"Source": "CloudBender",
"AWSAccountId": "$AWS_ACCOUNT_ID",
"Region": "$REGION",
"Artifact": "$ARTIFACT",
"Asg": "$AWS_AUTOSCALING_GROUPNAME",
"Instance": "$INSTANCE_ID",
"ip": "$IP_ADDRESS"
}
EOF
cat <<'EOF' > /var/lib/cloud/sns_alarm.sh
#!/bin/bash
SUBJECT=$1
MSG=$2
LEVEL=${3:-Info}
ATTACHMENT=${4:-""}
EMOJI=${5:-""}
EOF
if [ -n "$ALARMSNSARN" ]; then
cat <<EOF >> /var/lib/cloud/sns_alarm.sh
jq -M --arg subject "\$SUBJECT" --arg level "\$LEVEL" --arg msg "\$MSG" --arg attachment "\$ATTACHMENT" --arg emoji "\$EMOJI" --arg hostname "\$HOSTNAME" '.Subject = \$subject | .Level = \$level | .Message = \$msg | .Attachment = \$attachment | .Emoji = \$emoji | .Hostname = \$hostname' < /var/lib/cloud/sns_alarm.json | sed -e 's/\\\\\\\\/\\\\/g' > /tmp/sns.json
aws sns publish --region ${REGION} --target-arn $ALARMSNSARN --message file:///tmp/sns.json
EOF
fi
chmod +x /var/lib/cloud/sns_alarm.sh
}
exit_trap() {
set +e
@ -434,7 +304,7 @@ exit_trap() {
MSG="$ERR_CMD"
fi
if [ -n "$DEBUG" ]; then
if [ -n "$ZDT_CLOUDBENDER_DEBUG" ]; then
SUBJECT="$SUBJECT Instance kept running for debug."
else
SUBJECT="$SUBJECT Instance terminated by ASG lifecycle hook."
@ -460,10 +330,12 @@ exit_trap() {
# timestamp being done
end_uptime=$(awk '{print $1}' < /proc/uptime)
log -t user-data info "Exiting user-data. Duration: $(echo "$end_uptime-$start_uptime" | bc) seconds"
log -t user-data info "Exiting user-data. $end_uptime seconds after boot. Duration: $(echo "$end_uptime-$start_uptime" | bc)"
# Shutdown / poweroff if we ran into error and not DEBUG
[ $ERR_CODE -ne 0 -a -z "$DEBUG" ] && poweroff
# if we ran into error, either upload debug files or poweroff
if [ $ERR_CODE -ne 0 ]; then
is_enabled $ZDT_CLOUDBENDER_DEBUG && upload_debug_logs || poweroff
fi
exit 0
}
@ -567,96 +439,28 @@ register_service_dns() {
route53.py --fqdn "${SERVICENAME}.${DNSZONE}" --record $_IP
# Register shutdown hook to remove DNS entry on terminate
cat <<EOF >> /etc/local.d/route53.stop
echo "Deleting Route53 record for ${SERVICENAME}.${DNSZONE}" >> /tmp/shutdown.log
route53.py --delete --fqdn "${SERVICENAME}.${DNSZONE}" --record ${PUBLIC_IP_ADDRESS:-$IP_ADDRESS}
EOF
chmod +x /etc/local.d/route53.stop
add_once /etc/local.d/cloudbender.stop "echo \"Deleting Route53 record for ${SERVICENAME}.${DNSZONE}\" >> /tmp/shutdown.log"
add_once /etc/local.d/cloudbender.stop "route53.py --delete --fqdn \"${SERVICENAME}.${DNSZONE}\" --record ${PUBLIC_IP_ADDRESS:-$IP_ADDRESS}"
# Short cut our public IP to private one to allow talking to our own service name
add_once /etc/hosts "${IP_ADDRESS} ${SERVICENAME}.${DNSZONE}"
log -t user-data info "Registered $_IP with ${SERVICENAME}.$DNSZONE"
fi
}
# associate EIP
# return 0 if we attached an EIP
# return 1 if the public IP did NOT change or on any other error
associate_eip() {
local instance_id=$1
local eip=$(echo $2 | sed -e 's/\/32//' | grep -E -o "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)") || true
local current_instance
setup_prometheus() {
rc-update add node-exporter default
rc-service node-exporter start
if [ -n "$eip" ]; then
if [ "$eip" != "0.0.0.0" ]; then
read eip_alloc_id eip_assoc_id current_instance < <(aws ec2 describe-addresses --public-ips $eip --query 'Addresses[*].[AllocationId,AssociationId,InstanceId]' || true)
# If we already own and have the EIP attached -> done
[ "$instance_id" == "$current_instance" ] && return
if [ ! -z "$eip_alloc_id" ]; then
if [[ "$eip_assoc_id" =~ ^eipassoc- ]]; then
log -t user-data info "EIP $eip already associated via Association ID ${eip_assoc_id}. Disassociating."
retry 3 10 aws ec2 disassociate-address --association-id $eip_assoc_id
fi
log -t user-data info "Associating Elastic IP $eip via Allocation ID $eip_alloc_id with Instance $instance_id"
aws ec2 associate-address --no-allow-reassociation --instance-id $instance_id --allocation-id $eip_alloc_id
return
else
log -t user-data warn "Elastic IP $eip address not found."
fi
else
log -t user-data info "0.0.0.0 requested, keeping AWS assigned IP."
fi
else
log -t user-data debug "Invalid or no ElasticIP defined. Skip"
fi
return 1
}
# Disable the EC2 source/destination check so the instance accepts traffic not addressed to itself
disable_source_dest_check() {
aws ec2 modify-instance-attribute --instance-id ${INSTANCE_ID} --source-dest-check "{\"Value\": false}"
}
# Register ourselves in the given route tables
register_routes() {
local rtb_id_list="$1"
local route_cidr="$2"
for cidr in ${route_cidr//,/ }; do
if [ "$cidr" != "$VPC_CIDR_RANGE" ]; then
for rt in ${rtb_id_list//,/ }; do
if [[ "$rt" =~ ^rtb-[a-f0-9]*$ ]]; then
aws ec2 create-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID} || \
aws ec2 replace-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID}
else
log -t user-data warn "Invalid Route Table ID: $rt"
fi
done
fi
done
}
setup_nat() {
local mode=$1
# Masquerade all outgoing traffic
iptables -t nat -A POSTROUTING -o $DEFAULT_GW_INTERFACE -s ${VPC_CIDR_RANGE} -j MASQUERADE
log -t user-data info "Enabled and started Prometheus node-exporter"
}
setup_fluentbit() {
local key="cloudbender"
local host="$1"
local host="${1:-fluentd}"
if [[ "$host" =~ "@" ]]; then
key=${host%%@*}
@ -688,9 +492,14 @@ EOF
Send_options true
Require_ack_response true
EOF
LOG_FILES=$LOGGING_FILES
## TODO:
# Add parameter parsing for custom logfile tailing
rc-update add fluent-bit default
rc-service fluent-bit start
log -t user-data info "Enabled and started fluent-bit logging agent sending logs to $host"
}
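Note: LOGGING_HOST accepts an optional key@host form, where the key part (default "cloudbender") is split off before the host is used; a hedged parameter example:

    LOGGING_ENABLED=True
    LOGGING_HOST="mykey@fluentd.example.com"   # key "mykey", forward target fluentd.example.com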

View File

@ -1,5 +1,5 @@
print_info() {
echo -e "\n"
#echo -e "\n"
prin "$(color 1)Welcome to Alpine - ZeroDownTime edition"
echo

View File

@ -1,12 +1,12 @@
# syslog-ng, format all json into messages
# https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.23/administration-guide/63#TOPIC-1268643
@version: 4.1
@version: 4.5
@include "scl.conf"
options { chain_hostnames(off); flush_lines(0); use_dns(no); use_fqdn(no);
dns_cache(no); owner("root"); group("adm"); perm(0640);
stats(freq(43200)); bad_hostname("^gconfd$"); frac-digits(6);
stats(freq(43200)); bad_hostname("^gconfd$"); frac-digits(9); keep-timestamp(no);
};
source s_sys { system(); internal();};
@ -15,8 +15,9 @@ destination d_mesg { file("/var/log/messages" template("$(format-json time=\"$UN
# filter ipvs loggging each SYN to closed port
# IPVS: rr: TCP 10.52.82.199:31021 - no destination available
filter f_drop_ipvs { not (facility(kern) and match("IPVS: rr:.*no destination available" value("MESSAGE"))); };
# filter f_drop_ipvs { not (facility(kern) and match("IPVS: rr:.*no destination available" value("MESSAGE"))); };
# "message":"net_ratelimit: 16 callbacks suppressed"
filter f_drop_ipvs_ratelimit { not (facility(kern) and match("net_ratelimit:.*callbacks suppressed" value("MESSAGE"))); };
# filter f_drop_ipvs_ratelimit { not (facility(kern) and match("net_ratelimit:.*callbacks suppressed" value("MESSAGE"))); };
# log { source(s_sys); filter(f_drop_ipvs); filter(f_drop_ipvs_ratelimit); destination(d_mesg); };
log { source(s_sys); filter(f_drop_ipvs); filter(f_drop_ipvs_ratelimit); destination(d_mesg); };
log { source(s_sys); destination(d_mesg); };
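Note: with the format-json template every record in /var/log/messages becomes one JSON object per line, now with nanosecond-resolution timestamps from frac-digits(9); a purely illustrative line (field names not taken from the truncated template above):

    {"time":"1712750400.000000123","host":"node1","program":"monit","message":"'rootfs' space usage ok"}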

View File

@ -0,0 +1,63 @@
#!/usr/bin/python3
import os
import boto3
import argparse
parser = argparse.ArgumentParser(
description="Get SSM parameters beyond <path> and write to files")
parser.add_argument(
"--path",
dest="path",
action="store",
required=True,
help="SSM parameter path")
parser.add_argument(
"--root",
dest="root",
action="store",
required=True,
help="root filesystem path to create files")
args = parser.parse_args()
session = boto3.Session()
awsSSMClient = session.client('ssm')
def get_parameters_by_path(nextToken=None):
params = {
'Path': args.path,
'Recursive': True,
'WithDecryption': True
}
if nextToken is not None:
params['NextToken'] = nextToken
return awsSSMClient.get_parameters_by_path(**params)
def getParameters():
nextToken = None
while True:
response = get_parameters_by_path(nextToken)
parameters = response['Parameters']
if len(parameters) == 0:
break
for parameter in parameters:
yield parameter
if 'NextToken' not in response:
break
nextToken = response['NextToken']
for parameter in getParameters():
file_name = os.path.join(
args.root, parameter["Name"].removeprefix(
args.path).lstrip("/"))
os.makedirs(os.path.dirname(file_name), mode=0o755, exist_ok=True)
#print(f'{file_name}={parameter["Value"]}')
with open(file_name, "w") as file:
file.write(parameter["Value"])
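Note: a hedged invocation sketch for the new write_parameters.py as it is packaged into /usr/sbin, with a made-up SSM path:

    write_parameters.py --path /kubezero/prod-1/worker --root /var/lib/cloud/parameters
    # eg. /kubezero/prod-1/worker/kubeadm/token would be written to /var/lib/cloud/parameters/kubeadm/token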

View File

@ -1,5 +1,7 @@
#!/bin/sh
. /lib/tiny-cloud/common
# Enable SSH keepalive
sed -i -e 's/^[\s#]*TCPKeepAlive\s.*/TCPKeepAlive yes/' -e 's/^[\s#]*ClientAliveInterval\s.*/ClientAliveInterval 60/' /etc/ssh/sshd_config
echo 'enabled SSH keep alives'
@ -9,6 +11,10 @@ sed -i -e 's/^[\s#]*rc_cgroup_mode=.*/rc_cgroup_mode="unified"/' /etc/rc.conf
sed -i -e 's/^[\s#]*rc_logger=.*/rc_logger="YES"/' /etc/rc.conf
echo 'enabled cgroupv2, openRC logging'
# speed up dhcpcd and chronyd
add_once /etc/dhcpcd.conf noarp >/dev/null
sed -i -e 's/^[\s#]*FAST_STARTUP=.*/FAST_STARTUP=yes/' /etc/conf.d/chronyd
# OpenRC parallel - causes too much chaos
#sed -i -e 's/^[\s#]*rc_parallel=.*/rc_parallel="YES"/' /etc/rc.conf
#echo 'enable parallel openRC'
@ -25,7 +31,7 @@ cp /lib/zdt/syslog-ng.apparmor /etc/apparmor.d/local/sbin.syslog-ng
echo 'syslog-ng: all to /var/log/messages as json, rotate hourly'
# use init to spawn monit
echo ":2345:respawn:/usr/bin/monit -Ic /etc/monitrc.zdt" >> /etc/inittab
add_once /etc/inittab ":2345:respawn:/usr/bin/monit -Ic /etc/monitrc.zdt" >/dev/null
echo 'Enable monit via inittab'
# QoL - color prompt even for doas bash

View File

@ -13,3 +13,4 @@ net.ipv4.ip_forward_use_pmtu = 0
kernel.panic = 10
kernel.panic_on_oops = 1
vm.oom_dump_tasks = 0
vm.max_map_count = 262144
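Note: 262144 is the value commonly required by mmap-heavy workloads such as Elasticsearch; once the file is applied it can be verified with:

    sysctl vm.max_map_count    # expect: vm.max_map_count = 262144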