Compare commits

...

22 Commits

Author SHA1 Message Date
Stefan Reimer 239143c856 feature: final touches to make latest Nvidia GPU support work on V1.28 2024-04-19 13:39:25 +00:00
Stefan Reimer e4518617a0 feat: add libexecinfo to support Lambda containers based on Alpine 3.19 2024-04-08 12:19:01 +00:00
Stefan Reimer 363ba90c3c feat: KubeZero v1.28 2024-04-03 14:27:05 +00:00
Stefan Reimer f289ad4d07 feat: Nocloud metal and vm configs, Alpine 3.19 os WIP 2024-03-20 11:04:12 +00:00
Stefan Reimer bfba223c17 feat: First steps of V1.28 based on Alpine 3.19 2024-03-12 15:18:42 +00:00
Stefan Reimer a2acb94732 Add remaining falco pieces 2023-11-08 13:00:26 +00:00
Stefan Reimer f5c51cd71c falco version bump 2023-11-07 16:31:20 +00:00
Stefan Reimer 62a146f1a2 Minor falco tweaks before the version upgrade 2023-11-06 14:25:51 +00:00
Stefan Reimer 9cf3c9d95a Merge latest ci-tools-lib 2023-10-05 16:49:28 +00:00
Stefan Reimer 3ee4cb1d85 Squashed '.ci/' changes from c1a48a6..7144a42
7144a42 Improve Trivy scanning logic

git-subtree-dir: .ci
git-subtree-split: 7144a42a3c436996722f1e67c3cce5c87fdbf464
2023-10-05 16:49:28 +00:00
Stefan Reimer b0ca301ee5 RC - Point release v3.18.4 2023-10-05 16:46:40 +00:00
Stefan Reimer 070b8d0233 Squashed '.ci/' changes from 318c19e..c1a48a6
c1a48a6 Remove auto stash push / pop as being too dangerous

git-subtree-dir: .ci
git-subtree-split: c1a48a6aede4a08ad1e230121bf8b085ce9ef9e6
2023-09-26 18:06:16 +00:00
Stefan Reimer d4c543a330 Merge latest ci-tools-lib 2023-09-26 18:06:16 +00:00
Stefan Reimer 80550a76c6 Squashed '.ci/' changes from 79eebe4..318c19e
318c19e Add merge comment for subtree
22ed100 Fix custom branch docker tags
227e39f Allow custom GIT_TAG
38a9cda Debug CI pipeline
3efcc81 Debug CI pipeline
5023473 Make branch detection work for tagged commits
cdc32e0 Improve cleanup flow
8df60af Fix derp
748a4bd Migrate to :: to allow custom make steps, add generic stubs
955afa7 Apply pep8
5819ded Improve ECR public lifecycle handling via python script
5d4e4ad Make rm-remote-untagged less noisy
f00e541 Add cleanup step to remove untagged images by default
0821e91 Ensure tag names are valid for remote branches like PRs

git-subtree-dir: .ci
git-subtree-split: 318c19e5d7ac0fd5675a13652650faa8aced67f9
2023-08-18 15:19:17 +00:00
Stefan Reimer 78826b6604 Merge commit '80550a76c6d6d875d4a0915e9bc48f73b3565dcd' 2023-08-18 15:19:17 +00:00
Stefan Reimer 961d6621da Kubezero 1.26.7-rc1 2023-08-18 15:17:00 +00:00
Stefan Reimer 0402adbf2e New ZeroDownTime CDN infra 2023-07-31 18:19:59 +00:00
Stefan Reimer 59a6f7521f feat: build falco dynamically, dedicated falco-kernel package for quick kernel updates 2023-07-31 18:19:31 +00:00
Stefan Reimer c850734f9a Feat: KubeZero v1.26.6 2023-07-04 15:10:30 +00:00
Stefan Reimer 4c8c495e77 Squashed '.ci/' changes from aea1ccc..79eebe4
79eebe4 add ARCH support for tests

git-subtree-dir: .ci
git-subtree-split: 79eebe4d3de843d921994db20e20dda801272934
2023-06-21 14:52:11 +00:00
Stefan Reimer 582fbd5da3 Merge commit '4c8c495e771962e0940f19195f3b14e9748e0954' 2023-06-21 14:52:11 +00:00
Stefan Reimer b3e8338df9 Add kubezero-imagecache 2023-05-06 18:07:41 +00:00
88 changed files with 6933 additions and 773 deletions

63
.ci/ecr_public_lifecycle.py Executable file
View File

@ -0,0 +1,63 @@
#!/usr/bin/env python3
"""Implement a basic lifecycle policy for a public ECR repository.

Keeps the N most recently pushed tagged images (default 10), deletes all
untagged images, and optionally (--dev) deletes in-development images whose
tags ALL look like SemVer dev tags (e.g. v0.1.1-commitNr-githash).
"""
import argparse

import boto3

parser = argparse.ArgumentParser(
    description='Implement basic public ECR lifecycle policy')
parser.add_argument('--repo', dest='repositoryName', action='store', required=True,
                    help='Name of the public ECR repository')
parser.add_argument('--keep', dest='keep', action='store', default=10, type=int,
                    help='number of tagged images to keep, default 10')
parser.add_argument('--dev', dest='delete_dev', action='store_true',
                    help='also delete in-development images only having tags like v0.1.1-commitNr-githash')
args = parser.parse_args()

# The ecr-public API endpoint only exists in us-east-1
client = boto3.client('ecr-public', region_name='us-east-1')

# Paginate describe_images: a single call returns at most one page, which
# silently missed images in repositories with many entries.
images = []
for page in client.get_paginator('describe_images').paginate(
        repositoryName=args.repositoryName):
    images.extend(page['imageDetails'])

to_delete = []
kept = 0

# actual Image
# imageManifestMediaType: 'application/vnd.oci.image.manifest.v1+json'
# image Index
# imageManifestMediaType: 'application/vnd.oci.image.index.v1+json'

# Sort by date uploaded, newest first
for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
    # Remove all untagged images
    # if registry uses image index all actual images will be untagged anyways
    if 'imageTags' not in image:
        to_delete.append({"imageDigest": image['imageDigest']})
        # print("Delete untagged image {}".format(image["imageDigest"]))
        continue

    # check for dev tags: delete only if ALL tags are SemVer dev tags,
    # i.e. no tag exists WITHOUT a '-' in it
    if args.delete_dev:
        if all("-" in tag for tag in image["imageTags"]):
            print("Deleting development image {}".format(image["imageTags"]))
            to_delete.append({"imageDigest": image['imageDigest']})
            continue

    if kept < args.keep:
        kept += 1
        print("Keeping tagged image {}".format(image["imageTags"]))
    else:
        print("Deleting tagged image {}".format(image["imageTags"]))
        to_delete.append({"imageDigest": image['imageDigest']})

# batch_delete_image rejects an empty imageIds list and accepts at most
# 100 ids per call, so guard and chunk accordingly.
for i in range(0, len(to_delete), 100):
    deleted_images = client.batch_delete_image(
        repositoryName=args.repositoryName, imageIds=to_delete[i:i + 100])
    if deleted_images["imageIds"]:
        print("Deleted images: {}".format(deleted_images["imageIds"]))

View File

@ -1,25 +1,26 @@
# Parse version from latest git semver tag
GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_TAG := $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
TAG := $(GIT_TAG)
TAG ::= $(GIT_TAG)
# append branch name to tag if NOT main nor master
ifeq (,$(filter main master, $(GIT_BRANCH)))
# If branch is substring of tag, omit branch name
ifeq ($(findstring $(GIT_BRANCH), $(GIT_TAG)),)
# only append branch name if not equal tag
ifneq ($(GIT_TAG), $(GIT_BRANCH))
TAG = $(GIT_TAG)-$(GIT_BRANCH)
# Sanitize GIT_BRANCH to allowed Docker tag character set
TAG = $(GIT_TAG)-$(shell echo $$GIT_BRANCH | sed -e 's/[^a-zA-Z0-9]/-/g')
endif
endif
endif
ARCH := amd64
ALL_ARCHS := amd64 arm64
ARCH ::= amd64
ALL_ARCHS ::= amd64 arm64
_ARCH = $(or $(filter $(ARCH),$(ALL_ARCHS)),$(error $$ARCH [$(ARCH)] must be exactly one of "$(ALL_ARCHS)"))
ifneq ($(TRIVY_REMOTE),)
TRIVY_OPTS := --server $(TRIVY_REMOTE)
TRIVY_OPTS ::= --server $(TRIVY_REMOTE)
endif
.SILENT: ; # no need for @
@ -32,18 +33,20 @@ endif
help: ## Show Help
grep -E '^[a-zA-Z_-]+:.*?## .*$$' .ci/podman.mk | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
prepare:: ## custom step on the build agent before building
fmt:: ## auto format source
lint:: ## Lint source
build: ## Build the app
buildah build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .
test: rm-test-image ## Execute Dockerfile.test
test -f Dockerfile.test && \
{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(_ARCH) . && \
podman run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(_ARCH)-test; } || \
echo "No Dockerfile.test found, skipping test"
test:: ## test built artificats
scan: ## Scan image using trivy
echo "Scanning $(IMAGE):$(TAG)-$(_ARCH) using Trivy $(TRIVY_REMOTE)"
trivy image $(TRIVY_OPTS) localhost/$(IMAGE):$(TAG)-$(_ARCH)
trivy image $(TRIVY_OPTS) --quiet --no-progress localhost/$(IMAGE):$(TAG)-$(_ARCH)
# first tag and push all actual images
# create new manifest for each tag and add all available TAG-ARCH before pushing
@ -63,24 +66,19 @@ push: ecr-login ## push images to registry
ecr-login: ## log into AWS ECR public
aws ecr-public get-login-password --region $(REGION) | podman login --username AWS --password-stdin $(REGISTRY)
clean: rm-test-image rm-image ## delete local built container and test images
rm-remote-untagged: ## delete all remote untagged and in-dev images, keep 10 tagged
echo "Removing all untagged and in-dev images from $(IMAGE) in $(REGION)"
.ci/ecr_public_lifecycle.py --repo $(IMAGE) --dev
rm-remote-untagged: ## delete all remote untagged images
echo "Removing all untagged images from $(IMAGE) in $(REGION)"
IMAGE_IDS=$$(for image in $$(aws ecr-public describe-images --repository-name $(IMAGE) --region $(REGION) --output json | jq -r '.imageDetails[] | select(.imageTags | not ).imageDigest'); do echo -n "imageDigest=$$image "; done) ; \
[ -n "$$IMAGE_IDS" ] && aws ecr-public batch-delete-image --repository-name $(IMAGE) --region $(REGION) --image-ids $$IMAGE_IDS || echo "No image to remove"
clean:: ## clean up source folder
rm-image:
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH) > /dev/null
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || echo "Error: Removing image failed"
# Ensure we run the tests by removing any previous runs
rm-test-image:
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH)-test)" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH)-test > /dev/null
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH)-test)" || echo "Error: Removing test image failed"
## some useful tasks during development
ci-pull-upstream: ## pull latest shared .ci subtree
git stash && git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash && git stash pop
git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
create-repo: ## create new AWS ECR public repository
aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)

View File

@ -10,18 +10,22 @@ def call(Map config=[:]) {
stages {
stage('Prepare') {
steps {
sh 'mkdir -p reports'
// we set pull tags as project adv. options
// pull tags
withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
sh 'git fetch -q --tags ${GIT_URL}'
}
sh 'make prepare || true'
//withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
// sh 'git fetch -q --tags ${GIT_URL}'
//}
// Optional project specific preparations
sh 'make prepare'
}
}
// Build using rootless podman
stage('Build') {
steps {
sh 'make build'
sh 'make build GIT_BRANCH=$GIT_BRANCH'
}
}
@ -33,12 +37,13 @@ def call(Map config=[:]) {
// Scan via trivy
stage('Scan') {
environment {
TRIVY_FORMAT = "template"
TRIVY_OUTPUT = "reports/trivy.html"
}
steps {
sh 'mkdir -p reports && make scan'
// we always scan and create the full json report
sh 'TRIVY_FORMAT=json TRIVY_OUTPUT="reports/trivy.json" make scan'
// render custom full html report
sh 'trivy convert -f template -t @/home/jenkins/html.tpl -o reports/trivy.html reports/trivy.json'
publishHTML target: [
allowMissing: true,
alwaysLinkToLastBuild: true,
@ -48,26 +53,33 @@ def call(Map config=[:]) {
reportName: 'TrivyScan',
reportTitles: 'TrivyScan'
]
sh 'echo "Trivy report at: $BUILD_URL/TrivyScan"'
// Scan again and fail on CRITICAL vulns, if not overridden
// fail build if issues found above trivy threshold
script {
if (config.trivyFail == 'NONE') {
echo 'trivyFail == NONE, review Trivy report manually. Proceeding ...'
} else {
sh "TRIVY_EXIT_CODE=1 TRIVY_SEVERITY=${config.trivyFail} make scan"
if ( config.trivyFail ) {
sh "TRIVY_SEVERITY=${config.trivyFail} trivy convert --report summary --exit-code 1 reports/trivy.json"
}
}
}
}
// Push to container registry, skip if PR
// Push to container registry if not PR
// incl. basic registry retention removing any untagged images
stage('Push') {
when { not { changeRequest() } }
steps {
sh 'make push'
sh 'make rm-remote-untagged'
}
}
// generic clean
stage('cleanup') {
steps {
sh 'make clean'
}
}
}
}
}

View File

@ -1,12 +1,11 @@
FROM alpine:3.17
ARG ALPINE="v3.17"
FROM alpine:3.19
ARG ALPINE="v3.19"
ARG BUILDUSER=alpine
RUN echo "http://dl-cdn.alpinelinux.org/alpine/${ALPINE}/main" > /etc/apk/repositories && \
echo "http://dl-cdn.alpinelinux.org/alpine/${ALPINE}/community" >> /etc/apk/repositories && \
echo "@edge-main http://dl-cdn.alpinelinux.org/alpine/edge/main" >> /etc/apk/repositories && \
echo "@edge-community http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories && \
echo "@kubezero https://cdn.zero-downtime.net/alpine/${ALPINE}/kubezero" >> /etc/apk/repositories && \
wget -q -O /etc/apk/keys/stefan@zero-downtime.net-61bb6bfb.rsa.pub https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub
echo "@edge-community http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories
RUN apk -U --no-cache upgrade && \
apk --no-cache add \
@ -17,15 +16,19 @@ RUN apk -U --no-cache upgrade && \
xz
# gcc-gnat \
RUN adduser -D alpine && \
addgroup alpine abuild && \
RUN adduser -D $BUILDUSER && \
addgroup $BUILDUSER abuild && \
echo "permit nopass :abuild" > /etc/doas.d/doas.conf && \
install -d -g abuild -m 775 /var/cache/distfiles && \
install -d -g abuild -m 775 /packages
install -d -g abuild -m 775 /packages && \
echo -e "$BUILDUSER:1001:64535" > /etc/subuid && \
echo -e "$BUILDUSER:1001:64535" > /etc/subgid && \
echo "@kubezero https://cdn.zero-downtime.net/alpine/${ALPINE}/kubezero" >> /etc/apk/repositories && \
wget -q -O /etc/apk/keys/stefan@zero-downtime.net-61bb6bfb.rsa.pub https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub
COPY abuilder aarch64-toolchain.sh /usr/bin/
WORKDIR /home/alpine
USER alpine
WORKDIR /home/$BUILDUSER
USER $BUILDUSER
ENTRYPOINT ["abuilder"]

View File

@ -4,9 +4,9 @@ REGION := us-east-1
include .ci/podman.mk
BUILDER := v3.17.3
BUILDER := v3.19.1
PKG := '*'
CF_DIST := E1YFUJXMCXT2RN
CF_DIST := E11OFTOA3L8IVY
BUILDER_RELEASE = $(shell echo $(BUILDER) | sed -e 's/-.*$$//')
RELEASE := $(shell echo $(BUILDER_RELEASE) | sed -e 's/\.[0-9]$$//')
@ -46,14 +46,14 @@ apk: packages distfiles
$(REGISTRY)/$(IMAGE):$(BUILDER) $(PKG)
download:
aws s3 sync s3://zero-downtime-web/cdn/alpine/$(RELEASE)/kubezero/x86_64/ packages/kubezero/x86_64/ --exclude APKINDEX.tar.gz
aws s3 sync s3://zero-downtime-web/cdn/alpine/$(RELEASE)/kubezero/aarch64/ packages/kubezero/aarch64/ --exclude APKINDEX.tar.gz
aws s3 sync s3://zero-downtime-web-cdn/alpine/$(RELEASE)/kubezero/x86_64/ packages/kubezero/x86_64/ --exclude APKINDEX.tar.gz
#aws s3 sync s3://zero-downtime-web-cdn/alpine/$(RELEASE)/kubezero/aarch64/ packages/kubezero/aarch64/ --exclude APKINDEX.tar.gz
invalidate_cdn:
aws cloudfront create-invalidation --distribution $(CF_DIST) --paths "/alpine/*"
upload: invalidate_cdn
aws s3 sync --delete packages/kubezero/x86_64/ s3://zero-downtime-web/cdn/alpine/$(RELEASE)/kubezero/x86_64/ --exclude APKINDEX.tar.gz
aws s3 sync --delete packages/kubezero/aarch64/ s3://zero-downtime-web/cdn/alpine/$(RELEASE)/kubezero/aarch64/ --exclude APKINDEX.tar.gz
aws s3 cp packages/kubezero/x86_64/APKINDEX.tar.gz s3://zero-downtime-web/cdn/alpine/$(RELEASE)/kubezero/x86_64/ --cache-control max-age=1
aws s3 cp packages/kubezero/aarch64/APKINDEX.tar.gz s3://zero-downtime-web/cdn/alpine/$(RELEASE)/kubezero/aarch64/ --cache-control max-age=1
aws s3 sync --delete packages/kubezero/x86_64/ s3://zero-downtime-web-cdn/alpine/$(RELEASE)/kubezero/x86_64/ --exclude APKINDEX.tar.gz
#aws s3 sync --delete packages/kubezero/aarch64/ s3://zero-downtime-web-cdn/alpine/$(RELEASE)/kubezero/aarch64/ --exclude APKINDEX.tar.gz
aws s3 cp packages/kubezero/x86_64/APKINDEX.tar.gz s3://zero-downtime-web-cdn/alpine/$(RELEASE)/kubezero/x86_64/ --cache-control max-age=1
#aws s3 cp packages/kubezero/aarch64/APKINDEX.tar.gz s3://zero-downtime-web-cdn/alpine/$(RELEASE)/kubezero/aarch64/ --cache-control max-age=1

View File

@ -11,7 +11,7 @@ if [ "$1" = 'aarch64-toolchain' ]; then
else
# Set ENV for cross compile for aarch64
if [ "$2" = "cross-arm64" ]; then
ALPINE="v3.16"
ALPINE="v3.19"
TARGET_ARCH=aarch64
SUDO_APK=abuild-apk
APORTS=/home/alpine/aports
@ -38,7 +38,7 @@ else
# If checksum is OK, build package
APKBUILD=$pkg abuild verify && rc=$? || rc=$?
if [ $rc -eq 0 ]; then
CHOST=$TARGET_ARCH APKBUILD=$pkg abuild -r
APKBUILD=$pkg abuild -r
else
APKBUILD=$pkg abuild checksum

View File

@ -1,7 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=aws-iam-authenticator
pkgver=0.6.2
pkgver=0.6.14
pkgrel=0
pkgdesc="AWS aws-iam-authenticator"
url="https://github.com/kubernetes-sigs/aws-iam-authenticator"
@ -20,5 +20,5 @@ package() {
}
sha512sums="
4789fe7c11d4d1b94da5f35844a0da8e62da743bef3fc13f668c542f3dbc83584ef29abbcebc6f4651aad8ecbd9195d6bfc13476c7dd4a1d34ed11822652fc5e aws-iam-authenticator-0.6.2.tar.gz
26a6b394fbe767910f605a356032338a4ec254b81cd470796e3137e3595fef338bd213dee8d956c8d23e16f5508741e78664cd0f8b1acd97321d2fb5b7b723af aws-iam-authenticator-0.6.14.tar.gz
"

View File

@ -1,7 +1,11 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
# https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/runtime/aws-neuronx-dkms/index.html#neuron-driver-release-notes
#
# Todo: needs fix of https://github.com/aws-neuron/aws-neuron-sdk/issues/843
#
pkgname=aws-neuron-driver
pkgver=2.8.4.0
pkgver=2.16.7.0
pkgrel=0
pkgdesc="Linux Kernel module for AWS Neuron INF instances"
url="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/index.html#"
@ -10,8 +14,6 @@ license="GPL-2.0"
makedepends="bash xz linux-headers linux-virt-dev"
options="!check"
# https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/neuron-driver.html#neuron-driver-release-notes
# apt-get download --print-uris aws-neuron-dkms | cut -d' ' -f1
source="$pkgname-$pkgver.deb::https://apt.repos.neuron.amazonaws.com/pool/main/a/aws-neuronx-dkms/aws-neuronx-dkms_"$pkgver"_amd64.deb"
unpack() {
@ -28,6 +30,9 @@ build() {
# Hack running the build inside a container other uname -r returns host kernel
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-virt))
unset CFLAGS CPPFLAGS CXXFLAGS
unset LDFLAGS
make KERNEL_SRC_DIR=/lib/modules/$KERNEL_VERSION/build
}
@ -42,5 +47,5 @@ package() {
}
sha512sums="
1fa536cf32fb9a0d383e73c6694ddbdee38a775a25a7d0013322c4e4b4c724d546082f88ac1c8e485e808312d7821453a9d27391e98f613431ccff0081a76483 aws-neuron-driver-2.8.4.0.deb
968eb60bcd6826fa7dba827c29eda0033c626e016c7a57389a821f64c966d076a85da885f772e937098885853fe50765013a1368aab8b0ca85d732c34e60a26e aws-neuron-driver-2.16.7.0.deb
"

View File

@ -3,14 +3,14 @@
# Contributor: TBK <alpine@jjtc.eu>
# Maintainer: ungleich <foss@ungleich.ch>
pkgname=cri-o
pkgver=1.25.3
pkgver=1.28.4
pkgrel=0
pkgdesc="OCI-based implementation of Kubernetes Container Runtime Interface"
url="https://github.com/cri-o/cri-o/"
arch="all"
license="Apache-2.0"
# Most tests will fail if not ran as root
# since it tries to create network interfaces
# Most tests will fail if not ran as root
# since it tries to create network interfaces
options="net chmod-clean !check"
depends="
cni-plugins
@ -19,7 +19,7 @@ depends="
containers-common
iproute2
iptables
runc
oci-runtime
"
makedepends="
bash
@ -33,35 +33,46 @@ makedepends="
libselinux-dev
lvm2-dev
ostree-dev
sqlite-dev
tzdata
"
checkdepends="bats cri-tools jq parallel sudo"
checkdepends="bats cri-tools jq parallel sudo conmon"
subpackages="
$pkgname-doc
$pkgname-bash-completion
$pkgname-zsh-completion
$pkgname-fish-completion
$pkgname-contrib-cni:contrib_cni:noarch
$pkgname-openrc
"
source="https://github.com/cri-o/cri-o/archive/v$pkgver/cri-o-$pkgver.tar.gz
#$pkgname-fish-completion
source="
$pkgname-$pkgver.tar.gz::https://github.com/cri-o/cri-o/archive/v$pkgver/cri-o-$pkgver.tar.gz
crio.conf
crio.initd
crio.logrotated
cni-plugins-path.patch
makefile-fix-install.patch
fix-test.patch
remove-systemd-files.patch
crictl.yaml
"
# secfixes:
# 1.23.2-r0:
# - CVE-2022-0811
# 1.24.1-r0:
# - CVE-2022-1708
# 1.26.2-r0:
# - CVE-2022-4318
export GOCACHE="${GOCACHE:-"$srcdir/go-cache"}"
export GOTMPDIR="${GOTMPDIR:-"$srcdir"}"
export GOMODCACHE="${GOMODCACHE:-"$srcdir/go"}"
export GOBIN="$GOPATH/bin"
build() {
export GOPATH="$srcdir"
export GOBIN="$GOPATH/bin"
# https://github.com/cri-o/cri-o/blob/master/install.md#build-tags
make BUILDTAGS="seccomp selinux containers_image_openpgp containers_image_ostree_stub apparmor"
make BUILDTAGS="libsqlite3 seccomp selinux apparmor containers_image_openpgp containers_image_ostree_stub"
}
check() {
@ -69,29 +80,36 @@ check() {
}
package() {
make DESTDIR="$pkgdir" PREFIX=/usr CRICTL_CONFIG_DIR="/etc/crio" OCIUMOUNTINSTALLDIR="/etc/crio" install
make \
DESTDIR="$pkgdir" \
PREFIX=/usr \
CRICTL_CONFIG_DIR="/etc/crio" \
OCIUMOUNTINSTALLDIR="/etc/crio" \
FISHINSTALLDIR=/usr/share/fish/vendor_completions.d \
install.bin-nobuild install.man-nobuild install.completions install.config-nobuild
# We want it in etc so apk does not overwrite it
mkdir -p "$pkgdir"/usr/share/oci-umount/oci-umount.d/
ln -sf /etc/crio/crio-umount.conf "$pkgdir"/usr/share/oci-umount/oci-umount.d/crio-umount.conf
ln -sf ../../../../etc/crio/crio-umount.conf "$pkgdir"/usr/share/oci-umount/oci-umount.d/crio-umount.conf
# The CNI plugins are recommended to be installed as examples
install -Dm644 contrib/cni/*.conflist -t "$pkgdir"/usr/share/doc/cri-o/examples/cni/
install -Dm755 "$srcdir"/crio.initd "$pkgdir"/etc/init.d/crio
install -Dm644 "$srcdir"/crio.conf "$pkgdir"/etc/crio/crio.conf
install -Dm644 "$srcdir"/crio.logrotated "$pkgdir"/etc/logrotate.d/crio
}
contrib_cni() {
pkgdesc="$pkgname contrib cni config files"
mkdir -p "$subpkgdir"/etc/cni/net.d
cp "$builddir"/contrib/cni/*.conf "$subpkgdir"/etc/cni/net.d
install -Dm644 "$srcdir"/crictl.yaml "$pkgdir"/etc/crictl.yaml
}
sha512sums="
39b162c55141af009879f600c4b6cf91b6d710392bf07783080efe195f3ece1a0ed186eeadaf3a84bbed11a376995c3fab3c951a6d7ed14bb7e85b39e7920e21 cri-o-1.25.3.tar.gz
e026f056ed92489413e16ed7955a9dcd7d1f4df1cc28e3ea785771b44d43811fea4f5b953cc46bc0c4aeac8ad07115bfff304d7516ebd24f2e58fe782ff812c8 crio.conf
29561e95398975748236217bbd9df64997f6e3de6c0555d007306bd0535895a648368385a13079eb7d52c06249a91980523a73b6563e86d0575d9cd9c3fa4ee9 crio.initd
8d27211a4baad86d5251faa396a23d78d2962de894124be851172d6e85fbf3c0da57ec08f70840c7d8526dc6daa93999485a8d92a1d2c33b374eff84b1e063ae cri-o-1.28.4.tar.gz
1f60719677295c9c5c615eb25d9159bde0af68a132eee67747f57fe76642d457c98c896c6189f85637d7b4ac24ba55fd9eaeb1699f43c3c5077b645f72a479fb crio.conf
e9149cc2ddd24328c5290d3aea895c01e2798e066897535384f615a556496acdd52a603a0f4ac3c4c70bd5c363592f23c8b4d1987bf738300112fc62e1def555 crio.initd
1115228546a696eeebeb6d4b3e5c3152af0c99a2559097fc5829d8b416d979c457b4b1789e0120054babf57f585d3f63cbe49949d40417ae7aab613184bf4516 crio.logrotated
0a567dfa431ab1e53f2a351689be8d588a60cc5fcdbda403ec4f8b6ab9b1c18ad425f6c47f9a5ab1491e3a61a269dc4efa6a59e91e7521fa2b6bb165074aa8e0 cni-plugins-path.patch
f9577aa7b1c90c6809010e9e406e65092251b6e82f6a0adbc3633290aa35f2a21895e1a8b6ba4b6375dcad3e02629b49a34ab16387e1c36eeb32c8f4dac74706 makefile-fix-install.patch
78c150f87027de489289596371dce0465159ced0758776b445deb58990e099de9c654406183c9da3cc909878b24d28db62121b7056cd180a6f2820e79e165cc6 remove-systemd-files.patch
b0fdaf2280968a69e05ef72288bbf6fc03787616c6b6fca1e4398f9849167f4773e5e6e72bf1738d1fff2a84e97aa00f23aabcd50898ba8ed130969f50363006 fix-test.patch
ae7e4a43f18076f19f3ae37d7302bfdf7a3befadf33e46bc9b1b14d50b605e8ba0d06d479568c24e8bf68f17c80ae48798068b2a46c3bcab565a5d225779f30e remove-systemd-files.patch
79e1a7c6183ba56f55d923e9d738be945564494042bc011d31e9195f66c268d702ee5c86711d4b46618285fc1b10b59ea55c321390feca770cfc7de334e103bd crictl.yaml
"

View File

@ -0,0 +1,5 @@
# runtime endpoint, see https://github.com/kubernetes/enhancements/issues/2371 might be fixed with 1.29 ?
runtime-endpoint: unix:///run/crio/crio.sock
image-endpoint: unix:///var/run/crio/crio.sock
timeout: 60
debug: false

View File

@ -3,3 +3,9 @@
# Overide defaults to not use systemd cgroups.
conmon_cgroup = "pod"
cgroup_manager = "cgroupfs"
default_runtime = "crun"
[crio.runtime.runtimes.crun]
runtime_type = "oci"
runtime_root = "/run/crun"

View File

@ -8,12 +8,15 @@ extra_started_commands="reload"
command="/usr/bin/${RC_SVCNAME}"
command_args="${command_args}"
command_background="true"
start_stop_daemon_args=" \
# We run all containers with nice level 1
start_stop_daemon_args="-N 1 \
--stdout /var/log/${RC_SVCNAME}/${RC_SVCNAME}.log \
--stderr /var/log/${RC_SVCNAME}/${RC_SVCNAME}.log"
depend() {
need net
use dns
}
checkconfig() {
@ -21,6 +24,8 @@ checkconfig() {
/var/log/${RC_SVCNAME}
checkpath --file --owner root:root --mode 0644 \
/var/log/${RC_SVCNAME}/${RC_SVCNAME}.log
checkpath --directory --owner root:root --mode 0750 \
/var/lib/crio
}
start() {

View File

@ -1,26 +1,45 @@
--- a/test/helpers.bash
+++ b/test/helpers.bash
@@ -68,7 +68,7 @@
# The default log directory where all logs will go unless directly specified by the kubelet
DEFAULT_LOG_PATH=${DEFAULT_LOG_PATH:-/var/log/crio/pods}
# Cgroup manager to be used
-CONTAINER_CGROUP_MANAGER=${CONTAINER_CGROUP_MANAGER:-systemd}
+CONTAINER_CGROUP_MANAGER=${CONTAINER_CGROUP_MANAGER:-cgroupfs}
# Image volumes handling
CONTAINER_IMAGE_VOLUMES=${CONTAINER_IMAGE_VOLUMES:-mkdir}
# Container pids limit
@@ -166,7 +166,7 @@
# Copy all the CNI dependencies around to ensure encapsulated tests
CRIO_CNI_PLUGIN="$TESTDIR/cni-bin"
mkdir "$CRIO_CNI_PLUGIN"
- cp /opt/cni/bin/* "$CRIO_CNI_PLUGIN"
+ cp /usr/libexec/cni/* "$CRIO_CNI_PLUGIN"
cp "$INTEGRATION_ROOT"/cni_plugin_helper.bash "$CRIO_CNI_PLUGIN"
sed -i "s;%TEST_DIR%;$TESTDIR;" "$CRIO_CNI_PLUGIN"/cni_plugin_helper.bash
diff --git a/test/cgroups.bats b/test/cgroups.bats
index 8beb6f06..80193413 100644
--- a/test/cgroups.bats
+++ b/test/cgroups.bats
@@ -45,6 +45,10 @@ EOF
}
@test "conmon pod cgroup" {
+ if [[ "$CONTAINER_CGROUP_MANAGER" != "systemd" ]]; then
+ skip "need systemd cgroup manager"
+ fi
+
CONTAINER_CGROUP_MANAGER="systemd" CONTAINER_DROP_INFRA_CTR=false CONTAINER_CONMON_CGROUP="pod" start_crio
jq ' .linux.cgroup_parent = "Burstablecriotest123.slice"' \
@@ -61,6 +65,10 @@ EOF
skip "not yet supported by conmonrs"
fi
+ if [[ "$CONTAINER_CGROUP_MANAGER" != "systemd" ]]; then
+ skip "need systemd cgroup manager"
+ fi
+
CONTAINER_CGROUP_MANAGER="systemd" CONTAINER_DROP_INFRA_CTR=false CONTAINER_CONMON_CGROUP="customcrioconmon.slice" start_crio
jq ' .linux.cgroup_parent = "Burstablecriotest123.slice"' \
@@ -77,6 +85,10 @@ EOF
skip "not supported for conmon"
fi
+ if [[ "$CONTAINER_CGROUP_MANAGER" != "systemd" ]]; then
+ skip "need systemd cgroup manager"
+ fi
+
configure_monitor_cgroup_for_conmonrs "customcrioconmon.slice"
CONTAINER_CGROUP_MANAGER="systemd" CONTAINER_DROP_INFRA_CTR=true start_crio
diff --git a/test/cni_plugin_helper.bash b/test/cni_plugin_helper.bash
index 04492172..abae521e 100755
--- a/test/cni_plugin_helper.bash
+++ b/test/cni_plugin_helper.bash
@@ -43,7 +43,7 @@
@@ -48,7 +48,7 @@ EOT
. "$TEST_DIR"/cni_plugin_helper_input.env
rm -f "$TEST_DIR"/cni_plugin_helper_input.env
@ -29,16 +48,25 @@
if [[ "${DEBUG_ARGS}" == "malformed-result" ]]; then
cat <<-EOF
--- a/test/cgroups.bats
+++ b/test/cgroups.bats
@@ -26,6 +26,10 @@
}
@test "conmon custom cgroup" {
+ if [[ "$CONTAINER_CGROUP_MANAGER" != "systemd" ]]; then
+ skip "need systemd cgroup manager"
+ fi
+
CONTAINER_CGROUP_MANAGER="systemd" CONTAINER_DROP_INFRA_CTR=false CONTAINER_MANAGE_NS_LIFECYCLE=false CONTAINER_CONMON_CGROUP="customcrioconmon.slice" start_crio
jq ' .linux.cgroup_parent = "Burstablecriotest123.slice"' \
diff --git a/test/common.sh b/test/common.sh
index f7f8e1f2..45b7dd58 100644
--- a/test/common.sh
+++ b/test/common.sh
@@ -41,7 +41,7 @@ # CNI path
if command -v host-local >/dev/null; then
CONTAINER_CNI_PLUGIN_DIR=${CONTAINER_CNI_PLUGIN_DIR:-$(dirname "$(readlink "$(command -v host-local)")")}
else
- CONTAINER_CNI_PLUGIN_DIR=${CONTAINER_CNI_PLUGIN_DIR:-/opt/cni/bin}
+ CONTAINER_CNI_PLUGIN_DIR=${CONTAINER_CNI_PLUGIN_DIR:-/usr/libexec/cni}
fi
# Runtime
CONTAINER_DEFAULT_RUNTIME=${CONTAINER_DEFAULT_RUNTIME:-runc}
@@ -74,7 +74,7 @@ CHECKCRIU_BINARY=${CHECKCRIU_BINARY:-${CRIO_ROOT}/test/checkcriu/checkcriu}
# The default log directory where all logs will go unless directly specified by the kubelet
DEFAULT_LOG_PATH=${DEFAULT_LOG_PATH:-/var/log/crio/pods}
# Cgroup manager to be used
-CONTAINER_CGROUP_MANAGER=${CONTAINER_CGROUP_MANAGER:-systemd}
+CONTAINER_CGROUP_MANAGER=${CONTAINER_CGROUP_MANAGER:-cgroupfs}
# Image volumes handling
CONTAINER_IMAGE_VOLUMES=${CONTAINER_IMAGE_VOLUMES:-mkdir}
# Container pids limit

View File

@ -6,8 +6,8 @@ index 19f8052..135385c 100644
sed -i '/# INCLUDE/q' scripts/get
cat contrib/bundle/install-paths contrib/bundle/install >> scripts/get
-install: .gopathok install.bin install.man install.completions install.systemd install.config
+install: .gopathok install.bin install.man install.completions install.config
-install: install.bin install.man install.completions install.systemd install.config
+install: install.bin install.man install.completions install.config
install.bin-nobuild:
install ${SELINUXOPT} -D -m 755 bin/crio $(BINDIR)/crio

View File

@ -1,25 +1,31 @@
# Contributor: Francesco Colista <fcolista@alpinelinux.org>
# Maintainer: Francesco Colista <fcolista@alpinelinux.org>
pkgname=cri-tools
pkgver=1.24.2
pkgver=1.28.0
pkgrel=0
pkgdesc="CLI tool for Kubelet Container Runtime Interface (CRI)"
url="https://github.com/kubernetes-sigs/cri-tools"
arch="x86_64 aarch64 ppc64le s390x armv7 x86"
arch="all !armhf"
license="Apache-2.0"
makedepends="go"
options="!check" # no check available
source="$pkgname-$pkgver.tar.gz::https://github.com/kubernetes-sigs/cri-tools/archive/v$pkgver.tar.gz"
export GOCACHE="${GOCACHE:-"$srcdir/go-cache"}"
export GOTMPDIR="${GOTMPDIR:-"$srcdir"}"
export GOMODCACHE="${GOMODCACHE:-"$srcdir/go"}"
build() {
make all
}
check() {
make test
}
package() {
install -Dm755 build/bin/crictl "$pkgdir/usr/bin/crictl"
install -Dm755 build/bin/critest "$pkgdir/usr/bin/critest"
make BINDIR="$pkgdir"/usr/bin install
}
sha512sums='
9b5907b37bb5f00295eff4fa4207ae55d930feae7e0f48fa130c7ecc936bcd259a11d59ed240684a3e12c8bcee40f2c67d7f4af52c2a76df3d7bf82e5e388a75 cri-tools-1.24.2.tar.gz
'
sha512sums="
222d3785dc7e8485538b4745766494be02d359347eb1337c9dd04839e19269d768922ff04f07d1fb72291c3554ecf91b382307253a288c9376079135a625cc0c cri-tools-1.28.0.tar.gz
"

View File

@ -1,8 +1,8 @@
# Contributor: Christian Kampka <christian@kampka.net>
# Maintainer:
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=docker-registry
pkgver=2.9.0_git20230327
pkgrel=1
pkgver=2.8.3
pkgrel=0
pkgdesc="An implementation of the Docker Registry HTTP API V2 for use with docker 1.6+"
url="https://github.com/distribution/distribution"
# riscv64 blocked by dependency panicwrap
@ -14,7 +14,7 @@ pkgusers="docker-registry"
pkggroups="docker-registry"
subpackages="$pkgname-openrc"
#source="$pkgname-$pkgver.tar.gz::$url/archive/v$pkgver.tar.gz
source="$pkgname-$pkgver.tar.gz::$url/archive/0c958010ace2e0c2a87f1bf9915b7c74157dfb62.tar.gz
source="$pkgname-$pkgver.tar.gz::$url/archive/983358f8e2509bf8ae196a8e135180a2c5b11264.tar.gz
docker-registry.initd
config-example.patch"
builddir="$srcdir/src/github.com/docker/distribution"
@ -23,7 +23,7 @@ options="chmod-clean"
prepare() {
mkdir -p "${builddir%/*}"
#mv "$srcdir"/distribution-$pkgver "$builddir"
mv "$srcdir"/distribution-0c958010ace2e0c2a87f1bf9915b7c74157dfb62 "$builddir"
mv "$srcdir"/distribution-* "$builddir"
}
build() {
@ -57,7 +57,7 @@ package() {
}
sha512sums="
baf540b81d5f736e105eb2c05f5f4775c61ace3118f965a52b7b477a596291e12b33e56f882ce364731e9701ae6e9b2e09add3bcf8a1a11bb25eb54833c14368 docker-registry-2.9.0_git20230327.tar.gz
8ceb8b994085bc6522e8a203785bd670977117988d391023148a4153e3c150ad7c17fb98de863c4c2300714022444dc5141a75a2899b8b0f04cbbdc17794b5c7 docker-registry-2.8.3.tar.gz
96100a4de311afa19d293a3b8a63105e1fcdf49258aa8b1752befd389e6b4a2b1f70711341ea011b450d4468bd37dbd07a393ffab3b9aa1b2213cf0fdd915904 docker-registry.initd
5a38f4d3f0ee5cd00c0a5ced744eb5b29b839da5921adea26c5de3eb88b6b2626a7ba29b1ab931e5f8fbfafbed8c94cb972a58737ec0c0a69cf515c32139e387 config-example.patch
"

View File

@ -1,7 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=ecr-credential-provider
pkgver=1.25.3
pkgver=1.28.1
pkgrel=0
pkgdesc="AWS Kubernetes ecr-credential-provider"
url="https://github.com/kubernetes/cloud-provider-aws"
@ -15,7 +15,7 @@ builddir="$srcdir/cloud-provider-aws-$pkgver"
build() {
unset LDFLAGS # the default abuild LDFLAGS break the go linker
make GOFLAGS="-buildmode=pie -v" GOLDFLAGS="-extldflags=-static -w -s" ecr-credential-provider
make VERSION="v$pkgver" GOFLAGS="-buildmode=pie -v" GOLDFLAGS="-extldflags=-static -w -s" ecr-credential-provider
}
package() {
@ -24,5 +24,5 @@ package() {
}
sha512sums="
d727c01ea98608b0b51edc2bfe892218b55eee7148e358e18387f3f4a52ad765f8d0ee372884e36f95f1303c13dbeba81926f7560c325a8d3c258da11cdfc24b ecr-credential-provider-1.25.3.tar.gz
b9adc389be9301dc4be36c6bf546f354b9f2895cbad13d28d074dbab77f9aecec8d5fd02590d21c2a4acc91b559371adfe9702898c7880d92aea6657b315a539 ecr-credential-provider-1.28.1.tar.gz
"

View File

@ -0,0 +1,43 @@
From dca56cf4d28bbbb1d3be029ce9a6710cb3f6cd2f Mon Sep 17 00:00:00 2001
From: Laszlo Ersek <lersek@redhat.com>
Date: Thu, 4 Jun 2020 13:34:12 +0200
Subject: BaseTools: do not build BrotliCompress (RH only)
Notes about the RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] ->
RHEL-8.5/20210520-e1999b264f1f [edk2-stable202105] rebase:
- no change
Notes about the RHEL-8.2/20190904-37eef91017ad [edk2-stable201908] ->
RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] rebase:
- New patch.
BrotliCompress is not used for building ArmVirtPkg or OvmfPkg platforms.
It depends on one of the upstream Brotli git submodules that we removed
earlier in this rebase series. (See patch "remove upstream edk2's Brotli
submodules (RH only").
Do not attempt to build BrotliCompress.
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit db8ccca337e2c5722c1d408d2541cf653d3371a2)
---
BaseTools/Source/C/GNUmakefile | 1 -
1 file changed, 1 deletion(-)
diff --git a/BaseTools/Source/C/GNUmakefile b/BaseTools/Source/C/GNUmakefile
index 8c191e0c38..3eae824a1c 100644
--- a/BaseTools/Source/C/GNUmakefile
+++ b/BaseTools/Source/C/GNUmakefile
@@ -48,7 +48,6 @@ all: makerootdir subdirs
LIBRARIES = Common
VFRAUTOGEN = VfrCompile/VfrLexer.h
APPLICATIONS = \
- BrotliCompress \
VfrCompile \
EfiRom \
GenFfs \
--
2.27.0

View File

@ -0,0 +1,49 @@
From 9729dd1d6b83961d531e29777d0cc4a610b108be Mon Sep 17 00:00:00 2001
From: Laszlo Ersek <lersek@redhat.com>
Date: Thu, 4 Jun 2020 13:39:08 +0200
Subject: MdeModulePkg: remove package-private Brotli include path (RH only)
Notes about the RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] ->
RHEL-8.5/20210520-e1999b264f1f [edk2-stable202105] rebase:
- no change
Notes about the RHEL-8.2/20190904-37eef91017ad [edk2-stable201908] ->
RHEL-8.3/20200603-ca407c7246bf [edk2-stable202005] rebase:
- New patch.
Originating from upstream commit 58802e02c41b
("MdeModulePkg/BrotliCustomDecompressLib: Make brotli a submodule",
2020-04-16), "MdeModulePkg/MdeModulePkg.dec" contains a package-internal
include path into a Brotli submodule.
The edk2 build system requires such include paths to resolve successfully,
regardless of the firmware platform being built. Because
BrotliCustomDecompressLib is not consumed by any OvmfPkg or ArmVirtPkg
platforms, and we've removed the submodule earlier in this patch set,
remove the include path too.
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit e05e0de713c4a2b8adb6ff9809611f222bfe50ed)
---
MdeModulePkg/MdeModulePkg.dec | 3 ---
1 file changed, 3 deletions(-)
diff --git a/MdeModulePkg/MdeModulePkg.dec b/MdeModulePkg/MdeModulePkg.dec
index 8d38383915..ba2d0290e7 100644
--- a/MdeModulePkg/MdeModulePkg.dec
+++ b/MdeModulePkg/MdeModulePkg.dec
@@ -24,9 +24,6 @@
[Includes]
Include
-[Includes.Common.Private]
- Library/BrotliCustomDecompressLib/brotli/c/include
-
[LibraryClasses]
## @libraryclass Defines a set of methods to reset whole system.
ResetSystemLib|Include/Library/ResetSystemLib.h
--
2.27.0

178
kubezero/edk2/APKBUILD Normal file
View File

@ -0,0 +1,178 @@
# Contributor: Timo Teräs <timo.teras@iki.fi>
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=edk2
pkgver=0.0.202308
_realver=edk2-stable${pkgver##*.}
_sslver=3.0.9
_sfver=3e
pkgrel=0
pkgdesc="EFI Development Kit II"
url="https://github.com/tianocore/tianocore.github.io/wiki/EDK-II/"
arch="x86_64 aarch64"
license="BSD-2-Clause-Patent"
makedepends="bash python3 iasl nasm util-linux-dev util-linux-misc"
options="!archcheck !check" # has no checks
subpackages="$pkgname-pyc"
_mipisyst_commit=370b5944c046bab043dd8b133727b2135af7747a
source="$pkgname-$pkgver.tar.gz::https://github.com/tianocore/edk2/archive/$_realver.tar.gz
mipisyst-$_mipisyst_commit.tar.gz::https://github.com/MIPI-Alliance/public-mipi-sys-t/archive/$_mipisyst_commit.tar.gz
https://www.openssl.org/source/openssl-$_sslver.tar.gz
http://www.jhauser.us/arithmetic/SoftFloat-$_sfver.zip
build-hack.patch
0008-BaseTools-do-not-build-BrotliCompress-RH-only.patch
0009-MdeModulePkg-remove-package-private-Brotli-include-p.patch
"
builddir="$srcdir/$pkgname-$_realver"
case "$CARCH" in
x86)
TARGET_ARCH=IA32
PLATFORM=OvmfPkg/OvmfPkgIa32X64.dsc
;;
x86_64)
TARGET_ARCH=X64
PLATFORM="OvmfPkg/OvmfPkgX64.dsc OvmfPkg/OvmfXen.dsc OvmfPkg/CloudHv/CloudHvX64.dsc"
subpackages="$subpackages ovmf:_ovmf:noarch ovmf-xen:_xen:noarch cloudhv:_cloudhv:noarch"
;;
aarch64)
TARGET_ARCH=AARCH64
PLATFORM=ArmVirtPkg/ArmVirtQemu.dsc
subpackages="$subpackages aavmf::noarch"
;;
esac
TOOLCHAIN=GCC5
RELEASE=RELEASE
prepare() {
	# unix line endings for the files to be patched
	sed -e 's/\r$//' -i BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp \
		BaseTools/Source/C/VolInfo/VolInfo.c

	# Replace the (removed) upstream git submodules with symlinks to the
	# separately downloaded source trees listed in source=.
	rm -rf CryptoPkg/Library/OpensslLib/openssl
	ln -s "$srcdir"/openssl-$_sslver CryptoPkg/Library/OpensslLib/openssl

	rm -rf ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
	ln -s "$srcdir"/SoftFloat-$_sfver \
		ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3

	rm -rf MdePkg/Library/MipiSysTLib/mipisyst
	ln -s "$srcdir"/public-mipi-sys-t-$_mipisyst_commit \
		MdePkg/Library/MipiSysTLib/mipisyst

	default_prepare
}
build() {
	# edk2's build system needs these to locate its Python tooling and wrappers.
	export PYTHON_COMMAND=python3
	export WORKSPACE=$PWD
	export PACKAGES_PATH=$PWD
	export EDK_TOOLS_PATH=$PWD/BaseTools/
	export PATH=$PWD/BaseTools/BinWrappers/PosixLike/:$PATH

	# parallel build fails
	unset MAKEFLAGS

	# Sourcing edksetup.sh generates the Conf/ files as a side effect; the
	# subshell's environment changes are intentionally discarded.
	bash -c ". edksetup.sh"
	make -C BaseTools

	for _p in $PLATFORM; do
		# fix: "Plaform" -> "Platform" in the progress message
		msg "Building Platform Files $_p"
		# 'command' bypasses the shell build() function and runs edk2's
		# own 'build' wrapper from BaseTools/BinWrappers.
		command build -b $RELEASE \
			-a $TARGET_ARCH \
			-t $TOOLCHAIN \
			-p $_p \
			-n ${JOBS:-2} \
			-DSECURE_BOOT_ENABLE=TRUE \
			-DTPM2_ENABLE=TRUE
	done
}
package() {
	mkdir -p "$pkgdir"/usr/bin \
		"$pkgdir"/usr/share/$pkgname/Conf \
		"$pkgdir"/usr/share/$pkgname/Scripts

	# BaseTools C binaries plus the LzmaF86Compress wrapper script
	install BaseTools/Source/C/bin/* BaseTools/BinWrappers/PosixLike/LzmaF86Compress \
		"$pkgdir"/usr/bin
	install BaseTools/BuildEnv "$pkgdir"/usr/share/$pkgname/
	install BaseTools/Conf/*.template "$pkgdir"/usr/share/$pkgname/Conf
	install BaseTools/Scripts/GccBase.lds "$pkgdir"/usr/share/$pkgname/Scripts

	# Ship each BaseTools Python tool package and generate a small /usr/bin
	# launcher for it. Only directories containing a <mod>/<mod>.py entry
	# point are packaged.
	# NOTE(review): relies on cp creating usr/share/edk2/Python on the first
	# iteration (no mkdir for it above) — verify against the built package.
	for i in $(find BaseTools/Source/Python -type d -maxdepth 1); do
		local mod=${i##*/}
		test -f "$i/$mod.py" || continue
		cp -R BaseTools/Source/Python/"$mod" "$pkgdir"/usr/share/edk2/Python/
		# $PYTHON_COMMAND is expanded now (exported by build()), so the
		# generated launcher hardcodes the interpreter.
		cat <<- EOF > "$pkgdir"/usr/bin/"$mod".py
		#!/bin/sh
		export PYTHONPATH=/usr/share/edk2/Python
		exec $PYTHON_COMMAND /usr/share/edk2/Python/$mod/$mod.py "\$@"
		EOF
		chmod +x "$pkgdir"/usr/bin/"$mod".py
	done
}
_ovmf() {
	pkgdesc="Open Virtual Machine Firmware (OVMF) BIOS"
	license="BSD MIT"
	# Ship all firmware volumes built for the OvmfX64 platform.
	for fw in "$builddir"/Build/OvmfX64/"$RELEASE"_"$TOOLCHAIN"/FV/*.fd; do
		install -D $fw "$subpkgdir"/usr/share/OVMF/${fw##*/}
	done
	# don't ship MEMFD for now to save space
	rm -f "$subpkgdir"/usr/share/OVMF/MEMFD.fd
	# compat symlink for tools that expect the traditional bios.bin path
	install -d "$subpkgdir"/usr/share/ovmf
	ln -sf ../OVMF/OVMF.fd "$subpkgdir"/usr/share/ovmf/bios.bin
}
_xen() {
	pkgdesc="Open Virtual Machine Firmware (OVMF) - Xen build"
	license="BSD MIT"
	# Install the Xen platform firmware volume where Xen looks for its
	# guest boot firmware.
	local fv="$builddir"/Build/OvmfXen/"$RELEASE"_"$TOOLCHAIN"/FV/OVMF.fd
	install -D "$fv" "$subpkgdir"/usr/lib/xen/boot/ovmf.bin
}
_cloudhv() {
	pkgdesc="EDK2 EFI Firmware - Cloud-Hypervisor build"
	license="BSD MIT"
	# Install the Cloud Hypervisor firmware volume under its expected path.
	local fv="$builddir"/Build/CloudHvX64/"$RELEASE"_"$TOOLCHAIN"/FV/CLOUDHV.fd
	install -D "$fv" "$subpkgdir"/usr/share/cloudhv/CLOUDHV.fd
}
aavmf() {
	pkgdesc="ARM (aarch64) Virtual Machine Firmware EFI"
	license="BSD MIT"
	# Pad AAVMF_CODE.fd out to 64 MiB: seek=64 with count=0 only extends the
	# file (sparse), it writes no data. QEMU's aarch64 pflash devices expect
	# fixed 64 MiB images.
	dd if=/dev/zero \
		of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_CODE.fd \
		bs=1M seek=64 count=0
	# Copy the built QEMU_EFI.fd into the padded image; conv=notrunc keeps
	# the 64 MiB size established above.
	dd if="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/QEMU_EFI.fd \
		of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_CODE.fd \
		conv=notrunc
	# Create an empty 64 MiB varstore image the same way.
	dd if=/dev/zero \
		of="$builddir"/Build/ArmVirtQemu-AARCH64/"$RELEASE"_$TOOLCHAIN/FV/AAVMF_VARS.fd \
		bs=1M seek=64 count=0
	# Ship every firmware volume produced by the build.
	for fw in "$builddir"/Build/*/"$RELEASE"_"$TOOLCHAIN"/FV/*.fd; do
		install -D $fw "$subpkgdir"/usr/share/AAVMF/${fw##*/}
	done
}
pyc() {
	default_pyc
	# Move the compiled __pycache__ trees of the shipped Python tools into
	# this subpackage; newline-only IFS keeps find's one-path-per-line
	# output intact through the unquoted expansion.
	local IFS=$'\n'
	amove $(find usr/share/edk2/Python -type d -name __pycache__)
}
sha512sums="
668411dc64a4a69afd145221c599fffc3797de26e801dda7d9b7ed92f755ff4fda4635dbc21c821f527e56eb71c4ad98c1fb079112a56d6b6eea5ff4d010e3cf edk2-0.0.202308.tar.gz
de6888577ceab7ab6915d792f3c48248cfa53357ccd310fc7f7eae4d25a932de8c7c23e5b898c9ebf61cf86cb538277273f2eb131a628b3bf0d46c9a3b9b6686 mipisyst-370b5944c046bab043dd8b133727b2135af7747a.tar.gz
86c99146b37236419b110db77dd3ac3992e6bed78c258f0cc3434ca233460b4e17c0ac81d7058547fe9cb72a9fd80ee56d4b4916bb731dbe2bbcf1c3d46bf31a openssl-3.0.9.tar.gz
3fedcd0060affb2d8fc7995894133cfed6a495c8717df0d30c89885223c38749f25743598383736036332dad6353c6a3f027f5a94a696660f7c4b607e33e534c SoftFloat-3e.zip
a7d4ab2c82b62ba01c86e59f53bd3896d661c9bfbb9db9598734155b66d5fe03eca4a2a9993a14d3bf555992c6d01ba5d7a15868ff9ec6ed98b8a9b3895bb7df build-hack.patch
ecbfc1ec3b732580c33c477191b71553247af1a68f1754bd363d179e0f5aabde93e3c5ec7f2574f9a9ffefef34e75787a2a87b1057b02cd206e8f0618a252871 0008-BaseTools-do-not-build-BrotliCompress-RH-only.patch
ecad98ff84ab307bda751c8a9a321e064ef880dc66b4d107e66aedbc4e14d00eed76770437e25fa9153dc30803f5cbbf1299329f56865a3b75d2c19f6615e68b 0009-MdeModulePkg-remove-package-private-Brotli-include-p.patch
"

View File

@ -0,0 +1,13 @@
VfrCompile seg.faults with fortify enabled. It's probably broken.
diff -ru a/edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp b/edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp
--- edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp 2016-11-16 10:01:14.000000000 +0200
+++ edk2-e242cdfb307a6dfe2c0f75c4719f5c1f6b418625/BaseTools/Source/C/VfrCompile/VfrUtilityLib.cpp 2016-11-16 14:47:30.211978076 +0200
@@ -13,6 +13,7 @@
**/
+#define _FORTIFY_SOURCE 0
#include "stdio.h"
#include "stdlib.h"
#include "CommonLib.h"

View File

@ -22,5 +22,5 @@ package() {
}
sha512sums="
97abd4e5a0078112a048037512b041bcefb9e660131403e9c87bf5fc8b632eb17ab66d20a477a2ef4808f54ae29941d74bd61390143e5781058d7bbd4333dd78 etcdhelper-0.1.0.tar.gz
d1f3d239899a2392d11c45ea49b3bfc18255c00933e677f02eab1f0f59a940722fb40de1842a8a4253aabf066508be028443adb8920e82673342ba50130556ca etcdhelper-0.1.0.tar.gz
"

View File

@ -0,0 +1,71 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
_flavor=lts
_extra_flavors=virt
pkgver=0.37.1
pkgrel=0
pkgname=falco-kernel-$_flavor
pkgdesc="Falco kernel module"
url="https://github.com/falcosecurity/falco"
arch="x86_64 aarch64"
license="AGPL-3.0"
makedepends="cmake linux-$_flavor-dev linux-headers"
# protobuf-dev jq-dev openssl-dev curl-dev c-ares-dev grpc-dev yaml-dev yaml-cpp-dev jsoncpp-dev re2-dev"
# perl autoconf elfutils-dev libtool argp-standalone musl-fts-dev musl-libintl musl-obstack-dev"
options="!check"
source="
falco-$pkgver.tar.gz::https://github.com/falcosecurity/falco/archive/refs/tags/$pkgver.tar.gz
"
builddir="$srcdir/falco-$pkgver"
for f in $_extra_flavors; do
makedepends="$makedepends linux-$f-dev"
subpackages="$subpackages falco-kernel-$f:_extra"
done
build() {
	# Build the falco kernel module once per kernel flavor (lts + extras).
	for flavor in $_flavor $_extra_flavors; do
		mkdir -p $srcdir/falco-$pkgver/build-$flavor

		# Hack: when building inside a container uname -r returns the host
		# kernel, so derive the target kernel version from /lib/modules
		# (provided by the linux-<flavor>-dev makedepends) instead.
		KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-"$flavor"))

		cd $srcdir/falco-$pkgver/build-$flavor
		cmake .. \
			-DCMAKE_BUILD_TYPE=Release \
			-DFALCO_VERSION=$pkgver \
			-DCMAKE_INSTALL_PREFIX=/usr \
			-DUSE_BUNDLED_DEPS=On \
			-DMUSL_OPTIMIZED_BUILD=On
		# Build only the kernel driver against the flavor's kernel headers.
		KERNELDIR=/lib/modules/$KERNEL_VERSION/build make driver
	done
}
_package() {
	# Install the falco kernel module for one kernel flavor.
	#   $1 - kernel flavor (e.g. lts, virt)
	#   $2 - destination root ($pkgdir or $subpkgdir)
	local flavor=$1
	local _out=$2
	KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-"$flavor"))
	# Pin the runtime dependency to the matching kernel package version
	# (strip the "-rN-flavor" suffix from the module directory name).
	depends="linux-$flavor~$(echo $KERNEL_VERSION | sed -e 's/-.*$//')"
	cd $srcdir/falco-$pkgver/build-$flavor
	mkdir -p "$_out"/lib/modules/$KERNEL_VERSION/kernel
	# Ship the module gzip-compressed, as the kernel module loader accepts.
	gzip -9 -c driver/falco.ko > "$_out"/lib/modules/$KERNEL_VERSION/kernel/falco.ko.gz
}
package() {
	# The main package carries the module for the primary kernel flavor.
	_package "$_flavor" "$pkgdir"
}
_extra() {
	# Build the subpackage for the extra kernel flavor encoded in the
	# subpackage name (falco-kernel-<flavor>).
	flavor=${subpkgname##*-}
	_package "$flavor" "$subpkgdir"
}
sha512sums="
257d526c4d3eadbe2c79852221fdb8076f94e421aa66753628770ae7384137b4672064cbe1ba0a4d88d14e8a7d08e2521d5bd82a312c4b1442d8ea6fbbbb2f28 falco-0.37.1.tar.gz
"

79
kubezero/falco/APKBUILD Normal file
View File

@ -0,0 +1,79 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=falco
pkgver=0.37.1
pkgrel=0
pkgdesc="Falco is the open source solution for runtime security for hosts, containers, Kubernetes and the cloud"
url="https://github.com/falcosecurity/falco"
arch="x86_64 aarch64"
license="AGPL-3.0"
makedepends="cmake linux-headers bash perl autoconf elfutils-dev libtool argp-standalone
musl-fts-dev
musl-libintl
musl-legacy-error
musl-obstack-dev "
# protobuf-dev
# c-ares-dev
# openssl-dev
# curl-dev
# grpc-dev
# yaml-cpp-dev
# "
options="!check"
#depends="falco-kernel~$pkgver"
# Original config
# https://raw.githubusercontent.com/falcosecurity/rules/main/rules/falco_rules.yaml
# https://raw.githubusercontent.com/falcosecurity/falco/master/falco.yaml
source="
$pkgname-$pkgver.tar.gz::https://github.com/falcosecurity/falco/archive/refs/tags/$pkgver.tar.gz
falco.patch
rules.patch
"
prepare() {
	# Ensure the out-of-tree cmake build directory exists (idempotent).
	mkdir -p build
}
build() {
	cd build
	# Minimal static-ish falco userspace build: no drivers (packaged
	# separately by falco-kernel), no BPF, bundled dependencies.
	cmake \
		-DCPACK_GENERATOR=TGZ \
		-DCMAKE_BUILD_TYPE=Release \
		-DFALCO_VERSION=$pkgver \
		-DCMAKE_INSTALL_PREFIX=/usr \
		-DFALCO_ETC_DIR=/etc/falco \
		-DUSE_BUNDLED_DEPS=On \
		-DMINIMAL_BUILD=On \
		-DUSE_DYNAMIC_LIBELF=Off \
		-DMUSL_OPTIMIZED_BUILD=On \
		-DBUILD_DRIVER=Off \
		-DBUILD_BPF=Off \
		-DBUILD_LIBSCAP_MODERN_BPF=Off \
		..
	# Removed debug leftover "|| bash": it dropped into an interactive
	# shell on failure and masked make's non-zero exit status from abuild.
	make falco
}
package() {
	cd build
	make DESTDIR="${pkgdir}" install

	# Patch the installed falco config and ruleset in place.
	# (Quoted all $pkgdir/$srcdir expansions to survive paths with spaces.)
	cd "$pkgdir"/etc/falco
	patch --no-backup-if-mismatch -i "$srcdir"/falco.patch
	patch --no-backup-if-mismatch -i "$srcdir"/rules.patch

	# We don't build drivers on target nodes, so drop sources and dev files
	rm -rf "$pkgdir"/usr/src
	rm -rf "$pkgdir"/usr/lib
	rm -rf "$pkgdir"/usr/include
}
sha512sums="
257d526c4d3eadbe2c79852221fdb8076f94e421aa66753628770ae7384137b4672064cbe1ba0a4d88d14e8a7d08e2521d5bd82a312c4b1442d8ea6fbbbb2f28 falco-0.37.1.tar.gz
b152fcf6cd81895efa37797ab7ff1aac7350b5f51f2648aa9e3cce9d5ece55791ddf82c396e9da216293e2379a785a294cc972f28a91162dc5bc88ab09e1ab08 falco.patch
487b8b64d2399fd7b706be29e3722983bcdfde3ab5cf0f78b2e9fe1055a4ad958976f591e739491e25a06d7cdf6894c1e153e892a87b83c7a962e23c9a104528 rules.patch
"

View File

@ -0,0 +1,11 @@
--- CMakeLists.txt.orig 2023-07-31 17:28:10.413951551 +0000
+++ CMakeLists.txt 2023-07-31 17:28:19.927330458 +0000
@@ -89,7 +89,7 @@
endif()
if(MUSL_OPTIMIZED_BUILD)
- set(MUSL_FLAGS "-static -Os -fPIE -pie")
+ set(MUSL_FLAGS "-fPIE -pie")
add_definitions(-DMUSL_OPTIMIZED)
endif()

View File

@ -0,0 +1,20 @@
--- falco.yaml 2023-07-05 11:42:11.816317256 +0000
+++ zdt_falco.yaml 2023-07-05 11:31:07.476468029 +0000
@@ -238,7 +238,7 @@
# When enabled, Falco will output alert messages and rules file
# loading/validation results in JSON format, making it easier for downstream
# programs to process and consume the data. By default, this option is disabled.
-json_output: false
+json_output: true
# [Stable] `json_include_output_property`
#
@@ -263,7 +263,7 @@
# Enabling buffering for the output queue can offer performance optimization,
# efficient resource usage, and smoother data flow, resulting in a more reliable
# output mechanism. By default, buffering is disabled (false).
-buffered_outputs: false
+buffered_outputs: true
# [Stable] `outputs`
#

1053
kubezero/falco/falco.yaml Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,29 @@
--- falco_rules.yaml 2023-11-07 16:26:40.171716913 +0000
+++ zdt_falco_rules.yaml 2023-11-07 16:30:24.912804117 +0000
@@ -171,7 +171,7 @@
# A canonical set of processes that run other programs with different
# privileges or as a different user.
- list: userexec_binaries
- items: [sudo, su, suexec, critical-stack, dzdo]
+ items: [doas, sudo, su, suexec, critical-stack, dzdo]
- list: user_mgmt_binaries
items: [login_binaries, passwd_binaries, shadowutils_binaries]
@@ -200,7 +200,7 @@
]
- list: sensitive_file_names
- items: [/etc/shadow, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf]
+ items: [/etc/shadow, /etc/doas.d/doas.conf, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf]
- list: sensitive_directory_names
items: [/, /etc, /etc/, /root, /root/]
@@ -208,7 +208,7 @@
- macro: sensitive_files
condition: >
((fd.name startswith /etc and fd.name in (sensitive_file_names)) or
- fd.directory in (/etc/sudoers.d, /etc/pam.d))
+ fd.directory in (/etc/sudoers.d, /etc/pam.d, /etc/doas.d))
# Indicates that the process is new. Currently detected using time
# since process was started, using a threshold of 5 seconds.

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,37 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=falcoctl
pkgver=0.7.3
pkgrel=0
pkgdesc="The official CLI tool for working with Falco and its ecosystem components."
url="https://github.com/falcosecurity/falcoctl"
arch="x86_64 aarch64"
license="AGPL-3.0"
makedepends="bash go"
options="!check"
source="
$pkgname-$pkgver.tar.gz::https://github.com/falcosecurity/falcoctl/archive/refs/tags/v$pkgver.tar.gz
"
export GOCACHE="${GOCACHE:-"$srcdir/go-cache"}"
export GOTMPDIR="${GOTMPDIR:-"$srcdir"}"
export GOMODCACHE="${GOMODCACHE:-"$srcdir/go"}"
export GOBIN="$GOPATH/bin"
build() {
	# Static PIE build of the falcoctl binary.
	make GOFLAGS="-buildmode=pie -v" GOLDFLAGS="-extldflags=-static -w -s" falcoctl
	# cleanup 444 files — the Go module cache is read-only (0444) and would
	# otherwise break removal of the build directories
	go clean -modcache
}
package() {
	# Install the falcoctl binary into the package tree.
	local bindir="$pkgdir/usr/bin"
	mkdir -p "$bindir"
	install -Dm755 falcoctl "$bindir/falcoctl"
}
sha512sums="
61e539322c91125569c432ea1fc98c84b928795089829a062e6b5c74c7d1223cd71e557b7a8972ba7c6d1b534d1b87da254ee01e12c14038ced5a8f85a22a623 falcoctl-0.7.3.tar.gz
"

View File

@ -1,7 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=fluent-bit
pkgver=2.1.1
pkgver=2.2.2
pkgrel=0
pkgdesc="Fast and Lightweight Log processor and forwarder"
url="https://fluentbit.io/"
@ -12,10 +12,10 @@ makedepends="
bison
cmake
flex
gtest-dev
linux-headers
musl-fts-dev
openssl-dev
gtest-dev
yaml-dev
zlib-dev
"
@ -27,6 +27,11 @@ source="$pkgname-$pkgver.tar.gz::https://github.com/fluent/fluent-bit/archive/v$
fluent-bit.confd
fluent-bit.initd
chunkio-static-lib-fts.patch
exclude-luajit.patch
xsi-strerror.patch
fluent-bit.conf
zdt-parsers.conf
fluent-bit.logrotated
"
# enable check when this solved - https://github.com/fluent/fluent-bit/issues/2464#issuecomment-673280055
# Disable all things AWS to make tests pass
@ -38,7 +43,7 @@ build() {
fi
# default CORE_STACK_SIZE=((3 * PTHREAD_STACK_MIN) / 2)=3072 is invalid
# set default to 24576
# Disable stream processor due to issue see: https://github.com/fluent/fluent-bit/issues/2464
# Disable stream processor due to issue see: https://github.com/fluent/fluent-bit/issues/2464
cmake -B build \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_INSTALL_LIBDIR=lib \
@ -48,15 +53,16 @@ build() {
-DFLB_DEBUG=Off \
-DFLB_SHARED_LIB=Off \
-DFLB_JEMALLOC=Yes \
-DFLB_LUAJIT=Yes \
-DFLB_IN_SYSTEMD=Off \
-DFLB_PROXY_GO=No \
-DFLB_TLS=Yes \
-DFLB_HTTP_SERVER=Yes \
$CMAKE_CROSSOPTS .
$CMAKE_CROSSOPTS .
make -C build
#-DCMAKE_FIND_LIBRARY_SUFFIXES=".a" \
#-DCMAKE_EXE_LINKER_FLAGS="-static" \
#-DFLB_STREAM_PROCESSOR=No \
#-DFLB_LUAJIT=Yes \
#-DFLB_FILTER_LUA=Off \
#-DFLB_TESTS_INTERNAL=Yes \
#-DFLB_AWS=No \
@ -83,11 +89,25 @@ package() {
"$pkgdir"/etc/conf.d/$pkgname
mv "$pkgdir"/usr/etc/* "$pkgdir"/etc
rm "$pkgdir"/etc/fluent-bit/fluent-bit.conf
mkdir -p "$pkgdir"/var/spool/fluent-bit
install -Dm644 "$srcdir/fluent-bit.conf" "$pkgdir/etc/fluent-bit/fluent-bit.conf"
install -Dm644 "$srcdir/zdt-parsers.conf" "$pkgdir/etc/fluent-bit/zdt-parsers.conf"
install -Dm644 "$srcdir"/fluent-bit.logrotated "$pkgdir"/etc/logrotate.d/fluentbit
touch "$pkgdir"/etc/fluent-bit/metadata.conf
}
sha512sums="
8c682e41411cae42580636a8d55b3f6c09b729f8e25f7d4e4b306ff286e0aea91da9ebc1a57dee153a90117884cc2a9d4342cae0e860a2f5f74a8a8c4f3b1e81 fluent-bit-2.1.1.tar.gz
681c1db0256d0b50d986194597b700f790726a1394b3ad92c92a26c95d04bf2b65203e94ef2aeb0f0b3403870748ec0ebbec2cd49548857fbadc5c745581452f fluent-bit-2.2.2.tar.gz
f6431397c80a036980b5377b51e38aec25dfceeb8dbe4cd54dce1f6e77d669d9f8daf983fcc96d25332385888f1809ced5e8ab0e8ccfcd93d19494036e3dc949 fluent-bit.confd
8ba6c8e84dee90176f9b4375fb2c6444fa5d32fa601d9bcf3ea7960fec87f1ef664f175caf08bd0b052843e971efdbf08e2a5cd180ad9a8f23ff2c5cb233814f fluent-bit.initd
6bd7d8b4da93a17f29b6ea1e0286ea226d0e376024284741110936779b3229bd8d6cd03ffbdc5d3b4842294e7f32a888de0dd16b0851b65d91b062ca58530ea0 chunkio-static-lib-fts.patch
e3308a8377fb8ba496415b7a31e9e022e5aa9965d27a0c33ea5166a29049b72cb364bbcdf9d8611ef3407b0968f9bd4adff12cdb39728bbebd382710e5bc75d0 exclude-luajit.patch
d61f30344af997f126486fa5b34cd3fbfe88bfc9aea394a8c60d0206f4db8db998eadf637a3a581b89512411c1e7980c414e236e455d5e2b889d20a556ee6577 xsi-strerror.patch
52aba9d23584d64842bc967504701a10166a43a03ca0d31de9b6cbffaacdbaa7d99f0fd55a4b0194e3b65d456817cb1779b86d468d81c1d9681a6fa708e85449 fluent-bit.conf
31899a3c68bbb43adb9025a3a46bad4ca0c740d5bca5c252c8667197575698d98ac4a3b6e11ee160c4bb8df0d0089b639bfd7d0ffa52391e6c4f8f734a6952a6 zdt-parsers.conf
e166b0ff11a1789599e93f86b72102ca6a06725c98553a8fdd48c8d6414bfa765c3958d07bfb4c4d99101d8cdf7d00db1a8506d48c2cbd6bd375ce43c43d2bf9 fluent-bit.logrotated
"

View File

@ -0,0 +1,12 @@
otherwise it installs an unused /usr/bin/luajit ....
-0
diff --git a/cmake/luajit.cmake b/cmake/luajit.cmake
index b6774eb..f8042ae 100644
--- a/cmake/luajit.cmake
+++ b/cmake/luajit.cmake
@@ -1,4 +1,4 @@
# luajit cmake
option(LUAJIT_DIR "Path of LuaJIT 2.1 source dir" ON)
set(LUAJIT_DIR ${FLB_PATH_ROOT_SOURCE}/${FLB_PATH_LIB_LUAJIT})
-add_subdirectory("lib/luajit-cmake")
+add_subdirectory("lib/luajit-cmake" EXCLUDE_FROM_ALL)

View File

@ -0,0 +1,33 @@
[SERVICE]
Flush 5
Daemon Off
Log_Level info
log_file /var/log/fluent-bit.log
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port 2020
Health_Check On
parsers_file parsers.conf
parsers_file zdt-parsers.conf
plugins_file plugins.conf
storage.metrics on
storage.path /var/spool/fluent-bit
storage.sync normal
storage.checksum on
storage.backlog.mem_limit 5M
[INPUT]
Name tail
Path /var/log/messages
Parser syslog-ng-json
Tag system
DB /var/log/flb_kube.db
DB.Sync Normal
DB.locking true
@INCLUDE metadata.conf
@INCLUDE output.conf

View File

@ -0,0 +1,12 @@
/var/log/fluentbit.log
{
rotate 2
missingok
notifempty
compress
maxsize 10M
daily
postrotate
rc-service fluent-bit restart
endscript
}

View File

@ -0,0 +1,15 @@
--- a/src/flb_network.c
+++ b/src/flb_network.c
@@ -523,9 +523,10 @@
}
/* Connection is broken, not much to do here */
- str = strerror_r(error, so_error_buf, sizeof(so_error_buf));
+ /* XXX: XSI */
+ int _err = strerror_r(error, so_error_buf, sizeof(so_error_buf));
flb_error("[net] TCP connection failed: %s:%i (%s)",
- u->tcp_host, u->tcp_port, str);
+ u->tcp_host, u->tcp_port, so_error_buf);
return -1;
}
}

View File

@ -0,0 +1,22 @@
[PARSER]
Name syslog-ng-json
Format json
Time_Key time
Time_Format %s.%L
# SQUID access.logs
# "%9d.%03d %6d %s %s/%03d %d %s %s %s %s%s/%s %s"
[PARSER]
Name squid-access
Format regex
Regex /^(?<time>\d+\.\d{1,3})\s+\d+\s(?<host>[^ ]*)\s(?<cache>\w+)\/(?<code>\d+)\s(?<size>\d+)\s(?<method>\S+)\s(?<request_url>[^ ]*)\s(?<user>\S+)\s(?<hierachy_code>\S+)\/(?<forward_host>[^ ]*)\s(?<content_type>\S+)$/
Time_Key time
Time_Format %s.%L
# Laravel logs
[PARSER]
Name laravel
Format regex
Regex /^\[(?<time>\d{4}-\d\d-\d\d \d\d:\d\d:\d\d)\]\s(?<ident>[a-zA-Z0-9]*)\.(?<severity>[a-zA-Z0-9]*):\s(?<message>.*)$/
Time_Key time
Time_Format %Y-%m-%d %H:%M:%S

View File

@ -15,7 +15,7 @@ triggers="$pkgname-bin.trigger=/lib:/usr/lib:/usr/glibc-compat/lib:/lib64"
options="!check lib64"
package() {
conflicts="libc6-compat"
conflicts="gcompat"
mkdir -p "$pkgdir/lib" "$pkgdir/lib64" "$pkgdir/usr/glibc-compat/lib/locale" "$pkgdir"/usr/glibc-compat/lib64 "$pkgdir"/etc
cp -a "$srcdir"/usr "$pkgdir"
cp "$srcdir"/ld.so.conf "$pkgdir"/usr/glibc-compat/etc/ld.so.conf

View File

@ -5,14 +5,13 @@
# Contributor: Dave <dj.2dixx@gmail.com>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=kubernetes
pkgver=1.25.8
pkgver=1.28.9
pkgrel=0
pkgdesc="Container Cluster Manager"
url="https://kubernetes.io/"
# ppc64le: failed to build
arch="x86_64 aarch64 armv7 x86"
arch="all !armhf !riscv64"
license="Apache-2.0"
options="!check chmod-clean" # Tests hang
options="!check chmod-clean net" # Tests hang
_kube_proxy_deps="iptables"
_kubelet_deps="iptables"
@ -74,15 +73,14 @@ export GOTMPDIR="${GOTMPDIR:-"$srcdir"}"
export GOMODCACHE="${GOMODCACHE:-"$srcdir/go"}"
build() {
make generated_files
for _pkgs in $_agent $_cli $_services ; do
make -j1 GOFLAGS="-buildmode=pie -v -tags=providerless" GOLDFLAGS="-extldflags=-static" WHAT=cmd/$_pkgs
make -j1 GOFLAGS="$GOFLAGS -buildmode=pie -v -tags=providerless" GOLDFLAGS="-extldflags=-static" WHAT=cmd/$_pkgs
done
}
package() {
for bin in $_agent $_cli $_services; do
install -Dm755 _output/local/bin/linux/*/$bin "$pkgdir"/usr/bin/$bin
install -Dm755 _output/local/go/bin/$bin "$pkgdir"/usr/bin/$bin
done
mkdir -p "$pkgdir"/etc/kubernetes
}
@ -207,9 +205,9 @@ _do_zshcomp() {
}
sha512sums="
30f1815de4d5bf3a091f1937c94e0e6cf0abc0f527488b72ec4a7d72c014bb8fef450abbf4c908f8a5b791e8d4ab501edb3f5c55af4e370359a952a6228362be kubernetes-1.25.8.tar.gz
c350bb0a63ada0cc3657fe07598101775243083aa1eabda898080b7b01b129e6fdd7ad1a61950cc039b73b081f38de3b856baedf5c075f39916be1547b11d184 make-e2e_node-run-over-distro-bins.patch
56201491d2dfe3a487931cbf5c6e60af898701b9541a936d80e3823948fcfb98508e3d51f4aaa415ce971f7bd20a7b51f74f025c76b83f58d5a8de8ce0ab679b make-test-cmd-run-over-hyperkube-based-kubectl.patch
cb10da770f8bb035c98b2c02b9ff202194ae69983d7c4d5052b03f5f5522e57f70a88105039265e1892039c566cfd7d043fcb44ad958823be0f5bee352f864a0 kubernetes-1.28.9.tar.gz
5427c2e653504cfd5b0bcaf195d4734ee40947ddfebc9f155cd96dddccfc27692c29d94af4ac99f1018925b52995c593b584c5d7a82df2f185ebce1a9e463c40 make-e2e_node-run-over-distro-bins.patch
94d07edfe7ca52b12e85dd9e29f4c9edcd144abc8d120fb71e2a0507f064afd4bac5dde30da7673a35bdd842b79a4770a03a1f3946bfae361c01dd4dc4903c64 make-test-cmd-run-over-hyperkube-based-kubectl.patch
e690daff2adb1013c92124f32e71f8ed9a18c611ae6ae5fcb5ce9674768dbf9d911a05d7e4028488cda886e63b82e8ac0606d14389a05844c1b5538a33dd09d1 kube-apiserver.initd
302b2a7ec715967c0aa7d1c177d4e55b26e37ebba8d04dd37ecf627d20042fe91cd7e6192ff9d71422129b0ea54a9eec6046f505af550548bd450998924f37ee kube-apiserver.confd
1a4bcd54dafaedc614e34bbadc2a1163f003b5925d47552fb2c47049c033c147e612171e263d9659d189fc2d95688a0b7153322d8dba97c083c079fdef6c400e kube-apiserver.logrotated
@ -222,7 +220,7 @@ d7e022ee22da191bda7382f87cb293d9c9d115a3df0c2054bf918279eb866f99c6d5c21e4c98eae8
561bef5633ba4b9021720624443d9c279a561e5fabea76e5d0fbee2e7ad8999029a2511a45895fbec8448026212a3c5b4c197b248a6afa7f8bd945f705524ea7 kube-scheduler.initd
af88b382ab75657d0ff13c3f8f6d924cef9f2df7807a9a27daa63495981801bc4b607998f65c0758c11a7e070e43c24f7184ba7720711109c74b1c4d57919e34 kube-scheduler.confd
3692da349dd6ed0f5acc09d7b95ac562ffecb103e2270bebdfe4a7808d48dada9d2debff262d85b11c47f9ca3f0c20000712d03629ed813ff08a3e02d69267e6 kube-scheduler.logrotated
1b0e0cc45666b18ecb333bf3835b282e9f72e40bf29b7a9d7b9e5e2bbbd009297be8c31d284e865af45d66434a27dee14e617e61fac0dda0242d7d0f3fc89ce8 kubelet.initd
f79ea6dec632ca052f8cd061bf0c5f7f821c7013c048b4737752e1a41c677c020c1f9454ddabe7f9ba5d55c2a8a4718170e30049b7212a4a2dc91148a3ac7ebc kubelet.confd
372cdf2fbb24a229ed7b3450b54197c006928cb8d2fd756f2713e1e6961849c7aaa35b20b14fb75d1a12ef1e35258048738aa22b5f9783af8fa0a31dfd1b5bbd kubelet.initd
44eb973de8ee8e0c5a77d76ab0e105fe0ae892be1ff86c238a5449b43f83cab6f844575b6c3218f08c5ff077e9f828f5aef72425c1d77546cce2e0136e8a8da8 kubelet.confd
941f4a7579dcf78da2d323ac69195e95eba6600e6fcefe9231447f11c9867a7aa57b4189ee1fefb10eab19c89665ea2e7696b539c92e99fbcde905d2ff85be58 kubelet.logrotated
"

View File

@ -1 +1 @@
command_args="--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cgroup-driver=cgroupfs --config=/var/lib/kubelet/config.yaml"
command_args="--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --image-credential-provider-bin-dir=/usr/libexec/kubernetes/kubelet-plugins --image-credential-provider-config=/etc/kubernetes/credential-provider.yaml"

View File

@ -5,6 +5,13 @@
supervisor=supervise-daemon
description="Kubelet, a Kubernetes node agent"
# do not start without kubezero node config in place
required_files="/var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml"
# Restart forever just like systemd upstream
respawn_max=0
respawn_delay=5
if [ -e /var/lib/kubelet/kubeadm-flags.env ]; then
. /var/lib/kubelet/kubeadm-flags.env;
fi
@ -16,7 +23,7 @@ pidfile="${KUBELET_PIDFILE:-/run/${RC_SVCNAME}.pid}"
: ${error_log:=/var/log/$RC_SVCNAME/$RC_SVCNAME.log}
depend() {
after net
after net cloudbender
need cgroups
want containerd crio
}

View File

@ -1,10 +1,10 @@
diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh
index fb0720a7..43a57c1f 100755
index f468d36b..a115aab9 100755
--- a/hack/make-rules/test-e2e-node.sh
+++ b/hack/make-rules/test-e2e-node.sh
@@ -240,6 +240,6 @@ else
--alsologtostderr --v 4 --report-dir=${artifacts} --node-name $(hostname) \
${test_args}" --runtime-config="${runtime_config}" \
@@ -225,6 +225,6 @@ else
--test-flags="--v 4 --report-dir=${artifacts} --node-name $(hostname) ${test_args}" \
--runtime-config="${runtime_config}" \
--kubelet-config-file="${kubelet_config_file}" \
- --build-dependencies=true 2>&1 | tee -i "${artifacts}/build-log.txt"
+ --k8s-bin-dir "/usr/bin" --build-dependencies=true 2>&1 | tee -i "${artifacts}/build-log.txt"

View File

@ -1,8 +1,8 @@
diff --git a/hack/lib/test.sh b/hack/lib/test.sh
index 62a6765f..775d1d96 100644
index 36ea5f04..045de151 100644
--- a/hack/lib/test.sh
+++ b/hack/lib/test.sh
@@ -77,12 +77,12 @@ kube::test::object_assert() {
@@ -78,12 +78,12 @@ kube::test::object_assert() {
local object=$2
local request=$3
local expected=$4
@ -11,9 +11,9 @@ index 62a6765f..775d1d96 100644
for j in $(seq 1 "${tries}"); do
# shellcheck disable=SC2086
# Disabling because "args" needs to allow for expansion here
- res=$(eval kubectl get "${kube_flags[@]}" ${args} "${object}" -o go-template=\""${request}"\")
+ res=$(eval kubectl ${args} get "${kube_flags[@]}" ${get_args} "${object}" -o go-template=\""${request}"\")
# Disabling because to allow for expansion here
- res=$(kubectl get "${kube_flags[@]}" ${args} ${object} -o go-template="${request}")
+ res=$(eval kubectl ${args} get "${kube_flags[@]}" ${get_args} "${object}" -o go-template=\""${request}"\")
if [[ "${res}" =~ ^$expected$ ]]; then
echo -n "${green}"
echo "$(kube::test::get_caller 3): Successful get ${object} ${request}: ${res}"

View File

@ -1,42 +1,92 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=kubezero
pkgver=1.25
pkgver=1.28.9
_crio=1.28.4
_ecr=1.28.1
pkgrel=0
pkgdesc="KubeZero release package"
url="https://git.zero-downtime.net/ZeroDownTime/alpine-overlay/src/branch/master/kubezero/kubezero"
arch="noarch"
arch="x86_64"
license="AGPL-3.0"
depends="
podman
podman
xz
cri-tools
cri-o~$pkgver
cri-o~$_crio
kubelet~$pkgver
kubectl~$pkgver
ecr-credential-provider~$pkgver
aws-iam-authenticator~0.6.2
ecr-credential-provider~$_ecr
aws-iam-authenticator~0.6.14
"
options="!check"
install="$pkgname.post-install"
#install="$pkgname.post-install"
subpackages="
$pkgname-imagecache
"
IMAGES="
quay.io/cilium/cilium:v1.15.3
ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9.3
"
#multus_version="4.0.2"
source="
shared-sys-fs.start
evictLocalNode.sh
credential-provider.yaml
kubelet.monit
crio.monit
crio.conf
"
#multus-"$multus_version".tar.gz::https://github.com/k8snetworkplumbingwg/multus-cni/releases/download/v"$multus_version"/multus-cni_"$multus_version"_linux_amd64.tar.gz
# get multus and cilium binaries and drop them in /usr/libexec/cni
build() {
return 0
# pre loaded images
for i in $IMAGES; do
IMAGE_NAME=$(echo $i | sed -e 's/.*\///' -e 's/:.*//')
podman --storage-driver vfs pull $i
podman --storage-driver vfs save $i | xz -z -T 0 - > $IMAGE_NAME.tar.xz
done
}
package() {
# make /sys shared
install -Dm755 "$srcdir"/shared-sys-fs.start "$pkgdir/etc/local.d/shared-sys-fs.start"
# drain local node
install -Dm755 "$srcdir"/evictLocalNode.sh "$pkgdir/usr/bin/evictLocalNode.sh"
mkdir -p $pkgdir/etc/kubernetes/manifests
install -Dm644 "$srcdir"/credential-provider.yaml "$pkgdir/etc/kubernetes/credential-provider.yaml"
# crio settings
install -Dm644 "$srcdir"/crio.conf "$pkgdir/etc/crio/crio.conf.d/01-kubezero.conf"
# monit
install -Dm644 "$srcdir"/kubelet.monit "$pkgdir/etc/monit.d/kubelet.conf"
install -Dm644 "$srcdir"/crio.monit "$pkgdir/etc/monit.d/crio.conf"
# multus
#install -Dm755 "$srcdir"/multus-cni_"$multus_version"_linux_amd64/multus $pkgdir/usr/libexec/cni/multus
}
# Preload container images all nodes need to speed up boot time and reduce data transfer
imagecache() {
mkdir -p "$subpkgdir/mnt/zdt/cache"
for i in $IMAGES; do
IMAGE_NAME=$(echo $i | sed -e 's/.*\///' -e 's/:.*//')
install -Dm644 "$srcdir"/$IMAGE_NAME.tar.xz "$subpkgdir/mnt/zdt/cache/$IMAGE_NAME.tar.xz"
done
}
sha512sums="
b0cadf577ea912630efabf8d104f2edaa79bd1697a1f9224ce8a75354dd204196c6d3c15c0318afa44be10be9696ce20ef0015198ee0b74050897d164f77ae60 shared-sys-fs.start
ecb33fc3a0ffc378723624858002f9f5e180e851b55b98ab6611ecc6a73d4719bc7de240f87683fc58de8bf577059e6f19b417655b5301ef8c32deff67a29dff shared-sys-fs.start
fce1013f7b1bfa8ee526de62e642a37fda3168889723e873d3fb69e257f4caa1423b5a14b9343b12a87f3b6f93c7d3861b854efda67ef2d6a42a5ca8cf3d1593 evictLocalNode.sh
92499ec9a8b3634c42b16c01d27f1c1bb650bcc074a2c8d9d16cfe2ea08942948989c6aae79bd2df562ff17df11bbc329e0971f15c4e64f944457825dee7aa79 credential-provider.yaml
8b81eb0fb66e6a739965db6af6a31c443e8f612c06146bd51107372abd833b527423299ee11b27e011f46cfbee11415234b3fa0dea695dbbb06711e0ad58f08d kubelet.monit
e801df9ede6065395db75154735ca9368882d4225452a33f2b54b98cd0c4f3ceb730762d8745c6aea350a3a50a1df0c79ab46f422f94e9a40e621528e9d82055 crio.monit
064fc245b7ffd67834a2f5fd13cb0bcb5f4a5caf79b8113b3669bf1d0e1a4af2042e69f8f496991de76d621fd01bc7e67de37c59f034584d12622c6af96376ff crio.conf
"

View File

@ -0,0 +1,12 @@
apiVersion: kubelet.config.k8s.io/v1
kind: CredentialProviderConfig
providers:
- name: ecr-credential-provider
matchImages:
- "*.dkr.ecr.*.amazonaws.com"
- "*.dkr.ecr.*.amazonaws.cn"
- "*.dkr.ecr-fips.*.amazonaws.com"
- "*.dkr.ecr.us-iso-east-1.c2s.ic.gov"
- "*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov"
defaultCacheDuration: "12h"
apiVersion: credentialprovider.kubelet.k8s.io/v1

View File

@ -0,0 +1,8 @@
[crio.metrics]
enable_metrics = true
[crio.runtime]
default_ulimits = [ "nofile=65535:65535", "memlock=-1:-1" ]
[crio.network]
cni_default_network="multus-cni-network"

View File

@ -0,0 +1,4 @@
check process crio pidfile /run/crio.pid
start program = "/sbin/rc-service crio start"
stop program = "/sbin/rc-service crio stop"
restart program = "/sbin/rc-service crio restart"

View File

@ -0,0 +1,10 @@
check process kubelet pidfile /run/kubelet.pid
start program = "/sbin/rc-service kubelet start"
stop program = "/sbin/rc-service kubelet stop"
restart program = "/sbin/rc-service kubelet restart"
if failed
port 10248
protocol http
request "/healthz"
for 2 cycles
then restart

View File

@ -1,8 +0,0 @@
#!/bin/sh
CILIUM=v1.13.1
MULTUS=v3.9.3
# Pre-Pull CNI images
podman pull quay.io/cilium/cilium:$CILIUM
podman pull ghcr.io/k8snetworkplumbingwg/multus-cni:$MULTUS

View File

@ -1,3 +1,4 @@
#!/bin/sh
mount --make-shared /sys/fs/cgroup
mount --make-shared /sys/fs/bpf
mount --make-shared /sys

View File

@ -0,0 +1,64 @@
--- a/execinfo.c.orig
+++ b/execinfo.c
@@ -69,7 +69,8 @@
char **
backtrace_symbols(void *const *buffer, int size)
{
- int i, clen, alen, offset;
+ size_t clen, alen;
+ int i, offset;
char **rval;
char *cp;
Dl_info info;
@@ -78,7 +79,6 @@
rval = malloc(clen);
if (rval == NULL)
return NULL;
- (char **)cp = &(rval[size]);
for (i = 0; i < size; i++) {
if (dladdr(buffer[i], &info) != 0) {
if (info.dli_sname == NULL)
@@ -92,14 +92,14 @@
2 + /* " <" */
strlen(info.dli_sname) + /* "function" */
1 + /* "+" */
- D10(offset) + /* "offset */
+ 10 + /* "offset */
5 + /* "> at " */
strlen(info.dli_fname) + /* "filename" */
1; /* "\0" */
rval = realloc_safe(rval, clen + alen);
if (rval == NULL)
return NULL;
- snprintf(cp, alen, "%p <%s+%d> at %s",
+ snprintf((char *) rval + clen, alen, "%p <%s+%d> at %s",
buffer[i], info.dli_sname, offset, info.dli_fname);
} else {
alen = 2 + /* "0x" */
@@ -108,12 +108,15 @@
rval = realloc_safe(rval, clen + alen);
if (rval == NULL)
return NULL;
- snprintf(cp, alen, "%p", buffer[i]);
+ snprintf((char *) rval + clen, alen, "%p", buffer[i]);
}
- rval[i] = cp;
- cp += alen;
+ rval[i] = (char *) clen;
+ clen += alen;
}
+ for (i = 0; i < size; i++)
+ rval[i] += (long) rval;
+
return rval;
}
@@ -155,6 +158,6 @@
return;
snprintf(buf, len, "%p\n", buffer[i]);
}
- write(fd, buf, len - 1);
+ write(fd, buf, strlen(buf));
}
}

View File

@ -0,0 +1,24 @@
--- a/execinfo.c.orig
+++ b/execinfo.c
@@ -26,6 +26,7 @@
* $Id: execinfo.c,v 1.3 2004/07/19 05:21:09 sobomax Exp $
*/
+#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>
#include <dlfcn.h>
--- a/stacktraverse.c.orig
+++ b/stacktraverse.c
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE
#include <stddef.h>
#include "stacktraverse.h"
--- a/test.c.orig
+++ b/test.c
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

View File

@ -0,0 +1,44 @@
--- a/Makefile.orig
+++ b/Makefile
@@ -23,24 +23,25 @@
# SUCH DAMAGE.
#
# $Id: Makefile,v 1.3 2004/07/19 05:19:55 sobomax Exp $
+#
+# Linux Makefile by Matt Smith <mcs@darkregion.net>, 2011/01/04
-LIB= execinfo
+CC=cc
+AR=ar
+EXECINFO_CFLAGS=$(CFLAGS) -O2 -pipe -fno-strict-aliasing -std=gnu99 -fstack-protector -c
+EXECINFO_LDFLAGS=$(LDFLAGS)
-SRCS= stacktraverse.c stacktraverse.h execinfo.c execinfo.h
+all: static dynamic
-INCS= execinfo.h
+static:
+ $(CC) $(EXECINFO_CFLAGS) $(EXECINFO_LDFLAGS) stacktraverse.c
+ $(CC) $(EXECINFO_CFLAGS) $(EXECINFO_LDFLAGS) execinfo.c
+ $(AR) rcs libexecinfo.a stacktraverse.o execinfo.o
-SHLIB_MAJOR= 1
-SHLIB_MINOR= 0
+dynamic:
+ $(CC) -fpic -DPIC $(EXECINFO_CFLAGS) $(EXECINFO_LDFLAGS) stacktraverse.c -o stacktraverse.So
+ $(CC) -fpic -DPIC $(EXECINFO_CFLAGS) $(EXECINFO_LDFLAGS) execinfo.c -o execinfo.So
+ $(CC) -shared -Wl,-soname,libexecinfo.so.1 -o libexecinfo.so.1 stacktraverse.So execinfo.So
-NOPROFILE= yes
-
-DPADD= ${LIBM}
-LDADD= -lm
-
-#WARNS?= 4
-
-#stacktraverse.c: gen.py
-# ./gen.py > stacktraverse.c
-
-.include <bsd.lib.mk>
+clean:
+ rm -rf *.o *.So *.a *.so

View File

@ -0,0 +1,50 @@
# Contributor: Philipp Andronov <filipp.andronov@gmail.com>
# Maintainer: Matt Smith <mcs@darkregion.net>
pkgname=libexecinfo
pkgver=1.1
pkgrel=1
pkgdesc="A quick-n-dirty BSD licensed clone of the GNU libc backtrace facility."
options="!check" # No testsuite
url="https://www.freshports.org/devel/libexecinfo"
arch="all"
license="BSD-2-Clause"
subpackages="$pkgname-static ${pkgname}-dev"
source="http://distcache.freebsd.org/local-distfiles/itetcu/$pkgname-$pkgver.tar.bz2
10-execinfo.patch
20-define-gnu-source.patch
30-linux-makefile.patch
"
build() {
cd "$builddir"
export CFLAGS="${CFLAGS} -fno-omit-frame-pointer"
make
}
package() {
cd "$builddir"
install -D -m755 "$builddir"/execinfo.h \
"$pkgdir"/usr/include/execinfo.h
install -D -m755 "$builddir"/stacktraverse.h \
"$pkgdir"/usr/include/stacktraverse.h
install -D -m755 "$builddir"/libexecinfo.a \
"$pkgdir"/usr/lib/libexecinfo.a
install -D -m755 "$builddir"/libexecinfo.so.1 \
"$pkgdir"/usr/lib/libexecinfo.so.1
ln -s /usr/lib/libexecinfo.so.1 \
"$pkgdir"/usr/lib/libexecinfo.so
}
static() {
depends=""
pkgdesc="$pkgdesc (static library)"
mkdir -p "$subpkgdir"/usr/lib
mv "$pkgdir"/usr/lib/*.a "$subpkgdir"/usr/lib
}
sha512sums="51fea7910ef6873061a25c22434ce4da724e9d8e37616a069ad0a58c0463755be4c6c7da88cd747484c2f3373909d7be4678b32a4bd91b6d9e0f74526094e92c libexecinfo-1.1.tar.bz2
cd35c9046d88b39b05bc36faffb1e71ae3b2140632da7da37f374fff671d4ad812eebd0581011ff9e3b25d3cb4bc962cf35957074f713817b3b617511425af1a 10-execinfo.patch
c961b2f86cba291e8c69a507e3880354ad7369fd04c8948d54c4db0578fe30cca8f4250742cb63d1ab6e5875988f04c4729256197030369404f0e925f299a66c 20-define-gnu-source.patch
13d3df88a6dabd78ee2cf50092511f5a10f0e5ff3d81dbacb182fcf85ceb0c13a5f0252397b4eb0ac57f8d8bd3fc3af6c05865d6398cbc1517f347210c5750da 30-linux-makefile.patch"

View File

@ -1,8 +1,9 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
# https://github.com/NVIDIA/nvidia-container-toolkit
pkgname=nvidia-container-toolkit
pkgver=1.10.0
pkgrel=1
pkgver=1.15.0
pkgrel=0
pkgdesc="NVIDIA Container toolkit incl. cri hooks"
url="https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/overview.html"
arch="x86_64"
@ -12,17 +13,17 @@ depends="glibc-bin nvidia-drivers"
options="!check !tracedeps"
_nv_ver="$pkgver"-1
_libcap=2.25-2
_libcap=2.44-1
_libseccomp=2.3.3-4
source="https://nvidia.github.io/libnvidia-container/stable/debian10/amd64/libnvidia-container1_"$_nv_ver"_amd64.deb
https://nvidia.github.io/libnvidia-container/stable/debian10/amd64/libnvidia-container-tools_"$_nv_ver"_amd64.deb
https://nvidia.github.io/libnvidia-container/stable/debian10/amd64/nvidia-container-toolkit_"$_nv_ver"_amd64.deb
http://deb.debian.org/debian/pool/main/libc/libcap2/libcap2_"$_libcap"_amd64.deb
http://deb.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_"$_libseccomp"_amd64.deb
config.toml
oci-nvidia-hook.json
"
source="https://nvidia.github.io/libnvidia-container/stable/deb/amd64/libnvidia-container1_"$_nv_ver"_amd64.deb
https://nvidia.github.io/libnvidia-container/stable/deb/amd64/libnvidia-container-tools_"$_nv_ver"_amd64.deb
https://nvidia.github.io/libnvidia-container/stable/deb/amd64/nvidia-container-toolkit_"$_nv_ver"_amd64.deb
http://deb.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_"$_libseccomp"_amd64.deb
http://deb.debian.org/debian/pool/main/libc/libcap2/libcap2_"$_libcap"_amd64.deb
config.toml
oci-nvidia-hook.json
"
build() {
return 0
@ -51,7 +52,6 @@ package() {
ar -x "$srcdir"/libcap2_"$_libcap"_amd64.deb && tar xfJ data.tar.xz
mv lib/x86_64-linux-gnu/libcap.so.* "$pkgdir"/usr/glibc-compat/lib
rm -rf control.tar.xz data.tar.xz debian-binary usr
# libseccomp
ar -x "$srcdir"/libseccomp2_"$_libseccomp"_amd64.deb && tar xfJ data.tar.xz
mv usr/lib/x86_64-linux-gnu/libseccomp.so.* "$pkgdir"/usr/glibc-compat/lib
@ -67,12 +67,12 @@ package() {
install -Dm644 config.toml "$pkgdir"/etc/nvidia-container-runtime/config.toml
}
sha512sums='
23ba2aec28f01c0037bbb4812ea542589e96f6527cf49468a4a7c54ca05808cf0984a8dfe13ee3455b8c6ae8468c58590f9e1e6996927c710bcf3e545772a356 libnvidia-container1_1.10.0-1_amd64.deb
c5369c832bd91703e6e6e86a4431c2eebb2ddeaadff126174b41ed11e969dc8cc49dcab26b3ac18abb43b466a86ce76908eaa2f5e4109c689a0c3a4fa47548b5 libnvidia-container-tools_1.10.0-1_amd64.deb
3043729bd96dd153db1dc317685167f34da6b9d202134335212fb7d861532a265a59e002c86fff2664c67687f4c8bcc75913c74018930a6c68c0f2044eceacf0 nvidia-container-toolkit_1.10.0-1_amd64.deb
694a3ec64ef3056d5874ff03b889b868c294bccb16506468fdf1c289fe3aaadc2da25a5934de653af9633a5d993d2bb21491d84b3b2e2529e6b31d92c78a2228 libcap2_2.25-2_amd64.deb
sha512sums="
36adc14f49b3827ba5b86fdf75b2eb91fd2b2621e9be3a02c2f7f94b2c30e47b9f9d7482ae4f788cee804b12b359e4dc597878171f6e68f7287c18b3d3dfdf8d libnvidia-container1_1.15.0-1_amd64.deb
686a642649104379710056740dd0e9004a20809729231d88557c85844fe83ea5b279fe6ac49e890bcc8727b050c285f4d1b2cba584b12be1158c5b7af48d27a3 libnvidia-container-tools_1.15.0-1_amd64.deb
f8507ca4d5b4678c6afaa271b5930f856d33d2ab92ed70fbd2a5047eb6fe7635f55758047117119f8656270d96667ddb154bb95074d58a34ad37ffb7832ce951 nvidia-container-toolkit_1.15.0-1_amd64.deb
5a4eaa96e6e774948889909d618a8ed44a82f649cbba11622dc7b4478098bea006995d5a5a60ca026a57b76ad866d1e2c6caebd154a26eb6bd7e15291b558057 libseccomp2_2.3.3-4_amd64.deb
cc9109cdcf51dc40db732e10ac3eda7e4ac73299ad51d2ec619d7f4cff3f0311be0937530d2175e5486c393bc9e91c709072094fad510573785739afaad831f1 libcap2_2.44-1_amd64.deb
040ac2e3f58549dc09e5bce0d694e4be2f6aae736014bf0ee90042646562d5f1ef1f5990eb9f2c2a2fdf504587b82f4aa0eb99d04c5d3e407670e4012e3edd4e config.toml
cf5673231d1862e3ec03f792cddf54ff27237656f762c3f42b6d7e1584de2201c487861ac399ab26951b5dbf3e3cd9b4451dbf61f02b55e0991889b507319764 oci-nvidia-hook.json
'
0f150ea59b2372bf3ef60e657142b19f46500d1c70cb179d37ce117d6b03e86427dbf356873affb7639e082a07f852a922ae3aea4a8f8885640e43675c4e4add oci-nvidia-hook.json
"

View File

@ -0,0 +1 @@
libpsx.so.2.66

View File

@ -1,8 +1,8 @@
{
"version": "1.0.0",
"hook": {
"path": "/usr/bin/nvidia-container-toolkit",
"args": ["nvidia-container-toolkit", "prestart"]
"path": "/usr/bin/nvidia-container-runtime-hook",
"args": ["nvidia-container-runtime-hook", "prestart"]
},
"when": {
"always": true,

View File

@ -1,7 +1,7 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=nvidia-drivers
pkgver=515.65.01
pkgver=550.76
pkgrel=0
pkgdesc="NVIDIA Driver"
url="https://www.nvidia.com/download/index.aspx"
@ -27,7 +27,7 @@ package() {
cd "$srcdir"/NVIDIA-Linux-x86_64-$pkgver
# GSP firmware
cp firmware/gsp.bin "$pkgdir"/lib/firmware/nvidia/"$pkgver"/gsp.bin
cp firmware/*.bin "$pkgdir"/lib/firmware/nvidia/"$pkgver"/
# Various bins
SBINS="nvidia-smi nvidia-debugdump nvidia-cuda-mps-control nvidia-cuda-mps-server nvidia-persistenced"
@ -38,7 +38,7 @@ package() {
# which libs are from debug log at runtime
# LIBS=$(grep "missing library" /var/log/nvidia-container-toolkit.log | awk '{print $7}' | sort | uniq)
# cross checked via .manifest for targets and symlinks
LIBS="libEGL_nvidia.so libGLESv1_CM_nvidia.so libGLESv2_nvidia.so libGLX_nvidia.so libcuda.so libcudadebugger.so libnvcuvid.so libnvidia-allocator.so libnvidia-cbl.so libnvidia-cfg.so libnvidia-compiler.so libnvidia-eglcore.so libnvidia-encode.so libnvidia-fatbinaryloader.so libnvidia-fbc.so libnvidia-glcore.so libnvidia-glsi.so libnvidia-glvkspirv.so libnvidia-ifr.so libnvidia-ml.so libnvidia-ngx.so libnvidia-nscq.so libnvidia-opencl.so libnvidia-opticalflow.so libnvidia-pkcs11.so libnvidia-ptxjitcompiler.so libnvidia-rtcore.so libnvidia-tls.so libnvoptix.so libvdpau_nvidia.so"
LIBS="libEGL_nvidia.so libGLESv1_CM_nvidia.so libGLESv2_nvidia.so libGLX_nvidia.so libcuda.so libcudadebugger.so libnvcuvid.so libnvidia-allocator.so libnvidia-cbl.so libnvidia-cfg.so libnvidia-compiler.so libnvidia-eglcore.so libnvidia-encode.so libnvidia-fatbinaryloader.so libnvidia-fbc.so libnvidia-glcore.so libnvidia-glsi.so libnvidia-glvkspirv.so libnvidia-ifr.so libnvidia-ml.so libnvidia-ngx.so libnvidia-nscq.so libnvidia-opencl.so libnvidia-opticalflow.so libnvidia-pkcs11.so libnvidia-ptxjitcompiler.so libnvidia-rtcore.so libnvidia-tls.so libnvoptix.so libvdpau_nvidia.so libnvidia-gpucomp.so libnvidia-nvvm.so"
# inspired from Gentoo x11-drivers/nvidia-drivers
for lib in $LIBS; do
@ -54,6 +54,6 @@ package() {
done
}
sha512sums='
5221a4ac071eb39a37a841f19cfe4983286dc35e918956b40604404ef36c122612475df7b9a391a9a70bd60f44e598c8a0e5ec54ccc3e90d51f01e1b2fbe5e33 NVIDIA-Linux-x86_64-515.65.01.run
'
sha512sums="
a3804501b220d4acbda9633b92c4515bb14d0b5233f3ffd5e173290d310efdb1ed9a9602f727c117c1d0746d596c1125c51cc3e1fde65c79905e60e1d35f50ec NVIDIA-Linux-x86_64-550.76.run
"

View File

@ -1,13 +1,19 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
# Issues:
# - https://github.com/NVIDIA/open-gpu-kernel-modules/issues/468
# https://github.com/NVIDIA/open-gpu-kernel-modules/pull/609/files
# remove coreutils from makedepends
pkgname=nvidia-open-gpu
pkgver=515.65.01
pkgver=550.76
pkgrel=0
pkgdesc="NVIDIA Linux open GPU kernel modules"
url="https://github.com/NVIDIA/open-gpu-kernel-modules"
arch="x86_64"
license="MIT OR GPL-2.0"
makedepends="bash linux-headers linux-virt-dev"
makedepends="bash linux-headers linux-virt-dev coreutils"
options="!check"
source="nvidia-$pkgver.tar.gz::https://github.com/NVIDIA/open-gpu-kernel-modules/archive/refs/tags/$pkgver.tar.gz
@ -20,6 +26,9 @@ build() {
# Hack running the build inside a container other uname -r returns host kernel
KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-virt))
unset CFLAGS CPPFLAGS CXXFLAGS
unset LDFLAGS
make KERNEL_UNAME=$KERNEL_VERSION
}
@ -35,7 +44,7 @@ package() {
mkdir -p "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel
for m in $modules; do
gzip -9 -c kernel-open/$m > "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel/$m.gz
gzip -9 -c kernel-open/$m > "$pkgdir"/lib/modules/$KERNEL_VERSION/kernel/$m.gz
done
# Add some udev rules to automate node handling
@ -43,8 +52,8 @@ package() {
install -Dm755 "$srcdir"/create-nvidia-uvm-dev-node.sh "$pkgdir"/usr/sbin/create-nvidia-uvm-dev-node.sh
}
sha512sums='
c2ff6fd02272b6981a65e7e14c6b636f0113e21da910898c27682f58e60fa8e6deea3670081c57e4961fb5e7794eef8eddb90d134ba1892536a8468c5dc9d669 nvidia-515.65.01.tar.gz
sha512sums="
5126d3b8e3f0635b5b044db4faf0d483e70bb43418bbd21325bb175aaca948e19bd81038fbef9118a95387da65ff0ff3d1592fc54c0d6815a2448b32024468ac nvidia-550.76.tar.gz
b16b86ded8601ff802477e2b191c5728290014f90bb85ad6ec0e5b7e84f8004c467f5b6c66b80dc5d205fb70a3900ac286764a3829ca3ad3b8a3a5fd0b73a702 91-nvidia.rules
8335bd69c482da1f67b5cddd31a0b40d01b5c627aeca137b40ac7776cb3e7475767bec808a972ed739c26914207aca264324c41496f6fb579d910c8477f7cc1c create-nvidia-uvm-dev-node.sh
'
"

View File

@ -1,38 +1,41 @@
# Contributor: Stefan Reimer <stefan@zero-downtime.net>
# Maintainer: Stefan Reimer <stefan@zero-downtime.net>
pkgname=zdt-base
pkgver=0.1.2
pkgver=0.3.19
pkgrel=0
pkgdesc="ZeroDownTime Alpine additions and customizations"
url="https://git.zero-downtime.net/ZeroDownTime/alpine-overlay/src/branch/master/kubezero/zdt-base"
arch="noarch"
license="AGPL-3.0"
depends="logrotate syslog-ng neofetch monit file tiny-cloud"
depends="logrotate syslog-ng neofetch monit file tiny-cloud dhcpcd"
options="!check"
subpackages="$pkgname-openrc $pkgname-aws"
subpackages="$pkgname-openrc $pkgname-aws $pkgname-nocloud"
install="$pkgname.post-install"
source="
cb_base.sh
cb_lock.sh
cb_volumes.sh
cb_init.sh
common.sh
boot.sh
cloudbender-early.init
cloudbender.init
cloud-aws.sh
cloud-nocloud.sh
zdt-sysctl.conf
https://raw.githubusercontent.com/pixelb/ps_mem/v3.14/ps_mem.py
syslog-ng.conf
syslog-ng.logrotate.conf
syslog-ng.apparmor
cloudbender.stop
cloudbender.start
dhcpcd-mtu.hook
monitrc
monit_alert.sh.aws
neofetch.conf
zdt-ascii.txt
dhclient.conf
profile
route53.py
get_iam_sshkeys.py
uniq_hostname.py
write_parameters.py
"
build() {
@ -41,17 +44,15 @@ build() {
package() {
# core libraries
install -Dm755 "$srcdir/cb_base.sh" "$pkgdir/usr/lib/cloudbender/base.sh"
install -Dm755 "$srcdir/cb_lock.sh" "$pkgdir/usr/lib/cloudbender/lock.sh"
install -Dm755 "$srcdir/cb_volumes.sh" "$pkgdir/usr/lib/cloudbender/volumes.sh"
install -Dm755 "$srcdir/cb_init.sh" "$pkgdir/usr/lib/cloudbender/init.sh"
install -Dm755 "$srcdir/boot.sh" "$pkgdir/usr/lib/cloudbender/boot.sh"
install -Dm755 "$srcdir/common.sh" "$pkgdir/usr/lib/cloudbender/common.sh"
# convienience
mkdir -p "$pkgdir"/etc/cloudbender "$pkgdir/home/alpine"
mkdir -p "$pkgdir/home/alpine"
install -Dm644 "$srcdir/profile" "$pkgdir/home/alpine/.profile"
# dhcp tuning for MTU
install -Dm644 "$srcdir"/dhclient.conf "$pkgdir"/etc/dhcp/dhclient.conf
# set mtu on interface via dhcpcd
install -Dm644 "$srcdir/dhcpcd-mtu.hook" "$pkgdir/usr/lib/dhcpcd/dhcpcd-hooks/10-mtu"
# various sysctl tunings
install -Dm644 "$srcdir"/zdt-sysctl.conf "$pkgdir"/etc/sysctl.d/60-zdt.conf
@ -59,9 +60,14 @@ package() {
# early init script to eg. mount var, cannot use any network !
install -Dm755 "$srcdir/cloudbender-early.init" "$pkgdir/etc/init.d/cloudbender-early"
# various tasks during boot
# various tasks during first boot
install -Dm755 "$srcdir/cloudbender.init" "$pkgdir/etc/init.d/cloudbender"
# local boot & shutdown
install -Dm755 "$srcdir/cloudbender.start" "$pkgdir/etc/local.d/cloudbender.start"
install -Dm755 "$srcdir/cloudbender.stop" "$pkgdir/etc/local.d/cloudbender.stop"
# syslog-ng configs, json all into messages
install -Dm644 "$srcdir"/syslog-ng.conf "$pkgdir"/lib/zdt/syslog-ng.conf
install -Dm644 "$srcdir"/syslog-ng.logrotate.conf "$pkgdir"/lib/zdt/syslog-ng.logrotate.conf
@ -82,35 +88,50 @@ package() {
}
aws() {
# Basic AWS tools
mkdir -p "$subpkgdir"
# aws libs
install -Dm755 "$srcdir/cloud-aws.sh" "$pkgdir/usr/lib/cloudbender/cloud/aws.sh"
# other tools
install -Dm755 "$srcdir"/route53.py "$subpkgdir"/usr/sbin/route53.py
install -Dm755 "$srcdir"/uniq_hostname.py "$subpkgdir"/usr/sbin/uniq_hostname.py
install -Dm755 "$srcdir"/get_iam_sshkeys.py "$subpkgdir"/usr/sbin/get_iam_sshkeys.py
install -Dm755 "$srcdir"/write_parameters.py "$subpkgdir"/usr/sbin/write_parameters.py
# Cloudbender SNS integration
install -Dm755 "$srcdir"/monit_alert.sh.aws "$pkgdir"/usr/bin/monit_alert.sh
}
nocloud() {
mkdir -p "$subpkgdir"
# nocloud libs
install -Dm755 "$srcdir/cloud-nocloud.sh" "$pkgdir/usr/lib/cloudbender/cloud/nocloud.sh"
}
sha512sums="
92e669eb440dbc89e083e3b61c9f9fa871bedfdca404b8c3533fa1caec0d061f428c39697921c68c49d3de9af2043946d9baf989ba47482827d5626fe9f3e479 cb_base.sh
3e02b858680d751b2c1fb22feeec2b767e328fdf007d9fb6687f309e62630467e982cc33d47c4417311938d35bb17dc210f9e0a40b90298dc22cf142f022c124 cb_lock.sh
f392ae57c4a5ccc019eb5fcc191429a4ba81b2593bfb12bab3b3da3365342b2b89908dfd90d8debf813e3455a61ff8d123f49fa4adce8ea44c06d9db1f7b7e19 cb_volumes.sh
89a27da781fe302aaed9ed7dcd1b8fc8b94f39802f86b1faeedee50d68a9d61fcbfe037bc29a938994c507cbcda08d86d2c387db118408ca9b7d085f99bb0a17 cb_init.sh
9c688e08e44ae965eaa6021e005f65077690c30fe6b2de7371c57ae3a53a18f12a356587261c950b8c900f73cb35af3ba7795d33181532b485aeee3b6ca41757 cloudbender-early.init
46500f8dc08e2e5e5d34886225ef4993f02da9f0a8b55107e886337ec1318fe683a172c398d1236e8f2559b57e0aba66238725e0a20e0440e1423d71aa3d77ea cloudbender.init
b9479835d8667fa99f8b1b140f969f0464a9bb3c60c7d19b57e306cfe82357d453932791e446caded71fddd379161ae8328367f1ee75ae3afc1b85e12294b621 zdt-sysctl.conf
c1808572d074e1a91e0efc3c31462f6035159338843e51fbccca5102b2923506ce60ba9e1ef00b2fbb134da7a33f55af364e1bff15c272eb7f4ebc6035f33887 common.sh
cf8b75a81bb35e853761d21b15b5b109f15350c54daaf66d2912541a20f758c3ca237d58932e5608d2d3867fe15a07ebd694fd1c313a8290d15afc2b27a575dd boot.sh
eb7d5b6f92f500dbaba04a915cdd8d66e90456ca86bed86b3a9243f0c25577a9aa42c2ba28c3cad9dda6e6f2d14363411d78eff35656c7c60a6a8646f43dcba5 cloudbender-early.init
cac71c605324ad8e60b72f54b8c39ee0924205fcd1f072af9df92b0e8216bcde887ffec677eb2f0eacce3df430f31d5b5609e997d85f14389ee099fbde3c478f cloudbender.init
482438e6d443777636fd8f8f7b3d887c5664243d9547a47a755cbb3f56fac3a145be34e9ef6ce622bf0dcb28f5dda1a53c8448f8dbfb632210cc52a3b786b18c cloud-aws.sh
3a84b728d4169b92356f1da52922c6110efd5bdc2df90b64abe59f89a5de57cc85a81936bdead0cae5071c1ba1735bda1bd866018b5c3f7fd4ef155d0606ac2d cloud-nocloud.sh
06102e56c847637f705d0b29b05b07fbbb2bda9ba69f0a7fe1d716126d3b1c7922fb0df159199809908fa0dc143209775edb1dd5976faa84244dbcaa45f00364 zdt-sysctl.conf
76e6a4f309f31bfa07de2d3b1faebe5670722752e18157b69d6e868cbe9e85eda393aed0728b0347a01a810eee442844c78259f86ff71e3136a013f4cbfaaea4 ps_mem.py
9d087f2d4403a9c6d4d2f06fbb86519f2b8b134d8eb305facaef07c183815f917fb7bac916d39d504dbab7fdf3321a3f70954dde57e8986cc223371715bb1c54 syslog-ng.conf
b86dec8c059642309b2f583191457b7fac7264b75dc5f4a06ad641de6b76589c0571b8b72b51519516ba7e68a128fe2da29b4a2a6dc77c252204675c51b2d128 syslog-ng.conf
484bdcf001b71ce5feed26935db437c613c059790b99f3f5a3e788b129f3e22ba096843585309993446a88c0ab5d60fd0fa530ef3cfb6de1fd34ffc828172329 syslog-ng.logrotate.conf
1db58e670748bf9a507068251c21c9ca1744727bbf53fd925059239a58721f6f60c0bd357e4c52e9277a913640a6784e0025a7f3834868a1e93e9e1cbc66d5a6 syslog-ng.apparmor
b928ba547af080a07dc9063d44cb0f258d0e88e7c5a977e8f1cf1263c23608f0a138b8ffca0cdf5818ee72fccb3ce8433f877811be3107bb9c275dfff988179c monitrc
64944727d658ff37e7ff9d22a23869e225e104d9229756ba4fef1fc481c56f782472a1c74f8127636b4a98d4545ae30e7d35822a1f0a0fa31a59ec8eaf8c0396 monit_alert.sh.aws
346b0170ada6cc1207ffb7b8ef138a1570a63c7df4d57618aa4b6b6c0d2df2197b0f5b23578ec83c641ee5e724865ac06985222e125809c990467426a0851b72 neofetch.conf
e86eed7dd2f4507b04050b869927b471e8de26bc7d97e7064850478323380a0580a92de302509901ea531d6e3fa79afcbf24997ef13cd0496bb3ee719ad674ee syslog-ng.apparmor
cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e cloudbender.stop
b93cec571afe5128ab4d7c3998b3dc48753897f37169a111f606a48d1982e6ffce52a4ac9568a6a062f621148fb652049b84926a40a62d89be3786e6836261e6 cloudbender.start
f8c052c7ec12c71937c7b8bc05d8374c588f345e303b30eda9c8612dff8f8f34a87a433648a3e9b85b278196ece198533b29680a303ff6478171d43f8e095189 dhcpcd-mtu.hook
e00a8f296c76446fe1241bf804c0108f47a2676f377a413ee9fede0943362a6582cad30fe13edd93f3d0daab0e2d7696553fb9458dca62adc05572dce339021a monitrc
c955dabe692c0a4a2fa2b09ab9096f6b14e83064b34ae8d22697096daf6551f00b590d837787d66ea1d0030a7cc30bef583cc4c936c980465663e73aec5fa2dc monit_alert.sh.aws
2c02a1d454881dd7197548286c6cf24c1453dd9d726f3e5445703c12414853b0e12205e5b6a0c3ae09b76097d2bdfcfd6e1bc9a122dd9f66c6d6d03ab41f748a neofetch.conf
532b8e2eb04942ab20bdc36b5dea1c60239fcbfcb85706123f3e05c18d65c938b85e9072d964ae5793177625a8db47b532db1f5bd5ed5ecbb70d5a331666ff54 zdt-ascii.txt
c565516121b9e6f9d5f769511eb900546753e67cc4208d1b388fdce44cd28699261a5c3905f9a168d4b2d45ac65ac3a2a6a95335f1bbd76d2f444d5f50ec5c9e dhclient.conf
c3e72cd92936b03f2b9eab5e97e9a12fcddcdf2c943342e42e7702e2d2407e00859c62dc9b4de3378688d2f05458aa5c104272af7ab13e53a62f1676d1a9a1b4 profile
2d419d5c25a3829e99326b09876f459e48ab66f5756a8ad39b406c0f2829f5a323e8ff512afd8f32b7b07f24c88efa911bee495ce6c4d1925194cb54d3ba57bd route53.py
00eaff6c0a506580340b2547c3b1602a54238bac6090a15516839411478a4b4fdc138668b8ad23455445131f3a3e3fda175ed4bb0dd375402641c0e7b69c3218 get_iam_sshkeys.py
8fd5dca9b9fdae61022f136215afa8adc0d199afcf26593bdd0bd1946d0f2efc5d7ed345704ef9642fbeedeeea007ed31b67fafe89289b749a560a045da45b8e uniq_hostname.py
816049360aa442f9e9aa4d6525795913cfe3dc7c6c14dc4ccad59c0880500f9d42f198edc442fe036bc84ba2690d9c5bc8ae622341d8276b3f14947db6b879b1 route53.py
7da28446762a36a6737c5b30becbce78775bd943b4d0c5ef938a50f49b4f51f66708434aa79004c19d16c56c83f54c8d6d68e1502ebc250c73f8aae12bed83c0 get_iam_sshkeys.py
ae1941fc45e61fa8d211f5ef7eff2dd01510a6d364c4302cab267812321a10e7434ecc8d8c9263d8671ce5604d04d6531601bf42886a55fb6aec7f321651e1dc uniq_hostname.py
ee4264337d86ad99ba6cf9ec3017986c804ac208c0beb5fc8651345bd277bb6de03e7c3a8c1b751767647be48f9d45ac47a7d14cf040d9c827780984394e826d write_parameters.py
"

56
kubezero/zdt-base/boot.sh Normal file
View File

@ -0,0 +1,56 @@
#!/bin/sh
# We have no metadata nor instance parameters yet!
# We built on top of tiny-cloud
. /lib/tiny-cloud/common
# archive orig /var, mount new var on top and restore orig var
copy_and_mount() {
local dev=$1
tar cf /tmp/var.tar /var 2>/dev/null
mount -t xfs -o noatime "$dev" /var
tar xf /tmp/var.tar -C / && rm -f /tmp/var.tar
}
setup_var() {
local _devices="$(find /dev/xvd[a-z] /dev/sd[a-z] -maxdepth 0 2>/dev/null || true)"
for d in $_devices; do
# resolve to a valid block device
dev="$(realpath "$d")"
[ -b "$dev" ] || continue
# already mounted
mount | grep -q "$d" && continue
case "$CLOUD" in
aws)
# on AWS look for sdx/xvdx
if [ "$d" = "/dev/sdx" -o "$d" = "/dev/xvdx" ]; then
# check volume for existing filesystem
type=$(file -Lbs $d)
if [[ "$type" =~ "XFS filesystem" ]]; then
xfs_repair $d >/dev/null 2>&1
mount -t xfs -o noatime "$d" /var
else
mkfs.xfs -qf $d >/dev/null
copy_and_mount "$d"
fi
add_once /etc/fstab "$d /var xfs defaults,noatime,nofail 0 2"
log -i -t early info "mounted $d at /var"
fi
;;
nocloud)
# Todo: should we try to mount a special tagged block device as /var ?
return 0
;;
*)
ewarn "Unsupported cloud: $CLOUD"
return 1
;;
esac
done
}

View File

@ -1,147 +0,0 @@
#!/bin/sh
function log { logger -t "user-data.${_FUNC}" -- $@; }
function die { log "$@"; exit_trap 1 1 / "$@"; }
# msg used for sns event, last one wins
function msg { MSG="$@"; log "$@"; }
# Generic retry command wrapper, incl. timeout of 30s
# $1 = number of tries; 0 = forever
# $2 = number of seconds to sleep between tries
# $@ actual command
retry() {
local tries=$1
local waitfor=$2
shift 2
while true; do
# Only use timeout of $1 is an executable, call directly if function
type -tf $1 >/dev/null && { timeout --preserve-status 30 $@ && return; } || { $@ && return; }
((tries=tries-1))
[ $tries -eq 0 ] && return 1
sleep $waitfor
done
}
function add_swap() {
[ -f /.swapfile ] || { dd if=/dev/zero of=/.swapfile bs=1M count=$1 && chmod 600 /.swapfile && mkswap /.swapfile && swapon /.swapfile; }
grep -q "/.swapfile" /etc/fstab || echo "/.swapfile none swap sw 0 0" >> /etc/fstab
sysctl -w vm.swappiness=10
}
# Get SSM secure string base64 decoded
# $0 SSM_PATH, value to stdout
function get_secret() {
aws ssm get-parameter --name ${1,,} --with-decryption --query 'Parameter.Value' | base64 -d
}
# Store values as base64 on SSM
# $0 SSM_PATH VALUE
function put_secret() {
aws ssm put-parameter --name ${1,,} --type SecureString --value "$(echo "$2" | base64 -w0)" --overwrite
}
# Gets existing passphrase or creates new passphrase and stores it
function init_passphrase() {
local _URL=$1
local _PPFILE=$2
# If secret already exists noop
[ -f $_PPFILE ] && return 0
get_secret $_URL > $_PPFILE && chmod 600 $_PPFILE || \
{ xxd -l16 -p /dev/random > $_PPFILE; chmod 600 $_PPFILE; put_secret $_URL "$(cat $_PPFILE)"; }
}
function asg_heartbeat {
[ -n "$LAUNCH_HOOK" ] && aws autoscaling record-lifecycle-action-heartbeat --instance-id $INSTANCE_ID --lifecycle-hook-name $LAUNCH_HOOK --auto-scaling-group-name $AWS_AUTOSCALING_GROUPNAME || true
}
function setup_sns_alarms() {
# store SNS message json template
cat <<EOF > /etc/cloudbender/sns_alarm.json
{
"Source": "CloudBender",
"AWSAccountId": "$AWS_ACCOUNT_ID",
"Region": "$REGION",
"Artifact": "$ARTIFACT",
"Asg": "$AWS_AUTOSCALING_GROUPNAME",
"Instance": "$INSTANCE_ID",
"ip": "$IP_ADDRESS"
}
EOF
mkdir -p /var/lib/cloudbender
cat <<EOF > /var/lib/cloudbender/sns_alarm.sh
#!/bin/bash
SUBJECT=\$1
MSG=\$2
LEVEL=\${3:-Info}
ATTACHMENT=\${4:-""}
EMOJI=\${5:-""}
jq -M --arg subject "\$SUBJECT" --arg level "\$LEVEL" --arg msg "\$MSG" --arg attachment "\$ATTACHMENT" --arg emoji "\$EMOJI" --arg hostname "\$HOSTNAME" '.Subject = \$subject | .Level = \$level | .Message = \$msg | .Attachment = \$attachment | .Emoji = \$emoji | .Hostname = \$hostname' < /etc/cloudbender/sns_alarm.json | sed -e 's/\\\\\\\\/\\\\/g' > /tmp/sns.json
aws sns publish --region ${REGION} --target-arn $ALARMSNSARN --message file:///tmp/sns.json
EOF
chmod +x /var/lib/cloudbender/sns_alarm.sh
}
# Final user-data exit handler, installed via trap.
# $1 = exit code, $2 = failing line, $3 = failing function, $4 = failing command
# Sends an SNS event on demand, completes the ASG lifecycle hook and signals
# CloudFormation, then always exits 0 (the trap consumes the failure).
function exit_trap {
set +e
trap - ERR EXIT
local ERR_CODE=$1
local ERR_LINE="$2"
local ERR_FUNC="$3"
local ERR_CMD="$4"
if [ $ERR_CODE -ne 0 ]; then
CFN_STATUS="FAILURE"
RESULT="ABANDON"
else
CFN_STATUS="SUCCESS"
RESULT="CONTINUE"
fi
# Add SNS events on demand
if [ "x${ALARMSNSARN}" != 'x' ]; then
if [ $ERR_CODE -ne 0 ]; then
LEVEL="Error"
SUBJECT="Error during cloud-init."
if [ $ERR_LINE -ne 1 ]; then
MSG="$ERR_CMD failed in $ERR_FUNC at $ERR_LINE. Return: $ERR_CODE"
# attach ~7 numbered source lines around the failing line for context
ATTACHMENT="$(pr -tn $0 | tail -n+$((ERR_LINE - 3)) | head -n7)"
else
MSG="$ERR_CMD"
fi
if [ -n "$DEBUG" ]; then
SUBJECT="$SUBJECT Instance kept running for debug."
else
SUBJECT="$SUBJECT Instance terminated by ASG lifecycle hook."
fi
else
LEVEL="Info"
SUBJECT="ZDT Alpine Instance launched."
fi
# errors are always sent; Info events only unless scaling events disabled
if [ -z "${DISABLECLOUDBENDERSNSSCALINGEVENTS}" ] || [ "$LEVEL" != "Info" ]; then
/var/lib/cloudbender/sns_alarm.sh "$SUBJECT" "$MSG" "$LEVEL" "$ATTACHMENT"
fi
# Disable scaling events during shutdown
[ -n "${DISABLECLOUDBENDERSNSSCALINGEVENTS}" ] && echo "DISABLE_SCALING_EVENTS=1" >> /etc/cloudbender/rc.conf
fi
[ -n "$LAUNCH_HOOK" ] && aws autoscaling complete-lifecycle-action --lifecycle-action-result $RESULT --instance-id $INSTANCE_ID --lifecycle-hook-name $LAUNCH_HOOK --auto-scaling-group-name ${AWS_AUTOSCALING_GROUPNAME} || true
if [ -n "${AWS_CLOUDFORMATION_LOGICAL_ID}" ]; then
aws cloudformation signal-resource --stack-name ${AWS_CLOUDFORMATION_STACK_NAME} --logical-resource-id ${AWS_CLOUDFORMATION_LOGICAL_ID} --unique-id ${INSTANCE_ID} --status ${CFN_STATUS}
fi
# timestamp being done
end_uptime=$(awk '{print $1}' < /proc/uptime)
log "Exiting user-data. Duration: $(echo "$end_uptime-$start_uptime" | bc) seconds"
exit 0
}

View File

@ -1,108 +0,0 @@
# We built on top of tiny-cloud
. /etc/conf.d/tiny-cloud
IMDS_ENDPOINT="169.254.169.254"
. /lib/tiny-cloud/"$CLOUD"/imds
# Query the instance metadata service; $1 = path below $IMDS_URI.
# _imds_header / IMDS_URI / IMDS_QUERY come from the sourced tiny-cloud imds lib.
_imds() {
wget --quiet --timeout 1 --output-document - \
--header "$(_imds_header)" \
"http://$IMDS_ENDPOINT/$IMDS_URI/$1$IMDS_QUERY"
}
# Collect instance facts from IMDS and append them as shell assignments to
# /var/lib/cloud/meta-data. Heredoc expands the _imds calls at write time;
# the escaped \$ hostname vars are evaluated when meta-data is sourced.
function query_imds() {
MAC=$(_imds meta-data/mac)
AVAILABILITY_ZONE=$(_imds meta-data/placement/availability-zone)
REGION=$(echo ${AVAILABILITY_ZONE} | sed "s/[a-z]$//")
INSTANCE_ID=$(_imds meta-data/instance-id)
cat <<EOF >> /var/lib/cloud/meta-data
AVAILABILITY_ZONE=$AVAILABILITY_ZONE
REGION=$REGION
INSTANCE_ID=$INSTANCE_ID
# Get the internal IP of first interface
IP_ADDRESS=$(_imds meta-data/local-ipv4)
PUBLIC_IP_ADDRESS=$(_imds meta-data/public-ipv4 || true)
MAC=$MAC
VPC_CIDR_RANGE=$(_imds meta-data/network/interfaces/macs/${MAC}/vpc-ipv4-cidr-block)
SUBNET=$(_imds meta-data/network/interfaces/macs/${MAC}/subnet-ipv4-cidr-block)
# Make sure we have basic domain / hostname set at all time
_META_HOSTNAME=$(_imds meta-data/hostname)
DOMAIN_NAME=\${_META_HOSTNAME#*.}
HOSTNAME=\${_META_HOSTNAME%%.*}
AWS_ACCOUNT_ID=$(_imds meta-data/network/interfaces/macs/${MAC}/owner-id)
INSTANCE_LIFE_CYCLE=$(_imds meta-data/instance-life-cycle)
INSTANCE_TYPE=$(_imds meta-data/instance-type)
EOF
}
# Append EC2 instance tags as KEY="value" lines to /var/lib/cloud/meta-data.
# Uses the EC2 API rather than IMDS (see note below); relies on
# AWS_DEFAULT_OUTPUT=text being exported by the caller for the while-read parse.
function get_tags() {
# via metadata AWS restricts tags to NOT have " " or "/" ;-(
#for key in $(_imds meta-data/tags/instance); do
# TAGS[$key]=$(_imds meta-data/tags/instance/"$key")
#done
# Replace all /:.- with _ for valid variable names
while read _key value; do
key=$(echo ${_key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
done < <(aws ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" --query 'Tags[*].[Key,Value]')
}
# extract user-data args and cloud meta-data into /var/lib/cloud/meta-data
# Builds the file once, then sources it into the current shell and resolves
# the per-ASG CFN parameter overrides.
get_meta_data() {
if [ ! -f /var/lib/cloud/meta-data ]; then
ebegin "collecting metadata, instance tags and parameters"
echo '#!/bin/bash' > /var/lib/cloud/meta-data
query_imds
export AWS_DEFAULT_REGION=$REGION
export AWS_DEFAULT_OUTPUT=text
get_tags
[ -f /var/lib/cloud/user-data ] && bash /var/lib/cloud/user-data extract_parameters
fi
. /var/lib/cloud/meta-data
# Workaround for current CFN ASG_<parameter> hack
# e.g. <LOGICALID>_CUSTOMHOSTNAME overrides the generic CUSTOMHOSTNAME
_key=$(echo $AWS_CLOUDFORMATION_LOGICAL_ID | tr '[:lower:]' '[:upper:]')
[ -n "$(eval echo \$${_key}_CUSTOMHOSTNAME)" ] && CUSTOMHOSTNAME="$(eval echo \$${_key}_CUSTOMHOSTNAME)"
[ -n "$(eval echo \$${_key}_VOLUMES)" ] && VOLUMES="$(eval echo \$${_key}_VOLUMES)"
return 0
}
################
# IAM SSH KEYS #
################
# Wire sshd's AuthorizedKeysCommand to fetch keys from an IAM group.
# Reads SSHKEYIAMGROUP / SSHKEYIAMROLE; only implemented for CLOUD=aws.
cb_sshkeys() {
case "$CLOUD" in
aws)
# on AWS call IAM for allowed groups and actual keys
GROUP=${SSHKEYIAMGROUP:-""}
ROLE=${SSHKEYIAMROLE:-"arn:aws:iam::000000000000:role/Undefined"}
# the all-zero placeholder ARN means "no role" -> blank it out
[ $ROLE == "arn:aws:iam::000000000000:role/Undefined" ] && ROLE=""
if [ -n "$GROUP" ]; then
# Configure SSHD
sed -i -e "s,^[\s#]*AuthorizedKeysCommand\s.*,AuthorizedKeysCommand /usr/sbin/get_iam_sshkeys.py --user %u --group $GROUP --iamRole \"$ROLE\"," /etc/ssh/sshd_config
sed -i -e "s,^[\s#]*AuthorizedKeysCommandUser\s.*,AuthorizedKeysCommandUser nobody," /etc/ssh/sshd_config
ebegin "added $GROUP to SSH admin keys"
fi
;;
*)
ewarn "Unsupported Cloud: $CLOUD"
return 1
;;
esac
}

View File

@ -1,67 +0,0 @@
# S3-based mutex: local scratch file name, owner identity and hold timeout (s)
MUTEX=mutex
MUTEX_OWNER=$HOSTNAME
MUTEX_TIMEOUT=600
# Drop the local lock copy and delete the lock object at $1 (S3 URL).
release_lock() {
local S3LOCK=$1
rm -f $MUTEX
aws s3 rm $S3LOCK
}
# Lock not timed out and we own it: 0
# Lock not timed out and someone else owns it: 1
# Lock timed out: 2
# $1 = S3 URL of the lock object; downloads it and parses owner + timestamp.
verify_lock() {
local S3LOCK=$1
aws s3 cp $S3LOCK $MUTEX
_host=$(grep "MUTEX_OWNER=" $MUTEX | sed -e 's/MUTEX_OWNER=//')
_time=$(grep "MUTEX_TIME=" $MUTEX | sed -e 's/MUTEX_TIME=//')
# Check for timestamp and timeout - POSIX arithmetic instead of legacy 'let'
timepassed=$(( $(date -u +%s) - _time ))
[ $timepassed -gt $MUTEX_TIMEOUT ] && return 2
[ "$_host" == "$MUTEX_OWNER" ] && return 0
return 1
}
# Write our owner/timestamp lock file and upload it to $1 (S3 URL), then
# re-verify after a short delay to detect a concurrent writer winning the race.
# (Name kept as-is - "aquire" - since check_lock calls it.)
aquire_lock() {
local S3LOCK=$1
echo "MUTEX_OWNER=${MUTEX_OWNER}" > $MUTEX
echo "MUTEX_TIME=$(date -u +%s)" >> $MUTEX
aws s3 cp $MUTEX $S3LOCK
# verify we actually got the lock
sleep 2
verify_lock $S3LOCK
}
# Ensure we hold the S3 lock at $1: take a missing or timed-out lock,
# return 0 if we own it, 1 if someone else holds a valid lock.
check_lock() {
local S3LOCK=$1
aws s3 ls $S3LOCK && rc=$? || rc=$?
# No LOCK ? -> get it !
if [ $rc -ne 0 ]; then
aquire_lock $S3LOCK
else
verify_lock $S3LOCK && rc=$? || rc=$?
# Lock timeout -> we get it
if [ $rc -eq 2 ]; then
aquire_lock $S3LOCK
# we already own it
elif [ $rc -eq 0 ]; then
return 0
# someone else has a valid lock
else
return 1
fi
fi
}

View File

@ -1,130 +0,0 @@
#!/bin/sh
# We built on top of tiny-cloud
. /etc/conf.d/tiny-cloud
# archive orig /var, mount new var on top and restore orig var
# $1 = block device to mount (xfs) at /var
copy_and_mount() {
local dev=$1
tar cf /tmp/var.tar /var 2>/dev/null
mount -t xfs -o noatime "$dev" /var
tar xf /tmp/var.tar -C / && rm -f /tmp/var.tar
}
# Scan /dev/sd? devices for the dedicated /var volume (AWS: /dev/sdx),
# create or repair an XFS filesystem on it, mount it and persist in fstab.
setup_var() {
for d in $(find /dev/sd?); do
# resolve to a valid block device
dev="$(realpath "$d")"
[ -b "$dev" ] || continue
# already mounted
mount | grep -q "$d" && continue
case "$CLOUD" in
aws)
# on AWS look for sdx
if [ "$d" = "/dev/sdx" ]; then
# check volume for existing filesystem
type=$(file -Lbs $d)
if [[ "$type" =~ "XFS filesystem" ]]; then
# existing data: fsck then mount in place
xfs_repair $d >/dev/null 2>&1
mount -t xfs -o noatime "$d" /var
else
# fresh volume: mkfs and migrate current /var onto it
mkfs.xfs -qf $d >/dev/null
copy_and_mount "$d"
fi
grep -q "$d" /etc/fstab || echo "$d /var xfs defaults,noatime,nofail 0 2" >> /etc/fstab
fi
;;
*)
ewarn "Unsupported cloud: $CLOUD"
return 1
;;
esac
done
}
# Attach EBS volume $1 to this instance as device $2.
# Polls volume state up to 30 times (5s apart); force-detaches the volume
# if it is attached elsewhere. Returns non-zero on AWS error or timeout.
attach_ebs() {
local volId="$1"
local device="$2"
local tries=30
while true; do
_json="$(aws ec2 describe-volumes --volume-ids $volId --region $REGION --output json)"
rc=$?; [ $rc -ne 0 ] && return $rc
vol_status=$(echo "$_json" | jq -r .Volumes[].State)
attachId=$(echo "$_json" | jq -r .Volumes[].Attachments[].InstanceId)
# already attached to us -> done
[ "$attachId" = "$INSTANCE_ID" ] && break
if [ "$vol_status" = "available" ]; then
aws ec2 attach-volume --volume-id "$volId" --instance-id "$INSTANCE_ID" --region "$REGION" --device "$device" > /dev/null
rc=$?; [ $rc -ne 0 ] && return $rc
break
fi
# if attached but not to us -> detach
if [ "$vol_status" = "in-use" ]; then
aws ec2 detach-volume --volume-id "$volId" --region "$REGION" --force
rc=$?; [ $rc -ne 0 ] && return $rc
fi
((tries=tries-1))
[ $tries -eq 0 ] && return 1
sleep 5
done
}
_parse_volume() {
# Todo: proper checks once all is yaml
# Turn the colon-separated volume spec into space-separated fields using
# parameter expansion rather than spawning sed.
local spec=$1
echo ${spec//:/ }
}
# mount optional remote volumes
# $1 = whitespace-separated list of "type:volId:device:path" specs.
# Attaches each EBS volume, waits for the device node, creates/repairs XFS
# and mounts it at the requested path.
mount_volumes() {
local volumes="$1"
for vol in $volumes; do
# Todo: check volume type and call matching func
read volType volId volDevice volPath < <(_parse_volume $vol)
[ "$volType" != "ebs" ] && { echo "Unknown volume type $volType"; break; }
attach_ebs $volId $volDevice
rc=$?
[ $rc -ne 0 ] && { ewarn "error trying to attach $volId"; break; }
# wait for the block device to become available
while true; do
mdev -s
test -b $volDevice && break
sleep 1
done
# check volume for existing filesystem
type=$(file -Lbs $volDevice)
if [[ "$type" =~ "XFS filesystem" ]]; then
xfs_repair $volDevice >/dev/null 2>&1
else
mkfs.xfs -qf $volDevice >/dev/null
fi
# mount
mkdir -p $volPath
mount -t xfs -o noatime $volDevice $volPath
ebegin "mounting $volDevice at $volPath"
done
}
# Unmount and detach each volume from the same spec list used by mount_volumes.
# Detach only happens when the umount succeeded.
unmount_volumes() {
local volumes="$1"
for vol in $volumes; do
read volType volId volDevice volPath < <(_parse_volume $vol)
umount $volPath && aws ec2 detach-volume --volume-id "$volId" --instance-id $INSTANCE_ID --region $REGION > /dev/null
done
}

View File

@ -0,0 +1,205 @@
#!/bin/bash
# Todo: This should go into a yaml file
# Collect instance facts via tiny-cloud's `imds` helper and append them as
# shell assignments to /var/lib/cloud/meta-data (expanded at write time;
# the escaped \$ hostname vars are evaluated when the file is sourced).
query_imds() {
MAC=$(imds meta-data/mac)
AVAILABILITY_ZONE=$(imds meta-data/placement/availability-zone)
REGION=$(echo ${AVAILABILITY_ZONE} | sed "s/[a-z]$//")
INSTANCE_ID=$(imds meta-data/instance-id)
cat <<EOF >> /var/lib/cloud/meta-data
AVAILABILITY_ZONE=$AVAILABILITY_ZONE
REGION=$REGION
INSTANCE_ID=$INSTANCE_ID
IP_ADDRESS=$(imds meta-data/local-ipv4)
PUBLIC_IP_ADDRESS=$(imds meta-data/public-ipv4 || true)
DEFAULT_GW_INTERFACE=$(ip -o route get 8.8.8.8 | awk '{print $5}')
MAC=$MAC
VPC_CIDR_RANGE=$(imds meta-data/network/interfaces/macs/${MAC}/vpc-ipv4-cidr-block)
SUBNET=$(imds meta-data/network/interfaces/macs/${MAC}/subnet-ipv4-cidr-block)
_META_HOSTNAME=$(imds meta-data/hostname)
DOMAIN_NAME=\${_META_HOSTNAME#*.}
AWS_ACCOUNT_ID=$(imds meta-data/network/interfaces/macs/${MAC}/owner-id)
INSTANCE_LIFE_CYCLE=$(imds meta-data/instance-life-cycle)
INSTANCE_TYPE=$(imds meta-data/instance-type)
EOF
}
# Todo: This should go into a yaml file
# Append instance tags (read via IMDS) as KEY="value" lines to
# /var/lib/cloud/meta-data. Tag keys are sanitized into valid variable names.
get_tags() {
# via metadata AWS restricts tags to NOT have " " or "/" ;-(
# Replace all /:.- with _ for valid variable names
for key in $(imds meta-data/tags/instance); do
value="$(imds meta-data/tags/instance/$key)"
key=$(echo ${key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
done
# previous EC2-API based implementation, kept for reference
#while read _key value; do
# key=$(echo ${_key//[\/:.-]/_} | tr '[:lower:]' '[:upper:]')
# echo "$key=\"$value\"" >> /var/lib/cloud/meta-data
#done < <(aws ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" --query 'Tags[*].[Key,Value]' --region $REGION --output text)
}
# extract user-data args and cloud meta-data into /var/lib/cloud/meta-data
# Only (re)collects when the cache files are missing; import_meta_data
# sources the results afterwards.
get_meta_data() {
if [ ! -f /var/lib/cloud/meta-data ]; then
echo '#!/bin/bash' > /var/lib/cloud/meta-data
query_imds
get_tags
fi
if [ ! -f /etc/cloudbender.conf ]; then
bash /var/lib/cloud/user-data extract_parameters
fi
}
# Source the cached config + meta-data into the current shell, set AWS CLI
# defaults and resolve the per-ASG CFN parameter overrides.
import_meta_data() {
. /etc/cloudbender.conf
. /var/lib/cloud/meta-data
export AWS_DEFAULT_REGION=$REGION
export AWS_DEFAULT_OUTPUT=text
# Enabled LaunchHooks if not DEBUG
is_enabled $ZDT_CLOUDBENDER_DEBUG || LAUNCH_HOOK="CloudBenderLaunchHook"
# Workaround for current CFN ASG_<parameter> hack
# e.g. <LOGICALID>_CUSTOMHOSTNAME overrides the generic CUSTOMHOSTNAME
_key=$(echo $AWS_CLOUDFORMATION_LOGICAL_ID | tr '[:lower:]' '[:upper:]')
[ -n "$(eval echo \$${_key}_CUSTOMHOSTNAME)" ] && CUSTOMHOSTNAME="$(eval echo \$${_key}_CUSTOMHOSTNAME)"
[ -n "$(eval echo \$${_key}_VOLUMES)" ] && VOLUMES="$(eval echo \$${_key}_VOLUMES)"
return 0
}
# various early volume functions
# Attach EBS volume $1 to this instance as device $2.
# Polls volume state up to 30 times (5s apart); force-detaches the volume
# if it is attached elsewhere. Returns non-zero on AWS error or timeout.
attach_ebs() {
local volId="$1"
local device="$2"
local tries=30
while true; do
_json="$(aws ec2 describe-volumes --volume-ids $volId --region $REGION --output json)"
rc=$?; [ $rc -ne 0 ] && return $rc
vol_status=$(echo "$_json" | jq -r .Volumes[].State)
attachedId=$(echo "$_json" | jq -r .Volumes[].Attachments[].InstanceId)
# already attached to us -> done
[ "$attachedId" = "$INSTANCE_ID" ] && break
if [ "$vol_status" = "available" ]; then
aws ec2 attach-volume --volume-id "$volId" --instance-id "$INSTANCE_ID" --region "$REGION" --device "$device" > /dev/null
rc=$?; [ $rc -ne 0 ] && return $rc
break
fi
# if attached but not to us -> detach
if [ "$vol_status" = "in-use" ]; then
aws ec2 detach-volume --volume-id "$volId" --region "$REGION" --force
rc=$?; [ $rc -ne 0 ] && return $rc
fi
((tries=tries-1))
[ $tries -eq 0 ] && return 1
sleep 5
done
}
# Record an ASG lifecycle heartbeat to keep the launch hook from timing out.
# No-op (and never fails) when LAUNCH_HOOK is unset.
asg_heartbeat() {
[ -n "$LAUNCH_HOOK" ] && aws autoscaling record-lifecycle-action-heartbeat --instance-id $INSTANCE_ID --lifecycle-hook-name $LAUNCH_HOOK --auto-scaling-group-name $AWS_AUTOSCALING_GROUPNAME || true
}
# Write the SNS message template and the sns_alarm.sh helper.
# The publish step is only appended when ALARMSNSARN is set, so without an
# SNS topic the helper degrades to a no-op that still accepts the same args.
setup_sns_alarms() {
# store SNS message json template
cat <<EOF > /var/lib/cloud/sns_alarm.json
{
"Source": "CloudBender",
"AWSAccountId": "$AWS_ACCOUNT_ID",
"Region": "$REGION",
"Artifact": "$ARTIFACT",
"Asg": "$AWS_AUTOSCALING_GROUPNAME",
"Instance": "$INSTANCE_ID",
"ip": "$IP_ADDRESS"
}
EOF
# quoted 'EOF' -> literal, no expansion; args are read at helper runtime
cat <<'EOF' > /var/lib/cloud/sns_alarm.sh
#!/bin/bash
SUBJECT=$1
MSG=$2
LEVEL=${3:-Info}
ATTACHMENT=${4:-""}
EMOJI=${5:-""}
EOF
if [ -n "$ALARMSNSARN" ]; then
cat <<EOF >> /var/lib/cloud/sns_alarm.sh
jq -M --arg subject "\$SUBJECT" --arg level "\$LEVEL" --arg msg "\$MSG" --arg attachment "\$ATTACHMENT" --arg emoji "\$EMOJI" --arg hostname "\$HOSTNAME" '.Subject = \$subject | .Level = \$level | .Message = \$msg | .Attachment = \$attachment | .Emoji = \$emoji | .Hostname = \$hostname' < /var/lib/cloud/sns_alarm.json | sed -e 's/\\\\\\\\/\\\\/g' > /tmp/sns.json
aws sns publish --region ${REGION} --target-arn $ALARMSNSARN --message file:///tmp/sns.json
EOF
fi
chmod +x /var/lib/cloud/sns_alarm.sh
}
# associate EIP
# return 0 if we attached an EIP
# return 1 if we the public IP did NOT change or other error
# $1 = instance id, $2 = EIP (optionally with /32 suffix); "0.0.0.0" means
# keep the AWS-assigned public IP.
associate_eip() {
local instance_id=$1
# strip /32 and validate as dotted-quad; empty on no match
local eip=$(echo $2 | sed -e 's/\/32//' | grep -E -o "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)") || true
local current_instance
if [ -n "$eip" ]; then
if [ "$eip" != "0.0.0.0" ]; then
read eip_alloc_id eip_assoc_id current_instance < <(aws ec2 describe-addresses --public-ips $eip --query 'Addresses[*].[AllocationId,AssociationId,InstanceId]' || true)
# If we already own and have the EIP attached -> done
[ "$instance_id" == "$current_instance" ] && return
if [ ! -z "$eip_alloc_id" ]; then
# EIP bound elsewhere -> break the existing association first
if [[ "$eip_assoc_id" =~ ^eipassoc- ]]; then
log -t user-data info "EIP $eip already associated via Association ID ${eip_assoc_id}. Disassociating."
retry 3 10 aws ec2 disassociate-address --association-id $eip_assoc_id
fi
log -t user-data info "Associating Elastic IP $eip via Allocation ID $eip_alloc_id with Instance $instance_id"
aws ec2 associate-address --no-allow-reassociation --instance-id $instance_id --allocation-id $eip_alloc_id
return
else
log -t user-data warn "Elastic IP $eip address not found."
fi
else
log -t user-data info "0.0.0.0 requested, keeping AWS assigned IP."
fi
else
log -t user-data debug "Invalid or no ElasticIP defined. Skip"
fi
return 1
}
# Accept incoming traffic for everything
# Required for NAT / routing instances: disables the EC2 src/dst check.
disable_source_dest_check() {
aws ec2 modify-instance-attribute --instance-id ${INSTANCE_ID} --source-dest-check "{\"Value\": false}"
}
# Register ourself at route tables
# $1 = comma-separated route table IDs, $2 = comma-separated CIDRs.
# Creates (or replaces, if it exists) a route to each CIDR via this instance,
# skipping the VPC's own CIDR.
register_routes() {
local rtb_id_list="$1"
local route_cidr="$2"
for cidr in ${route_cidr//,/ }; do
if [ "$cidr" != "$VPC_CIDR_RANGE" ]; then
for rt in ${rtb_id_list//,/ }; do
if [[ "$rt" =~ ^rtb-[a-f0-9]*$ ]]; then
aws ec2 create-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID} || \
aws ec2 replace-route --route-table-id $rt --destination-cidr-block "${cidr}" --instance-id ${INSTANCE_ID}
else
log -t user-data warn "Invalid Route Table ID: $rt"
fi
done
fi
done
}

View File

@ -0,0 +1,9 @@
#!/bin/bash
# nocloud flavor: only the custom SSH port is read from the metadata source.
get_meta_data() {
SSHPORT=$(imds meta-data/cloudbender/sshPort)
}
import_meta_data() {
# nocloud flavor has nothing to import; emit the same marker as before
printf '%s\n' Noop
}

View File

@ -1,20 +1,21 @@
#!/sbin/openrc-run
# vim:set ts=8 noet ft=sh:
description="CloudBender early - eg. mount suitable block device at /var"
# no network / metadata available yet
description="CloudBender early tasks"
depend() {
need fsck root
use lvm modules
after clock lvm modules
before bootmisc
need fsck root
use lvm modules
after clock lvm modules
before bootmisc
}
start() {
source /usr/lib/cloudbender/volumes.sh
source /usr/lib/cloudbender/boot.sh
ebegin "Looking for suitable /var"
setup_var
eend $?
ebegin "CloudBender - early phase"
setup_var
eend $?
}

View File

@ -2,47 +2,63 @@
# vim:set ts=8 noet ft=sh:
#
description="CloudBender - mount additional volumes, send shutdown messages"
description="CloudBender - main phase"
depend() {
need net
before sshd
after tiny-cloud
need net
before sshd
after tiny-cloud-main
}
start() {
source /usr/lib/cloudbender/init.sh
source /usr/lib/cloudbender/base.sh
source /usr/lib/cloudbender/volumes.sh
source /usr/lib/cloudbender/common.sh
get_meta_data
ebegin "CloudBender"
# mount extra volumes as early as possible
[ -n "$VOLUMES" ] && mount_volumes "$VOLUMES"
get_meta_data
import_meta_data
# allow optional ssh keys, eg. via IAM for AWS
cb_sshkeys
# various initial OS tweaks
setup_instance
eend $?
# mount extra optional volumes
mount_volumes "$VOLUMES"
# add optional ssh keys, eg. via IAM for AWS
configure_sshd
if [ "$CLOUD" == "aws" ]; then
set_hostname $CUSTOMHOSTNAME
# if fixed hostname use persistent sshd keys
[ -n "$CUSTOMHOSTNAME" ] && persistent_sshd_hostkeys "/_ssh/${ARTIFACT}/${CONGLOMERATE}/${HOSTNAME}"
associate_eip $INSTANCE_ID $ELASTICIP && PUBLIC_IP_ADDRESS=$ELASTICIP
fi
register_service_dns
is_enabled $PROMETHEUS_ENABLED && setup_prometheus $PROMETHEUS_ALLOW
is_enabled $LOGGING_ENABLED && setup_fluentbit $LOGGING_HOST
# cleanup previous reboot logs
rm -f /tmp/shutdown.log
eend 0
}
stop() {
source /usr/lib/cloudbender/init.sh
source /usr/lib/cloudbender/base.sh
source /usr/lib/cloudbender/volumes.sh
source /usr/lib/cloudbender/common.sh
get_meta_data
import_meta_data
[ -n "$VOLUMES" ] && unmount_volumes "$VOLUMES"
unmount_volumes "$VOLUMES"
# Include dynamic config setting create at boot
[ -r /etc/cloudbender/rc.conf ] && . /etc/cloudbender/rc.conf
is_enabled $ZDT_CLOUDBENDER_DEBUG && [ -r /tmp/shutdown.log ] && SHUTDOWNLOG="$(cat /tmp/shutdown.log)"
[ -n "$DEBUG" ] && [ -r /tmp/shutdown.log ] && SHUTDOWNLOG="$(cat /tmp/shutdown.log)"
[ -n "$RC_REBOOT" ] && ACTION="rebooting" || ACTION="terminated"
[ -z "$DISABLE_SCALING_EVENTS" ] && /var/lib/cloud/sns_alarm.sh "Instance $ACTION" "" Info "$SHUTDOWNLOG"
[ -n "$RC_REBOOT" ] && ACTION="rebooting" || ACTION="terminated"
[ -z "$DISABLE_SCALING_EVENTS" ] && /var/lib/cloudbender/sns_alarm.sh "Instance $ACTION" "" Info "$SHUTDOWNLOG"
eend $?
eend 0
}

View File

@ -0,0 +1,10 @@
# Early-boot kernel/mount tweaks (flat script, run once at boot).
# mounts are shared to run containers later, eg. cilium, falco
# should be handled in openrc, see: https://github.com/OpenRC/openrc/pull/526/files
mount --make-rshared /
# Enable THP incl. defrag but very conservatively
# see: https://go.dev/doc/gc-guide#Linux_transparent_huge_pages
echo "madvise" > /sys/kernel/mm/transparent_hugepage/enabled
echo "defer+madvise" > /sys/kernel/mm/transparent_hugepage/defrag
echo "0" > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none

View File

505
kubezero/zdt-base/common.sh Normal file
View File

@ -0,0 +1,505 @@
# We built on top of tiny-cloud
. /lib/tiny-cloud/common
. /usr/lib/cloudbender/cloud/"$CLOUD".sh
# boolean flags
# Interpret $1 (case-insensitive) as a boolean: 1/true -> 0 (true),
# 0/false/none/empty -> 1 (false); anything else warns and defaults to false.
is_enabled() {
local flag=$(echo "$1" | tr '[:upper:]' '[:lower:]')
# [[ ]] with &&/|| replaces the deprecated, ambiguous '[ ... -o ... ]' form
[[ "$flag" == 1 || "$flag" == "true" ]] && return 0
[[ "$flag" == 0 || "$flag" == "false" || "$flag" == "none" || -z "$flag" ]] && return 1
log -t user-data warn "Unknown value for boolean option: $flag - assuming False"
return 1
}
# setup_instance, various OS tweaks impossible to do via AMI baking
# Uses add_once (sourced helper) to keep fstab/hosts/resolv.conf idempotent.
setup_instance() {
# create machine-id to emulate systemd
[ -f /etc/machine-id ] || uuidgen > /etc/machine-id
# add and mount bpf file system
add_once /etc/fstab "bpffs /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0"
mount -a
add_once /etc/hosts "${IP_ADDRESS} ${_META_HOSTNAME} ${HOSTNAME}"
# workaround for dhcpcd / openresolv to omit search domain if equal to domain breaking DNS resolution of shortnames for eg. etcd and kube-apiserver
add_once /etc/resolv.conf "search $DOMAIN_NAME"
case "$CLOUD" in
aws)
# Set system wide default region for boto3
echo "export AWS_DEFAULT_REGION=$REGION" > /etc/profile.d/aws.sh
setup_sns_alarms
;;
*)
ewarn "Unsupported Cloud: $CLOUD"
# return 1
;;
esac
}
################
# IAM SSH KEYS #
################
# Set the sshd listen port (SSHPORT, default 22) and, on AWS, wire
# AuthorizedKeysCommand to fetch keys for an IAM group (SSHKEYIAMGROUP /
# SSHKEYIAMROLE). nocloud is a no-op.
configure_sshd() {
# Change Listen port
local port=${SSHPORT:-"22"}
[ -w /etc/ssh/sshd_config ] && sed -i -e 's/^[\s#]*Port\s.*/Port '$port'/' /etc/ssh/sshd_config
case "$CLOUD" in
aws)
# on AWS call IAM for allowed groups and actual keys
local group=${SSHKEYIAMGROUP:-""}
local role=${SSHKEYIAMROLE:-"arn:aws:iam::000000000000:role/Undefined"}
# the all-zero placeholder ARN means "no role" -> blank it out
[ $role == "arn:aws:iam::000000000000:role/Undefined" ] && role=""
if [ -n "$group" ]; then
# Configure SSHD
sed -i -e 's,^[\s#]*AuthorizedKeysCommand\s.*,AuthorizedKeysCommand /usr/sbin/get_iam_sshkeys.py --user %u --group '$group' --iamRole "'$role'",' /etc/ssh/sshd_config
sed -i -e 's,^[\s#]*AuthorizedKeysCommandUser\s.*,AuthorizedKeysCommandUser nobody,' /etc/ssh/sshd_config
einfo "added $group to SSH admin keys"
fi
;;
nocloud)
return 0
;;
*)
ewarn "Unsupported Cloud: $CLOUD"
# return 1
;;
esac
}
# Persist host keys
# has to run before sshd starts up first time !
# $1 = SSM path prefix. Restores host keys from SSM; if any are missing,
# generates them and uploads each key pair (tar+base64) back to SSM.
persistent_sshd_hostkeys() {
# Top level is artifact to be able to limit the SSM IAM permissions
local ssm_path=$1
local key_types="ecdsa ed25519 rsa"
# try to get none existing host keys from SSM
RET=0
for key in $key_types; do
if [ ! -f /etc/ssh/ssh_host_${key}_key.pub -a ! -f /etc/ssh/ssh_host_${key}_key ]; then
(aws ssm get-parameters --names "${ssm_path}/host_${key}.tgz" --with-decryption --query 'Parameters[0].Value' | base64 -d | tar xzf - --directory=/ 1>/dev/null 2>&1) \
&& log -t user-data info "Restored ssh_host_${key}_key from SSM" || RET=1
fi
done
# Update keys if any key couldn't be restored from SSM
if [ $RET -eq 1 ]; then
# generate any missing keys
ssh-keygen -A
for key in $key_types; do
if [ -r /etc/ssh/ssh_host_${key}_key -a -r /etc/ssh/ssh_host_${key}_key.pub ]; then
(aws ssm put-parameter --name "${ssm_path}/host_${key}.tgz" --type SecureString --value \
"$(tar czf - /etc/ssh/ssh_host_${key}_key /etc/ssh/ssh_host_${key}_key.pub | base64)" --overwrite) \
&& log -t user-data info "Uploaded ssh_host_${key}_key to SSM"
fi
done
fi
}
# either plain custom hostname or
# - `unique:<format_string>` eg. `uniq:kube-worker-{:02}` -> kube-worker-01
# - `myownip: <prefix>` eg. `myip: nodegroup-` -> nodegroup-1.2.3.4
# Applies the hostname, updates /etc/hosts, Route53 and the EC2 Name tag;
# without a custom hostname only the Name tag is set.
set_hostname() {
local custom_hostname=$(echo $1 | awk -F. '{ print $1 }')
if [ -n "$custom_hostname" ]; then
if [[ "$custom_hostname" == unique:* ]]; then
new_hostname=$(uniq_hostname.py $AWS_EC2LAUNCHTEMPLATE_ID $INSTANCE_ID ${custom_hostname##unique:})
elif [[ "$custom_hostname" == myownip:* ]]; then
local _ip=$(echo $IP_ADDRESS | sed -e 's/\./-/g')
new_hostname=$(echo "${custom_hostname##myownip:}$_ip")
else
new_hostname=$custom_hostname
fi
FQDN="${new_hostname}.${DOMAIN_NAME}"
echo ${new_hostname} > /etc/hostname
hostname $new_hostname
export HOSTNAME=$new_hostname
# add new hostname to hosts
add_once /etc/hosts "${IP_ADDRESS} ${FQDN} ${new_hostname}"
log -t user-data info "Hostname updated to ${new_hostname}."
# hup syslog to update loghost macro
/etc/init.d/syslog-ng reload
# update Route53 entry for VPC internal FQDN
route53.py --fqdn $FQDN --record $IP_ADDRESS
# update our Name Tag to FQDN or PrivateDNSName to allow easy indentification in the AWS UI
aws ec2 create-tags --resources $INSTANCE_ID --tags Key=Name,Value=$FQDN
else
aws ec2 create-tags --resources $INSTANCE_ID --tags Key=Name,Value=${HOSTNAME}.${REGION}.compute.internal
fi
}
_parse_volume() {
# Todo: proper checks once all is yaml
# Turn the colon-separated volume spec into space-separated fields using
# parameter expansion rather than spawning sed.
local spec=$1
echo ${spec//:/ }
}
# mount optional remote volumes
# $1 = whitespace-separated list of "type:volId:device:path" specs.
# Attaches each EBS volume, waits for the device node, creates/repairs XFS
# and mounts it at the requested path.
mount_volumes() {
local volumes="$1"
for vol in $volumes; do
# Todo: check volume type and call matching func
read volType volId volDevice volPath < <(_parse_volume $vol)
[ "$volType" != "ebs" ] && { echo "Unknown volume type $volType"; break; }
attach_ebs $volId $volDevice
rc=$?
[ $rc -ne 0 ] && { ewarn "error trying to attach $volId"; break; }
# wait for the block device to become available
while true; do
mdev -s
test -b $volDevice && break
sleep 1
done
# check volume for existing filesystem
type=$(file -Lbs $volDevice)
if [[ "$type" =~ "XFS filesystem" ]]; then
xfs_repair $volDevice >/dev/null 2>&1
else
mkfs.xfs -qf $volDevice >/dev/null
fi
# mount
mkdir -p $volPath
mount -t xfs -o noatime $volDevice $volPath
einfo "mounting $volDevice at $volPath"
done
}
# Unmount and detach each volume from the same spec list used by mount_volumes.
# Detach only happens when the umount succeeded.
unmount_volumes() {
local volumes="$1"
for vol in $volumes; do
read volType volId volDevice volPath < <(_parse_volume $vol)
umount $volPath && aws ec2 detach-volume --volume-id "$volId" --instance-id $INSTANCE_ID --region $REGION > /dev/null
done
}
# msg used for sns event, last one wins
# Stores the message in the global MSG (read by exit_trap) and logs it.
msg() { MSG="$@"; log -t user-data info "$@"; }
# Generic retry command wrapper, incl. timeout of 30s
# $1 = number of tries; 0 = forever
# $2 = number of seconds to sleep between tries
# $@ actual command
retry() {
local tries=$1
local waitfor=$2
shift 2
while true; do
# Only use timeout if $1 is an executable; call directly if it is a shell
# function ('timeout' cannot run functions). "$@" keeps args with spaces intact.
type -tf "$1" >/dev/null && { timeout 30 "$@" && return; } || { "$@" && return; }
# (( )) returns 1 when the result is 0 -> guard with || true for set -e
((tries=tries-1)) || true
[ $tries -eq 0 ] && return 1
sleep $waitfor
done
}
# Create and enable a swapfile of $1 MiB at /.swapfile (idempotent),
# persist it in fstab and lower swappiness.
add_swap() {
[ -f /.swapfile ] || { dd if=/dev/zero of=/.swapfile bs=1M count=$1 && chmod 600 /.swapfile && mkswap /.swapfile && swapon /.swapfile; }
grep -q "/.swapfile" /etc/fstab || echo "/.swapfile none swap sw 0 0" >> /etc/fstab
sysctl -w vm.swappiness=10
}
# Get SSM secure string base64 decoded
# $0 SSM_PATH, value to stdout
# Note: ${1,,} lowercases the SSM path (bash 4+); relies on AWS CLI text
# output (AWS_DEFAULT_OUTPUT=text is exported by import_meta_data).
get_secret() {
aws ssm get-parameter --name ${1,,} --with-decryption --query 'Parameter.Value' | base64 -d
}
# Store values as base64 on SSM
# $0 SSM_PATH VALUE
# base64 -w0 disables line wrapping so the SSM value is a single line.
put_secret() {
aws ssm put-parameter --name ${1,,} --type SecureString --value "$(echo "$2" | base64 -w0)" --overwrite
}
# Gets existing passphrase or creates new passphrase and stores it
# $1 = SSM path of the secret, $2 = local file to hold the passphrase
init_passphrase() {
local _URL=$1
local _PPFILE=$2
# If secret already exists noop
[ -f $_PPFILE ] && return 0
# Try SSM first; otherwise generate 16 random bytes (hex) and upload them
get_secret $_URL > $_PPFILE && chmod 600 $_PPFILE || \
{ xxd -l16 -p /dev/random > $_PPFILE; chmod 600 $_PPFILE; put_secret $_URL "$(cat $_PPFILE)"; }
}
# upload various useful logs to s3 if configured
# Bundles process list, system/boot/k8s logs and cloudbender config into a
# tgz and uploads it below $ZDT_CLOUDBENDER_DEBUG_REMOTELOGS. Always returns 0.
upload_debug_logs(){
# quoted to avoid '[: too many arguments' if the value contains spaces
[ -z "$ZDT_CLOUDBENDER_DEBUG_REMOTELOGS" ] && return 0
local s3Url="$ZDT_CLOUDBENDER_DEBUG_REMOTELOGS/$INSTANCE_ID/$(date +'%Y%m%d-%H%M%Z')"
local _tmp=$(mktemp -d)
ps -ef > ${_tmp}/process.list
# best effort: some of these may not exist on a given node role
cp /var/log/messages \
/var/log/rc.log \
/var/log/user-data.log \
/etc/cloudbender.conf \
/var/lib/cloud/meta-data \
/var/log/kubelet/kubelet.log \
/var/log/crio/crio.log \
$_tmp
tar cfz /tmp/debuglogs.tgz -C $_tmp .
aws s3 cp /tmp/debuglogs.tgz $s3Url/debuglogs.tgz
return 0
}
# Final user-data exit handler, installed via trap.
# $1 = exit code, $2 = failing line, $3 = failing function, $4 = failing command
# Sends an SNS event on demand, completes the ASG lifecycle hook, signals
# CloudFormation, and on failure either uploads debug logs (debug mode) or
# powers the instance off. Always exits 0 (the trap consumes the failure).
exit_trap() {
set +e
trap - ERR EXIT
local ERR_CODE=$1
local ERR_LINE="$2"
local ERR_FUNC="$3"
local ERR_CMD="$4"
if [ $ERR_CODE -ne 0 ]; then
CFN_STATUS="FAILURE"
RESULT="ABANDON"
else
CFN_STATUS="SUCCESS"
RESULT="CONTINUE"
fi
# Add SNS events on demand
if [ -n "$ALARMSNSARN" ]; then
if [ $ERR_CODE -ne 0 ]; then
LEVEL="Error"
SUBJECT="Error during cloud-init."
if [ $ERR_LINE -ne 1 ]; then
MSG="$ERR_CMD failed in $ERR_FUNC at $ERR_LINE. Return: $ERR_CODE"
# attach ~7 numbered source lines around the failing line for context
ATTACHMENT="$(pr -tn $0 | tail -n+$((ERR_LINE - 3)) | head -n7)"
else
MSG="$ERR_CMD"
fi
if [ -n "$ZDT_CLOUDBENDER_DEBUG" ]; then
SUBJECT="$SUBJECT Instance kept running for debug."
else
SUBJECT="$SUBJECT Instance terminated by ASG lifecycle hook."
fi
else
LEVEL="Info"
SUBJECT="ZDT Alpine Instance launched."
fi
# errors are always sent; Info events only unless scaling events disabled
if [ -z "${DISABLECLOUDBENDERSNSSCALINGEVENTS}" ] || [ "$LEVEL" != "Info" ]; then
/var/lib/cloud/sns_alarm.sh "$SUBJECT" "$MSG" "$LEVEL" "$ATTACHMENT"
fi
# Disable scaling events during shutdown
[ -n "${DISABLECLOUDBENDERSNSSCALINGEVENTS}" ] && echo "DISABLE_SCALING_EVENTS=1" >> /etc/cloudbender.conf
fi
[ -n "$LAUNCH_HOOK" ] && aws autoscaling complete-lifecycle-action --lifecycle-action-result $RESULT --instance-id $INSTANCE_ID --lifecycle-hook-name $LAUNCH_HOOK --auto-scaling-group-name ${AWS_AUTOSCALING_GROUPNAME} || true
if [ -n "${AWS_CLOUDFORMATION_LOGICAL_ID}" ]; then
aws cloudformation signal-resource --stack-name ${AWS_CLOUDFORMATION_STACK_NAME} --logical-resource-id ${AWS_CLOUDFORMATION_LOGICAL_ID} --unique-id ${INSTANCE_ID} --status ${CFN_STATUS}
fi
# timestamp being done
end_uptime=$(awk '{print $1}' < /proc/uptime)
log -t user-data info "Exiting user-data. $end_uptime seconds after boot. Duration: $(echo "$end_uptime-$start_uptime" | bc)"
# if we ran into error, either upload debug files or poweroff
if [ $ERR_CODE -ne 0 ]; then
is_enabled $ZDT_CLOUDBENDER_DEBUG && upload_debug_logs || poweroff
fi
exit 0
}
### S3 based locking
# local scratch file name, owner identity and lock hold timeout (seconds)
MUTEX=mutex
MUTEX_OWNER=$HOSTNAME
MUTEX_TIMEOUT=600
# Drop the local lock copy and delete the lock object at $1 (S3 URL).
release_lock() {
local S3LOCK=$1
rm -f $MUTEX
aws s3 rm $S3LOCK
}
# Lock not timed out and we own it: 0
# Lock not timed out and someone else owns it: 1
# Lock timed out: 2
# $1 = S3 URL of the lock object; downloads it and parses owner + timestamp.
verify_lock() {
local S3LOCK=$1
aws s3 cp $S3LOCK $MUTEX
_host=$(grep "MUTEX_OWNER=" $MUTEX | sed -e 's/MUTEX_OWNER=//')
_time=$(grep "MUTEX_TIME=" $MUTEX | sed -e 's/MUTEX_TIME=//')
# Check for timestamp and timeout - POSIX arithmetic instead of legacy 'let'
timepassed=$(( $(date -u +%s) - _time ))
[ $timepassed -gt $MUTEX_TIMEOUT ] && return 2
[ "$_host" == "$MUTEX_OWNER" ] && return 0
return 1
}
# Write our owner/timestamp lock file and upload it to $1 (S3 URL), then
# re-verify after a short delay to detect a concurrent writer winning the race.
# (Name kept as-is - "aquire" - since check_lock calls it.)
aquire_lock() {
local S3LOCK=$1
echo "MUTEX_OWNER=${MUTEX_OWNER}" > $MUTEX
echo "MUTEX_TIME=$(date -u +%s)" >> $MUTEX
aws s3 cp $MUTEX $S3LOCK
# verify we actually got the lock
sleep 2
verify_lock $S3LOCK
}
# Ensure we hold the S3 lock at $1: take a missing or timed-out lock,
# return 0 if we own it, 1 if someone else holds a valid lock.
check_lock() {
local S3LOCK=$1
aws s3 ls $S3LOCK && rc=$? || rc=$?
# No LOCK ? -> get it !
if [ $rc -ne 0 ]; then
aquire_lock $S3LOCK
else
verify_lock $S3LOCK && rc=$? || rc=$?
# Lock timeout -> we get it
if [ $rc -eq 2 ]; then
aquire_lock $S3LOCK
# we already own it
elif [ $rc -eq 0 ]; then
return 0
# someone else has a valid lock
else
return 1
fi
fi
}
# All things networking
# Enable IPv4/IPv6 forwarding and conntrack sizing for router/NAT nodes;
# settings are persisted under /etc/sysctl.d and applied immediately.
enable_ip_forwarding() {
modprobe nf_conntrack
cat <<EOF > /etc/sysctl.d/40-ip-forward.conf
net.ipv4.ip_forward=1
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.conf.all.send_redirects=0
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv6.conf.default.forwarding = 1
net.ipv6.conf.all.forwarding = 1
net.netfilter.nf_conntrack_max = 524288
EOF
sysctl -p /etc/sysctl.d/40-ip-forward.conf
}
# Enable the iptables openrc service and persist the current ruleset.
enable_iptables() {
rc-update add iptables
/etc/init.d/iptables save
}
# Register this instance under ${SERVICENAME}.${DNSZONE} in Route53 (public
# IP preferred, private if SERVICEPRIVATE is set) and install a shutdown
# hook that removes the record again. No-op unless DNSZONE and SERVICENAME set.
register_service_dns() {
if [ -n "$DNSZONE" -a -n "$SERVICENAME" ]; then
_IP=${PUBLIC_IP_ADDRESS:-$IP_ADDRESS}
# Bugfix: was '[ -n "SERVICEPRIVATE" ]' (missing $), which is always true
# and unconditionally forced the private IP
[ -n "$SERVICEPRIVATE" ] && _IP=$IP_ADDRESS
route53.py --fqdn "${SERVICENAME}.${DNSZONE}" --record $_IP
# Register shutdown hook to remove DNS entry on terminate
add_once /etc/local.d/cloudbender.stop "echo \"Deleting Route53 record for ${SERVICENAME}.${DNSZONE}\" >> /tmp/shutdown.log"
add_once /etc/local.d/cloudbender.stop "route53.py --delete --fqdn \"${SERVICENAME}.${DNSZONE}\" --record ${PUBLIC_IP_ADDRESS:-$IP_ADDRESS}"
# Short cut our public IP to private one to allow talking to our own service name
add_once /etc/hosts "${IP_ADDRESS} ${SERVICENAME}.${DNSZONE}"
log -t user-data info "Registered $_IP with ${SERVICENAME}.$DNSZONE"
fi
}
# Enable and start the Prometheus node-exporter openrc service.
# ($1 - allowed scrape source - is accepted by the caller but unused here.)
setup_prometheus() {
rc-update add node-exporter default
rc-service node-exporter start
log -t user-data info "Enabled and started Prometheus node-exporter"
}
# Configure and start fluent-bit log forwarding.
# $1 = target as "host" or "sharedkey@host" (defaults: key "cloudbender",
# host "fluentd"). Writes metadata and forward-output configs, then enables
# the service.
setup_fluentbit() {
local key="cloudbender"
local host="${1:-fluentd}"
# split optional "sharedkey@host" form
if [[ "$host" =~ "@" ]]; then
key=${host%%@*}
host=${host##*@}
fi
# add some AWS metadata
cat <<EOF > /etc/fluent-bit/metadata.conf
[FILTER]
Name record_modifier
Match *
Record source.ip $IP_ADDRESS
Record source.instance_id $INSTANCE_ID
Record source.region $REGION
Record source.account $AWS_ACCOUNT_ID
Record source.conglomerate $CONGLOMERATE
Record source.artifact $ARTIFACT
EOF
# Configure output
cat <<EOF > /etc/fluent-bit/output.conf
[OUTPUT]
Match *
Name forward
Host $host
Port 24224
Shared_Key $key
tls on
Send_options true
Require_ack_response true
EOF
LOG_FILES=$LOGGING_FILES
## TODO:
# Add parameter parsing for custom logfile tailing
rc-update add fluent-bit default
rc-service fluent-bit start
log -t user-data info "Enabled and started fluent-bit logging agent sending logs to $host"
}

View File

@ -1,12 +0,0 @@
# Borrowed from Ubuntu 20.04LTS minimal EC2 AMI
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
send host-name = gethostname();
request subnet-mask, broadcast-address, time-offset, routers,
domain-name, domain-name-servers, domain-search, host-name,
dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
netbios-name-servers, netbios-scope, interface-mtu,
rfc3442-classless-static-routes, ntp-servers;
timeout 300;

View File

@ -0,0 +1,36 @@
# From https://chromium.googlesource.com/chromiumos/third_party/dhcpcd/+/refs/tags/dhcpcd-6.0.0/dhcpcd-hooks
# Configure the MTU for the interface
mtu_dir="$state_dir/mtu"
# Set the MTU on $interface (dhcpcd hook variable), preferring the sysfs
# knob and falling back to ifconfig where sysfs is not writable.
# $1 - MTU value in bytes
set_mtu()
{
	local mtu=$1

	# Quote $interface — unquoted it is subject to word-splitting/globbing
	if [ -w "/sys/class/net/$interface/mtu" ]; then
		echo "$mtu" > "/sys/class/net/$interface/mtu"
	else
		ifconfig "$interface" mtu "$mtu"
	fi
}
# dhcpcd hook body: apply the DHCP-provided interface MTU and restore the
# previous MTU once the lease goes away (borrowed from ChromiumOS dhcpcd).
# Relies on dhcpcd-supplied context: $reason, $interface, $new_interface_mtu,
# $ifmtu, and the $if_up / $if_down helper expressions.
if [ "$reason" = PREINIT -a -e "$mtu_dir/$interface" ]; then
# Interface is being (re)initialised — drop any stale saved MTU
rm "$mtu_dir/$interface"
elif [ -n "$new_interface_mtu" ] && $if_up; then
# The smallest MTU dhcpcd can work with is 576
if [ "$new_interface_mtu" -ge 576 ]; then
if set_mtu "$new_interface_mtu"; then
syslog info "MTU set to $new_interface_mtu"
# Save the MTU so we can restore it later
if [ ! -e "$mtu_dir/$interface" ]; then
mkdir -p "$mtu_dir"
echo "$ifmtu" > "$mtu_dir/$interface"
fi
fi
fi
elif [ -e "$mtu_dir/$interface" ]; then
if $if_up || $if_down; then
# No MTU in this state, so restore the prior MTU
mtu=$(cat "$mtu_dir/$interface")
syslog info "MTU restored to $mtu"
set_mtu "$mtu"
rm "$mtu_dir/$interface"
fi
fi

View File

@ -58,6 +58,7 @@ try:
Encoding="SSH",
)
if key["SSHPublicKey"]["Status"] == "Active":
print(key["SSHPublicKey"]["SSHPublicKeyBody"], user["UserName"])
print(key["SSHPublicKey"]
["SSHPublicKeyBody"], user["UserName"])
except:
pass

View File

@ -6,6 +6,6 @@ shift
ATTACHMENT="$@"
if [ -n "${MONIT_SERVICE}${MONIT_EVENT}" -a -n "$MONIT_DESCRIPTION" ]; then
[ -x /var/lib/cloudbender/sns_alarm.sh ] && \
/var/lib/cloudbender/sns_alarm.sh "$MONIT_SERVICE - $MONIT_EVENT" "$MONIT_DESCRIPTION" "$LEVEL" "$ATTACHMENT"
[ -x /var/lib/cloud/sns_alarm.sh ] && \
/var/lib/cloud/sns_alarm.sh "$MONIT_SERVICE - $MONIT_EVENT" "$MONIT_DESCRIPTION" "$LEVEL" "$ATTACHMENT"
fi

View File

@ -1,6 +1,6 @@
# Give instance 3 min to settle after boot
set daemon 30
with start delay 180
# add `for 2 cycles` might be better than this initial block
# with start delay 120
set log syslog

View File

@ -1,5 +1,5 @@
print_info() {
echo -e "\n"
#echo -e "\n"
prin "$(color 1)Welcome to Alpine - ZeroDownTime edition"
echo

View File

@ -29,7 +29,8 @@ def update_dns(record_name, ips=[], ttl=180, action="UPSERT", record_type='A'):
{"Value": ip}
)
route53.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=changeset)
route53.change_resource_record_sets(
HostedZoneId=zone_id, ChangeBatch=changeset)
parser = argparse.ArgumentParser(description='Update Route53 entries')
@ -49,4 +50,5 @@ action = "UPSERT"
if args.delete:
action = "DELETE"
update_dns(args.fqdn, args.record, action=action, ttl=args.ttl, record_type=args.record_type)
update_dns(args.fqdn, args.record, action=action,
ttl=args.ttl, record_type=args.record_type)

View File

@ -1,4 +1,6 @@
/proc/uptime r,
/var/lib/syslog-ng/syslog-ng.ctl rw,
@{PROC}/@{pid}/cmdline r,
@{PROC}/@{pid}/loginuid r,
@{PROC}/@{pid}/sessionid r,
ptrace (read) peer=unconfined,

View File

@ -1,12 +1,12 @@
# syslog-ng, format all json into messages
# https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.23/administration-guide/63#TOPIC-1268643
@version: 3.36
@version: 4.5
@include "scl.conf"
options { chain_hostnames(off); flush_lines(0); use_dns(no); use_fqdn(no);
dns_cache(no); owner("root"); group("adm"); perm(0640);
stats_freq(0); bad_hostname("^gconfd$"); frac-digits(6);
stats(freq(43200)); bad_hostname("^gconfd$"); frac-digits(9); keep-timestamp(no);
};
source s_sys { system(); internal();};
@ -15,8 +15,9 @@ destination d_mesg { file("/var/log/messages" template("$(format-json time=\"$UN
# filter ipvs loggging each SYN to closed port
# IPVS: rr: TCP 10.52.82.199:31021 - no destination available
filter f_drop_ipvs { not (facility(kern) and match("IPVS: rr:.*no destination available" value("MESSAGE"))); };
# filter f_drop_ipvs { not (facility(kern) and match("IPVS: rr:.*no destination available" value("MESSAGE"))); };
# "message":"net_ratelimit: 16 callbacks suppressed"
filter f_drop_ipvs_ratelimit { not (facility(kern) and match("net_ratelimit:.*callbacks suppressed" value("MESSAGE"))); };
# filter f_drop_ipvs_ratelimit { not (facility(kern) and match("net_ratelimit:.*callbacks suppressed" value("MESSAGE"))); };
# log { source(s_sys); filter(f_drop_ipvs); filter(f_drop_ipvs_ratelimit); destination(d_mesg); };
log { source(s_sys); filter(f_drop_ipvs); filter(f_drop_ipvs_ratelimit); destination(d_mesg); };
log { source(s_sys); destination(d_mesg); };

View File

@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/env python3
# use pyminify: pyminifier --obfuscate-variables $0 > minified_$0

View File

@ -0,0 +1,63 @@
#!/usr/bin/python3
import os
import boto3
import argparse
# CLI setup: mirror an SSM parameter subtree (--path) into files under --root.
parser = argparse.ArgumentParser(
    description="Get SSM parameters beyond <path> and write to files")
parser.add_argument(
    "--path",
    dest="path",
    action="store",
    required=True,
    help="SSM parameter path")
parser.add_argument(
    "--root",
    dest="root",
    action="store",
    required=True,
    help="root filesystem path to create files")
args = parser.parse_args()
# Default session: region/credentials come from the instance environment
session = boto3.Session()
awsSSMClient = session.client('ssm')
def get_parameters_by_path(nextToken=None):
    """Fetch one page of decrypted SSM parameters below args.path.

    nextToken: pagination token from a previous response, or None for the
    first page. Returns the raw boto3 response dict.
    """
    request = {
        'Path': args.path,
        'Recursive': True,
        'WithDecryption': True,
    }
    if nextToken is not None:
        request['NextToken'] = nextToken
    return awsSSMClient.get_parameters_by_path(**request)
def getParameters():
    """Yield every SSM parameter stored below args.path, page by page."""
    token = None
    while True:
        page = get_parameters_by_path(token)
        batch = page['Parameters']
        if not batch:
            break
        yield from batch
        # Absent NextToken means this was the final page
        token = page.get('NextToken')
        if token is None:
            break
# Materialise each parameter as a file under args.root, mirroring the
# SSM path hierarchy below args.path.
for parameter in getParameters():
    file_name = os.path.join(
        args.root, parameter["Name"].removeprefix(
            args.path).lstrip("/"))
    # Create intermediate directories world-readable (0755)
    os.makedirs(os.path.dirname(file_name), mode=0o755, exist_ok=True)
    #print(f'{file_name}={parameter["Value"]}')
    with open(file_name, "w") as file:
        file.write(parameter["Value"])

View File

@ -1,5 +1,7 @@
#!/bin/sh
. /lib/tiny-cloud/common
# Enable SSH keepalive
sed -i -e 's/^[\s#]*TCPKeepAlive\s.*/TCPKeepAlive yes/' -e 's/^[\s#]*ClientAliveInterval\s.*/ClientAliveInterval 60/' /etc/ssh/sshd_config
echo 'enabled SSH keep alives'
@ -9,23 +11,31 @@ sed -i -e 's/^[\s#]*rc_cgroup_mode=.*/rc_cgroup_mode="unified"/' /etc/rc.conf
sed -i -e 's/^[\s#]*rc_logger=.*/rc_logger="YES"/' /etc/rc.conf
echo 'enabled cgroupv2, openRC logging'
# speed up dhcpcd and chronyd
add_once /etc/dhcpcd.conf noarp >/dev/null
sed -i -e 's/^[\s#]*FAST_STARTUP=.*/FAST_STARTUP=yes/' /etc/conf.d/chronyd
# OpenRC parallel - causes too much chaos
#sed -i -e 's/^[\s#]*rc_parallel=.*/rc_parallel="YES"/' /etc/rc.conf
#echo 'enable parallel openRC'
# load falco kernel module at boot
grep -q falco /etc/modules || echo falco >> /etc/modules
# Setup syslog-ng json logging and apparmor tweaks
cp /lib/zdt/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
cp /lib/zdt/syslog-ng.logrotate.conf /etc/logrotate.d/syslog-ng
cp /lib/zdt/syslog-ng.apparmor /etc/apparmor.d/local/sbin.syslog-ng
mv /etc/periodic/daily/logrotate /etc/periodic/hourly/
[ -f /etc/periodic/daily/logrotate ] && mv /etc/periodic/daily/logrotate /etc/periodic/hourly/
echo 'syslog-ng: all to /var/log/messages as json, rotate hourly'
# use init to spawn monit
echo ":2345:respawn:/usr/bin/monit -Ic /etc/monitrc.zdt" >> /etc/inittab
add_once /etc/inittab ":2345:respawn:/usr/bin/monit -Ic /etc/monitrc.zdt" >/dev/null
echo 'Enable monit via inittab'
# QoL
mv /etc/profile.d/color_prompt.sh.disabled /etc/profile.d/color_prompt.sh || true
echo 'alias rs="doas bash"' > /etc/profile.d/alias.sh
# QoL - color prompt even for doas bash
[ -f /etc/profile.d/color_prompt.sh.disabled ] && mv /etc/profile.d/color_prompt.sh.disabled /etc/profile.d/color_prompt.sh
ln -sf /etc/profile.d/color_prompt.sh /etc/bash/color_prompt.sh
echo 'alias rs="doas bash"' > /etc/profile.d/alias.sh

View File

@ -13,3 +13,4 @@ net.ipv4.ip_forward_use_pmtu = 0
kernel.panic = 10
kernel.panic_on_oops = 1
vm.oom_dump_tasks = 0
vm.max_map_count=262144

10
scripts/rebuild_new_kernel.sh Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
# Rebuild and upload the kernel-dependent packages after a kernel bump.
pkg_list="falco-kernel nvidia-open-gpu aws-neuron-driver"
for pkg in $pkg_list; do
    # Drop stale artifacts so make rebuilds the package from scratch
    rm -f packages/kubezero/*/$pkg*.apk
    make apk PKG=kubezero/$pkg/APKBUILD
done
make upload