Compare commits


No commits in common. "master" and "v0.7.5" have entirely different histories.

14 changed files with 85 additions and 265 deletions


@@ -1,63 +0,0 @@
#!/usr/bin/env python3
import argparse
import boto3
parser = argparse.ArgumentParser(
description='Implement basic public ECR lifecycle policy')
parser.add_argument('--repo', dest='repositoryName', action='store', required=True,
help='Name of the public ECR repository')
parser.add_argument('--keep', dest='keep', action='store', default=10, type=int,
help='number of tagged images to keep, default 10')
parser.add_argument('--dev', dest='delete_dev', action='store_true',
help='also delete in-development images only having tags like v0.1.1-commitNr-githash')
args = parser.parse_args()
client = boto3.client('ecr-public', region_name='us-east-1')
images = client.describe_images(repositoryName=args.repositoryName)[
"imageDetails"]
untagged = []
kept = 0
# actual Image
# imageManifestMediaType: 'application/vnd.oci.image.manifest.v1+json'
# image Index
# imageManifestMediaType: 'application/vnd.oci.image.index.v1+json'
# Sort by date uploaded
for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
# Remove all untagged
# if registry uses image index all actual images will be untagged anyways
if 'imageTags' not in image:
untagged.append({"imageDigest": image['imageDigest']})
# print("Delete untagged image {}".format(image["imageDigest"]))
continue
# check for dev tags
if args.delete_dev:
_delete = True
for tag in image["imageTags"]:
# Look for at least one tag NOT being a SemVer dev tag
if "-" not in tag:
_delete = False
if _delete:
print("Deleting development image {}".format(image["imageTags"]))
untagged.append({"imageDigest": image['imageDigest']})
continue
if kept < args.keep:
kept = kept+1
print("Keeping tagged image {}".format(image["imageTags"]))
continue
else:
print("Deleting tagged image {}".format(image["imageTags"]))
untagged.append({"imageDigest": image['imageDigest']})
deleted_images = client.batch_delete_image(
repositoryName=args.repositoryName, imageIds=untagged)
if deleted_images["imageIds"]:
print("Deleted images: {}".format(deleted_images["imageIds"]))


@@ -1,26 +1,25 @@
# Parse version from latest git semver tag
GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_TAG := $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
TAG ::= $(GIT_TAG)
TAG := $(GIT_TAG)
# append branch name to tag if NOT main nor master
ifeq (,$(filter main master, $(GIT_BRANCH)))
# If branch is substring of tag, omit branch name
ifeq ($(findstring $(GIT_BRANCH), $(GIT_TAG)),)
# only append branch name if not equal tag
ifneq ($(GIT_TAG), $(GIT_BRANCH))
# Sanitize GIT_BRANCH to allowed Docker tag character set
TAG = $(GIT_TAG)-$(shell echo $$GIT_BRANCH | sed -e 's/[^a-zA-Z0-9]/-/g')
TAG = $(GIT_TAG)-$(GIT_BRANCH)
endif
endif
endif
ARCH ::= amd64
ALL_ARCHS ::= amd64 arm64
ARCH := amd64
ALL_ARCHS := amd64 arm64
_ARCH = $(or $(filter $(ARCH),$(ALL_ARCHS)),$(error $$ARCH [$(ARCH)] must be exactly one of "$(ALL_ARCHS)"))
ifneq ($(TRIVY_REMOTE),)
TRIVY_OPTS ::= --server $(TRIVY_REMOTE)
TRIVY_OPTS := --server $(TRIVY_REMOTE)
endif
.SILENT: ; # no need for @
@@ -33,20 +32,18 @@ endif
help: ## Show Help
grep -E '^[a-zA-Z_-]+:.*?## .*$$' .ci/podman.mk | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
prepare:: ## custom step on the build agent before building
fmt:: ## auto format source
lint:: ## Lint source
build: ## Build the app
buildah build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .
test:: ## test built artifacts
test: rm-test-image ## Execute Dockerfile.test
test -f Dockerfile.test && \
{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-$(_ARCH)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(_ARCH) . && \
podman run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(_ARCH)-test; } || \
echo "No Dockerfile.test found, skipping test"
scan: ## Scan image using trivy
echo "Scanning $(IMAGE):$(TAG)-$(_ARCH) using Trivy $(TRIVY_REMOTE)"
trivy image $(TRIVY_OPTS) --quiet --no-progress localhost/$(IMAGE):$(TAG)-$(_ARCH)
trivy image $(TRIVY_OPTS) localhost/$(IMAGE):$(TAG)-$(_ARCH)
# first tag and push all actual images
# create new manifest for each tag and add all available TAG-ARCH before pushing
@@ -66,19 +63,24 @@ push: ecr-login ## push images to registry
ecr-login: ## log into AWS ECR public
aws ecr-public get-login-password --region $(REGION) | podman login --username AWS --password-stdin $(REGISTRY)
rm-remote-untagged: ## delete all remote untagged and in-dev images, keep 10 tagged
echo "Removing all untagged and in-dev images from $(IMAGE) in $(REGION)"
.ci/ecr_public_lifecycle.py --repo $(IMAGE) --dev
clean: rm-test-image rm-image ## delete local built container and test images
clean:: ## clean up source folder
rm-remote-untagged: ## delete all remote untagged images
echo "Removing all untagged images from $(IMAGE) in $(REGION)"
IMAGE_IDS=$$(for image in $$(aws ecr-public describe-images --repository-name $(IMAGE) --region $(REGION) --output json | jq -r '.imageDetails[] | select(.imageTags | not ).imageDigest'); do echo -n "imageDigest=$$image "; done) ; \
[ -n "$$IMAGE_IDS" ] && aws ecr-public batch-delete-image --repository-name $(IMAGE) --region $(REGION) --image-ids $$IMAGE_IDS || echo "No image to remove"
rm-image:
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH) > /dev/null
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || echo "Error: Removing image failed"
## some useful tasks during development
# Ensure we run the tests by removing any previous runs
rm-test-image:
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH)-test)" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH)-test > /dev/null
test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH)-test)" || echo "Error: Removing test image failed"
ci-pull-upstream: ## pull latest shared .ci subtree
git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
git stash && git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash && git stash pop
create-repo: ## create new AWS ECR public repository
aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)


@@ -2,9 +2,6 @@
def call(Map config=[:]) {
pipeline {
options {
disableConcurrentBuilds()
}
agent {
node {
label 'podman-aws-trivy'
@@ -13,22 +10,18 @@ def call(Map config=[:]) {
stages {
stage('Prepare') {
steps {
sh 'mkdir -p reports'
// we set pull tags as project adv. options
// pull tags
//withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
// sh 'git fetch -q --tags ${GIT_URL}'
//}
// Optional project specific preparations
sh 'make prepare'
withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
sh 'git fetch -q --tags ${GIT_URL}'
}
sh 'make prepare || true'
}
}
// Build using rootless podman
stage('Build') {
steps {
sh 'make build GIT_BRANCH=$GIT_BRANCH'
sh 'make build'
}
}
@@ -40,13 +33,12 @@ def call(Map config=[:]) {
// Scan via trivy
stage('Scan') {
environment {
TRIVY_FORMAT = "template"
TRIVY_OUTPUT = "reports/trivy.html"
}
steps {
// we always scan and create the full json report
sh 'TRIVY_FORMAT=json TRIVY_OUTPUT="reports/trivy.json" make scan'
// render custom full html report
sh 'trivy convert -f template -t @/home/jenkins/html.tpl -o reports/trivy.html reports/trivy.json'
sh 'mkdir -p reports && make scan'
publishHTML target: [
allowMissing: true,
alwaysLinkToLastBuild: true,
@@ -56,33 +48,26 @@ def call(Map config=[:]) {
reportName: 'TrivyScan',
reportTitles: 'TrivyScan'
]
sh 'echo "Trivy report at: $BUILD_URL/TrivyScan"'
// fail build if issues found above trivy threshold
// Scan again and fail on CRITICAL vulns, if not overridden
script {
if ( config.trivyFail ) {
sh "TRIVY_SEVERITY=${config.trivyFail} trivy convert --report summary --exit-code 1 reports/trivy.json"
if (config.trivyFail == 'NONE') {
echo 'trivyFail == NONE, review Trivy report manually. Proceeding ...'
} else {
sh "TRIVY_EXIT_CODE=1 TRIVY_SEVERITY=${config.trivyFail} make scan"
}
}
}
}
// Push to container registry if not PR
// incl. basic registry retention removing any untagged images
// Push to container registry, skip if PR
stage('Push') {
when { not { changeRequest() } }
steps {
sh 'make push'
sh 'make rm-remote-untagged'
}
}
// generic clean
stage('cleanup') {
steps {
sh 'make clean'
}
}
}
}
}

.gitignore

@@ -59,5 +59,3 @@ reports/
# virtualenv
venv/
ENV/
aws-lambda-rie


@@ -1,16 +1,15 @@
# https://aws.amazon.com/blogs/aws/new-for-aws-lambda-container-image-support/
ARG RUNTIME_VERSION="3.9"
# libexecinfo is missing from >=3.17
ARG DISTRO_VERSION="3.16"
# Stage 1 - bundle base image + runtime
FROM python:3.12-alpine3.19 AS python-alpine
ARG ALPINE="v3.19"
FROM python:${RUNTIME_VERSION}-alpine${DISTRO_VERSION} AS python-alpine
# Install GCC (Alpine uses musl but we compile and link dependencies with GCC)
RUN echo "@kubezero https://cdn.zero-downtime.net/alpine/${ALPINE}/kubezero" >> /etc/apk/repositories && \
wget -q -O /etc/apk/keys/stefan@zero-downtime.net-61bb6bfb.rsa.pub https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub
RUN apk -U --no-cache upgrade && \
apk --no-cache add \
RUN apk upgrade -U --available --no-cache && \
apk add --no-cache \
libstdc++
@@ -19,7 +18,8 @@ FROM python-alpine AS build-image
ARG TAG="latest"
# Install aws-lambda-cpp build dependencies
RUN apk --no-cache add \
RUN apk upgrade -U --available --no-cache && \
apk add --no-cache \
build-base \
libtool \
autoconf \
@@ -28,8 +28,8 @@ RUN apk --no-cache add \
cmake \
libcurl \
libffi-dev \
openssl-dev \
libexecinfo-dev@kubezero
libexecinfo-dev \
openssl-dev
# cargo
# Install requirements
@@ -40,7 +40,7 @@ RUN export MAKEFLAGS="-j$(nproc)" && \
# Install our app
COPY app.py /app
# Set internal __version__ to our own container TAG
# Set version to our TAG
RUN sed -i -e "s/^__version__ =.*/__version__ = \"${TAG}\"/" /app/app.py
# Stage 3 - final runtime image

Dockerfile.test

@@ -0,0 +1,26 @@
FROM setviacmdline:latest
# Install additional tools for tests
COPY dev-requirements.txt .flake8 .
RUN export MAKEFLAGS="-j$(nproc)" && \
pip install -r dev-requirements.txt
# Unit Tests / Static / Style etc.
COPY tests/ tests/
RUN flake8 app.py tests && \
codespell app.py tests
# Get aws-lambda run time emulator
ADD https://github.com/aws/aws-lambda-runtime-interface-emulator/releases/latest/download/aws-lambda-rie /usr/local/bin/aws-lambda-rie
RUN chmod 0755 /usr/local/bin/aws-lambda-rie && \
mkdir -p tests
# Install pytest
RUN pip install pytest --target /app
# Add our tests
ADD tests /app/tests
# Run tests
ENTRYPOINT []
CMD /usr/local/bin/python -m pytest tests -c tests/pytest.ini --capture=tee-sys
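
The tests exercise the handler through the aws-lambda-rie emulator fetched above; a minimal sketch of invoking it over HTTP once it is running, assuming the emulator's default port 8080 and an illustrative SNS-style event:

import json
import requests

# aws-lambda-rie exposes the standard Lambda invoke endpoint on port 8080
RIE_URL = "http://localhost:8080/2015-03-31/functions/function/invocations"

event = {"Records": [{"EventSource": "aws:sns", "Sns": {"Subject": "test", "Message": "{}"}}]}
resp = requests.post(RIE_URL, data=json.dumps(event), timeout=10)
print(resp.status_code, resp.text)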


@@ -3,21 +3,3 @@ IMAGE := sns-alert-hub
REGION := us-east-1
include .ci/podman.mk
SOURCE := app.py tests/test_aws-lambda-rie.py
test:: aws-lambda-rie
./run_tests.sh "$(IMAGE):$(TAG)-$(_ARCH)"
fmt::
autopep8 -i -a $(SOURCE)
lint::
flake8 $(SOURCE)
codespell $(SOURCE)
clean::
rm -rf .pytest_cache __pycache__ aws-lambda-rie
aws-lambda-rie:
wget https://github.com/aws/aws-lambda-runtime-interface-emulator/releases/latest/download/aws-lambda-rie && chmod 0755 aws-lambda-rie

app.py

@@ -106,8 +106,7 @@ def handler(event, context):
if "AlarmName" in msg:
title = "AWS Cloudwatch Alarm"
# Discard NewStateValue == OK && OldStateValue == INSUFFICIENT_DATA as
# these are triggered by installing new Alarms and only cause confusion
# Discard NewStateValue == OK && OldStateValue == INSUFFICIENT_DATA as these are triggered by installing new Alarms and only cause confusion
if msg["NewStateValue"] == "OK" and msg["OldStateValue"] == "INSUFFICIENT_DATA":
logger.info(
"Discarding Cloudwatch Metrics Alarm as state is OK and previous state was insufficient data, most likely new alarm being installed"
@@ -251,43 +250,6 @@ def handler(event, context):
body = "{} will be replaced between {} and {}".format(
msg["ElastiCache:NodeReplacementScheduled"], msg["Start Time"], msg["End Time"])
# ElastiCache replacement notifications
elif "ElastiCache:CacheNodeReplaceStarted" in msg:
title = "ElastiCache fail over stareted"
body = "for node {}".format(msg["ElastiCache:CacheNodeReplaceStarted"])
# ElastiCache replacement notifications
elif "ElastiCache:FailoverComplete" in msg:
title = "ElastiCache fail over complete"
body = "for node {}".format(msg["ElastiCache:FailoverComplete"])
# ElastiCache update notifications
elif "ElastiCache:ServiceUpdateAvailableForNode" in msg:
title = "ElastiCache update available"
body = "for node {}".format(msg["ElastiCache:ServiceUpdateAvailableForNode"])
elif "ElastiCache:ServiceUpdateAvailable" in msg:
title = "ElastiCache update available"
body = "for Group {}".format(msg["ElastiCache:ServiceUpdateAvailable"])
# known RDS events
elif "Event Source" in msg and msg['Event Source'] in ["db-instance", "db-cluster-snapshot", "db-snapshot"]:
try:
title = msg["Event Message"]
try:
name = " ({}).".format(
msg["Tags"]["Name"])
except (KeyError, IndexError):
name = ""
body = "RDS {}: <{}|{}>{}\n<{}|Event docs>".format(msg["Event Source"].replace("db-", ""),
msg["Identifier Link"], msg["Source ID"], name, msg["Event ID"])
except KeyError:
msg_type = apprise.NotifyType.WARNING
body = sns["Message"]
pass
# Basic ASG events
elif "Event" in msg and msg["Event"] in ["autoscaling:EC2_INSTANCE_TERMINATE", "autoscaling:EC2_INSTANCE_LAUNCH"]:
title = msg["Description"]
@@ -308,5 +270,4 @@ def handler(event, context):
msg_type = apprise.NotifyType.WARNING
body = sns["Message"]
if not apobj.notify(body=body, title=title, notify_type=msg_type):
logger.error("Error during notify!")
apobj.notify(body=body, title=title, notify_type=msg_type)
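
The notifications themselves go through Apprise; a minimal stand-alone sketch of that pattern, using an illustrative placeholder URL rather than the handler's actual configuration:

import apprise

apobj = apprise.Apprise()
apobj.add("json://localhost")  # placeholder target; the handler reads its own config

ok = apobj.notify(body="test body", title="AWS Cloudwatch Alarm",
                  notify_type=apprise.NotifyType.WARNING)
if not ok:
    print("Error during notify!")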


@@ -1,4 +1,3 @@
pytest
autopep8
flake8
codespell


@@ -1,10 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended",
":label(renovate)",
":semanticCommits",
"group:allNonMajor"
],
"prHourlyLimit": 0
}


@@ -1,4 +1,4 @@
boto3==1.34.84
apprise==1.7.6
humanize==4.9.0
awslambdaric==2.0.11
boto3
apprise
humanize
awslambdaric


@@ -1,17 +0,0 @@
#!/bin/sh -ex
IMAGE=$1
ctr=$(buildah from $IMAGE)
trap "buildah rm $ctr" EXIT
buildah copy $ctr dev-requirements.txt .flake8 .
buildah copy $ctr aws-lambda-rie
buildah copy $ctr tests/ tests/
buildah run $ctr pip install -r dev-requirements.txt --target .
buildah run $ctr python -m flake8 app.py
buildah run $ctr python -m codespell_lib app.py
buildah run $ctr python -m pytest tests -c tests/pytest.ini --capture=tee-sys


@@ -9,13 +9,8 @@ from requests.packages.urllib3.util.retry import Retry
s = requests.Session()
retries = Retry(
total=3,
backoff_factor=1,
status_forcelist=[
502,
503,
504],
allowed_methods="POST")
total=3, backoff_factor=1, status_forcelist=[502, 503, 504], allowed_methods="POST"
)
s.mount("http://", HTTPAdapter(max_retries=retries))
@@ -23,7 +18,7 @@ class Test:
@classmethod
def setup_class(cls):
cls.p = subprocess.Popen(
"./aws-lambda-rie python -m awslambdaric app.handler", shell=True
"aws-lambda-rie python -m awslambdaric app.handler", shell=True
)
@classmethod
@@ -73,44 +68,6 @@ class Test:
)
self.send_event(event)
# RDS
def test_rds_event(self):
event = json.loads(
r''' {
"Records": [
{
"EventSource": "aws:sns",
"EventVersion": "1.0",
"EventSubscriptionArn": "arn:aws:sns:us-west-2:123456789012:AlertHub:63470449-620d-44ce-971f-ad9582804b13",
"Sns": {
"Type": "Notification",
"MessageId": "ef1f821c-a04f-5c5c-9dff-df498532069b",
"TopicArn": "arn:aws:sns:us-west-2:123456789012:AlertHub",
"Subject": "RDS Notification Message",
"Message": "{\"Event Source\":\"db-cluster-snapshot\",\"Event Time\":\"2023-08-15 07:03:24.491\",\"Identifier Link\":\"https://console.aws.amazon.com/rds/home?region=us-west-2#snapshot:engine=aurora;id=rds:projectdb-cluster-2023-08-15-07-03\",\"Source ID\":\"rds:projectdb-cluster-2023-08-15-07-03\",\"Source ARN\":\"arn:aws:rds:us-west-2:123456789012:cluster-snapshot:rds:projectdb-cluster-2023-08-15-07-03\",\"Event ID\":\"http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html#RDS-EVENT-0168\",\"Event Message\":\"Creating automated cluster snapshot\",\"Tags\":{}}",
"Timestamp": "2023-08-15T07:03:25.289Z",
"SignatureVersion": "1",
"Signature": "mRtx+ddS1uzF3alGDWnDtUkAz+Gno8iuv0wPwkeBJPe1LAcKTXVteYhQdP2BB5ZunPlWXPSDsNtFl8Eh6v4/fcdukxH/czc6itqgGiciQ3DCICLvOJrvrVVgsVvHgOA/Euh8wryzxeQ3HJ/nmF9sg/PtuKyxvGxyO7NSFJrRKkqwkuG1Wr/8gcN3nrenqNTzKiC16kzVuKISWgXM1jqbsleQ4MyBcjq61LRwODKB8tc8vJ6PLGOs4Lrc3qeruCqF3Tzpl43680RsaRBBn1SLycwFVdB1kpHSXuk+YJQ6BS7s6rbMoyhPOpSCFHMZXC/eEb09wTzgpop0KDE/koiUsg==",
"SigningCertUrl": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-01d088a6f77103d0fe307c0069e40ed6.pem",
"UnsubscribeUrl": "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:123456789012:AlertHub:63470449-620d-44ce-971f-ad9582804b13",
"MessageAttributes": {
"Resource": {
"Type": "String",
"Value": "arn:aws:rds:us-west-2:123456789012:cluster-snapshot:rds:projectdb-cluster-2023-08-15-07-03"
},
"EventID": {
"Type": "String",
"Value": "RDS-EVENT-0168"
}
}
}
}
]
}
'''
)
self.send_event(event)
def test_asg(self):
event = json.loads(
r' {"Records": [{"EventSource": "aws:sns", "EventVersion": "1.0", "EventSubscriptionArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub:0e7ce1ba-c3e4-4264-bae1-4eb71c91235a", "Sns": {"Type": "Notification", "MessageId": "10ae86eb-9ddc-5c2f-806c-df6ecb6bde42", "TopicArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub", "Subject": null, "Message": "{\"Origin\":\"AutoScalingGroup\",\"Destination\":\"EC2\",\"Progress\":50,\"AccountId\":\"123456789012\",\"Description\":\"Terminating EC2 instance: i-023ca42b188ffd91d\",\"RequestId\":\"1764cac3-224b-46bf-8bed-407a5b868e63\",\"EndTime\":\"2023-05-15T08:51:16.195Z\",\"AutoScalingGroupARN\":\"arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:4a4fb6e3-22b4-487b-8335-3904f02ff9fd:autoScalingGroupName/powerbi\",\"ActivityId\":\"1764cac3-224b-46bf-8bed-407a5b868e63\",\"StartTime\":\"2023-05-15T08:50:14.145Z\",\"Service\":\"AWS Auto Scaling\",\"Time\":\"2023-05-15T08:51:16.195Z\",\"EC2InstanceId\":\"i-023ca42b188ffd91d\",\"StatusCode\":\"InProgress\",\"StatusMessage\":\"\",\"Details\":{\"Subnet ID\":\"subnet-fe2d6189\",\"Availability Zone\":\"us-west-2a\"},\"AutoScalingGroupName\":\"powerbi\",\"Cause\":\"At 2023-05-15T08:50:03Z the scheduled action end executed. Setting min size from 1 to 0. Setting desired capacity from 1 to 0. At 2023-05-15T08:50:03Z a scheduled action update of AutoScalingGroup constraints to min: 0, max: 1, desired: 0 changing the desired capacity from 1 to 0. At 2023-05-15T08:50:13Z an instance was taken out of service in response to a difference between desired and actual capacity, shrinking the capacity from 1 to 0. At 2023-05-15T08:50:14Z instance i-023ca42b188ffd91d was selected for termination.\",\"Event\":\"autoscaling:EC2_INSTANCE_TERMINATE\"}" }}]}'