Squashed '.ci/' changes from 5819ded..955afa7

955afa7 Apply pep8

git-subtree-dir: .ci
git-subtree-split: 955afa71eec3533962eae428f46d5372a22ab85f

This commit is contained in: parent cb8ae01ca4, commit 785cddf02c

@@ -1,25 +0,0 @@
# ci-tools-lib

Various toolchain bits and pieces shared between projects

# Quickstart
Create top-level Makefile
```
REGISTRY := <your-registry>
IMAGE := <image_name>
REGION := <AWS region of your registry>

include .ci/podman.mk
```

Add subtree to your project:
```
git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
```


## Jenkins
Shared groovy libraries

## Make
Common Makefile include

.flake8
@@ -1,3 +0,0 @@
[flake8]
extend-ignore = E501
exclude = .git,__pycache__,build,dist,report

.gitignore (vendored)
@@ -1,61 +0,0 @@
# Vim
*.swp

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
.pytest*
nosetests.xml
coverage.xml
*,cover
.hypothesis/
reports/

# Translations
*.mo
*.pot

# dotenv
.env

# virtualenv
venv/
ENV/

Dockerfile
@@ -1,53 +0,0 @@
# https://aws.amazon.com/blogs/aws/new-for-aws-lambda-container-image-support/
# libexec is missing from >=3.17

# Stage 1 - bundle base image + runtime
FROM python:3.9-alpine3.16 AS python-alpine

# Install GCC (Alpine uses musl but we compile and link dependencies with GCC)
RUN apk upgrade -U --available --no-cache && \
    apk add --no-cache \
    libstdc++


# Stage 2 - build function and dependencies
FROM python-alpine AS build-image
ARG TAG="latest"

# Install aws-lambda-cpp build dependencies
RUN apk upgrade -U --available --no-cache && \
    apk add --no-cache \
    build-base \
    libtool \
    autoconf \
    automake \
    make \
    cmake \
    libcurl \
    libffi-dev \
    libexecinfo-dev \
    openssl-dev
# cargo

# Install requirements
COPY requirements.txt requirements.txt
RUN export MAKEFLAGS="-j$(nproc)" && \
    pip install -r requirements.txt --target /app

# Install our app
COPY app.py /app

# Set version to our TAG
RUN sed -i -e "s/^__version__ =.*/__version__ = \"${TAG}\"/" /app/app.py

# Stage 3 - final runtime image
FROM python-alpine

WORKDIR /app
COPY --from=build-image /app /app

ENTRYPOINT [ "/usr/local/bin/python", "-m", "awslambdaric" ]
CMD [ "app.handler" ]

LABEL zero-downtime.net.image.maintainer="stefan@zero-downtime.net" \
    zero-downtime.net.image.license="AGPLv3"

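The ENTRYPOINT/CMD pair above hands "app.handler" to awslambdaric, the Lambda Runtime Interface Client: it imports the module `app` and calls its `handler` callable with the event payload and a context object on every invocation. A minimal sketch of that contract (the real implementation is the app.py listed further below):

```
# Hypothetical minimal app.py satisfying CMD [ "app.handler" ]:
# awslambdaric resolves "app.handler" to this callable and invokes it
# once per Lambda event.
def handler(event, context):
    # event: the SNS payload delivered to the function
    # context: runtime metadata such as invoked_function_arn
    return 0
```
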
@@ -1,26 +0,0 @@
FROM setviacmdline:latest

# Install additional tools for tests
COPY dev-requirements.txt .flake8 .
RUN export MAKEFLAGS="-j$(nproc)" && \
    pip install -r dev-requirements.txt

# Unit Tests / Static / Style etc.
COPY tests/ tests/
RUN flake8 app.py tests && \
    codespell app.py tests

# Get aws-lambda run time emulator
ADD https://github.com/aws/aws-lambda-runtime-interface-emulator/releases/latest/download/aws-lambda-rie /usr/local/bin/aws-lambda-rie
RUN chmod 0755 /usr/local/bin/aws-lambda-rie && \
    mkdir -p tests

# Install pytest
RUN pip install pytest --target /app

# Add our tests
ADD tests /app/tests

# Run tests
ENTRYPOINT []
CMD /usr/local/bin/python -m pytest tests -c tests/pytest.ini --capture=tee-sys

Jenkinsfile (vendored)
@@ -1,5 +0,0 @@
library identifier: 'zdt-lib@master', retriever: modernSCM(
    [$class: 'GitSCMSource',
     remote: 'https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git'])

buildPodman name: 'sns-alert-hub'

Makefile
@@ -1,5 +0,0 @@
REGISTRY := public.ecr.aws/zero-downtime
IMAGE := sns-alert-hub
REGION := us-east-1

include .ci/podman.mk

README.md
@@ -1,12 +1,25 @@
-# SNS Alert Hub
-
-## Abstract
-AWS SNS/Lambda central alert hub taking SNS messages, parsing and formatting them before sending them to any messaging service, like Slack, Matrix, etc
-
-## Tests
-All env variables are forwarded into the test container.
-Simply set WEBHOOK_URL accordingly before running `make test`.
-
-## Resources
-- https://gallery.ecr.aws/zero-downtime/sns-alert-hub
-- https://github.com/caronc/apprise
+# ci-tools-lib
+
+Various toolchain bits and pieces shared between projects
+
+# Quickstart
+Create top-level Makefile
+```
+REGISTRY := <your-registry>
+IMAGE := <image_name>
+REGION := <AWS region of your registry>
+
+include .ci/podman.mk
+```
+
+Add subtree to your project:
+```
+git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
+```
+
+
+## Jenkins
+Shared groovy libraries
+
+## Make
+Common Makefile include

SNSAlertHub.yaml
@@ -1,139 +0,0 @@
AWSTemplateFormatVersion: "2010-09-09"

Description: "SNS Topic and tools to fan out alerts to email and or Slack"

Conditions:

  IsSetEmail:
    Fn::Not:
      - Fn::Equals:
          - Ref: AlertEmail
          - ""
  IsSetSlack:
    Fn::Not:
      - Fn::Equals:
          - Ref: AlertSlackWebHook
          - ""

Resources:

  AlertHubTopic:
    Type: AWS::SNS::Topic
    Properties:
      TopicName: AlertHub

  # Email
  EmailAlertsSubscription:
    Type: AWS::SNS::Subscription
    Condition: IsSetEmail
    Properties:
      Endpoint: { Ref: AlertEmail }
      Protocol: email
      TopicArn: { Ref: AlertHubTopic }

  # Slack
  SlackAlertsSubscription:
    Type: AWS::SNS::Subscription
    Condition: IsSetSlack
    Properties:
      Endpoint: { "Fn::GetAtt": ["SNSAlertHubFunction", "Arn"] }
      Protocol: lambda
      TopicArn: { Ref: AlertHubTopic }

  IamRole:
    Type: AWS::IAM::Role
    Condition: IsSetSlack
    Properties:
      Policies:
        - PolicyName: ResolveAccountAlias
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - iam:ListAccountAliases
                Resource:
                  - "*"

        - PolicyName: LogtoCloudwatchGroup
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - logs:CreateLogStream
                  - logs:PutLogEvents
                Resource:
                  - Fn::Sub: "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/SNSAlertHub:log-stream:*"
              - Effect: Allow
                Action:
                  - logs:CreateLogGroup
                Resource:
                  - Fn::Sub: "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/SNSAlertHub:*"

      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Action:
              - sts:AssumeRole
            Effect: Allow
            Principal:
              Service: [ lambda.amazonaws.com ]

  SNSAlertHubAllowed2Lambda:
    Type: AWS::Lambda::Permission
    Condition: IsSetSlack
    Properties:
      Action: lambda:InvokeFunction
      Principal: sns.amazonaws.com
      FunctionName: { Ref: SNSAlertHubFunction }
      SourceArn: { Ref: AlertHubTopic }

  SNSAlertHubFunction:
    Type: AWS::Lambda::Function
    Condition: IsSetSlack
    Properties:
      PackageType: Image
      Code:
        ImageUri: { "Fn::Sub": "${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${ImageTag}" }
      Description: "Lambda function to forward alerts from SNS to Slack"
      FunctionName: SNSAlertHub
      MemorySize: 128
      Role: { "Fn::GetAtt": ["IamRole", "Arn"] }
      Timeout: 10
      Environment:
        Variables:
          WEBHOOK_URL: { Ref: AlertSlackWebHook }
          # DEBUG: "1"

Metadata:
  Template:
    Name: sns-alert-hub
    Hash: 98fcf521f053f7412a90ce360ab62807
    AwsCfnLib: v0.2.1
  CloudBender:
    Version: 0.9.9

Parameters:

  ImageTag:
    Type: String
    Description: "(Optional) Overwrite default ImageTag"
    Default: "sns-alert-hub:v0.5.8"

  AlertEmail:
    Type: String
    Description: "(Optional) Email address to receive alerts via SMTP"
    Default: ""

  AlertSlackWebHook:
    Type: String
    Description: "(Optional) Encrypted (KMS Default key) Slack webhook to post alerts; deploys Slack Lambda function"
    Default: ""
    NoEcho: True

Outputs:

  AlertHubTopic:
    Value: { Ref: AlertHubTopic }
    Description: ARN of the SNS AlertHub Topic

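Since the template only wires the SNSAlertHub Lambda to the AlertHub topic, a quick end-to-end check is to publish a CloudBender-style JSON message to that topic and watch it arrive in the configured messenger. A hedged boto3 sketch; the region and topic ARN below are placeholders, and the message fields mirror the CloudBender test event used in the test suite further down:

```
import json
import boto3

sns = boto3.client("sns", region_name="us-west-2")  # assumed region
sns.publish(
    TopicArn="arn:aws:sns:us-west-2:123456789012:AlertHub",  # placeholder ARN
    Message=json.dumps({
        "Source": "CloudBender",        # routed to the CloudBender branch of app.handler
        "AWSAccountId": "123456789012",
        "Region": "us-west-2",
        "Level": "Info",
        "Subject": "Test",
        "Message": "Hello",
    }),
)
```
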
app.py
@@ -1,301 +0,0 @@
#!/usr/bin/env python
import os
import json
import logging
import boto3
import dateutil.parser
import humanize
import urllib

import apprise

__author__ = "Stefan Reimer"
__author_email__ = "stefan@zero-downtime.net"
__version__ = "latest"

# Global alias lookup cache
account_aliases = {}

logger = logging.getLogger(__name__)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("boto3").setLevel(logging.WARNING)
logging.getLogger("botocore").setLevel(logging.WARNING)


def boolean(value):
    if value in ("t", "T", "true", "True", "TRUE", "1", 1, True):
        return True
    return False


DEBUG = boolean(os.getenv("DEBUG", default=False))
RESOLVE_ACCOUNT = boolean(os.getenv("RESOLVE_ACCOUNT", default=False))
WEBHOOK_URL = os.environ.get("WEBHOOK_URL", "dbus://")

if DEBUG:
    logging.getLogger().setLevel(logging.DEBUG)
else:
    logging.getLogger().setLevel(logging.INFO)

# Ensure slack URLs use ?blocks=yes
if "slack.com" in WEBHOOK_URL:
    scheme, netloc, path, query_string, fragment = urllib.parse.urlsplit(
        WEBHOOK_URL)
    query_params = urllib.parse.parse_qs(query_string)
    query_params["blocks"] = ["yes"]
    new_query_string = urllib.parse.urlencode(query_params, doseq=True)
    WEBHOOK_URL = urllib.parse.urlunsplit(
        (scheme, netloc, path, new_query_string, fragment)
    )

# Setup apprise
asset = apprise.AppriseAsset()

# Set our app_id which is also used as User-Agent
asset.app_desc = "SNSAlertHub part of ZeroDownTime CloudBender"
asset.app_url = "https://zero-downtime.net"
asset.image_url_mask = (
    "https://cdn.zero-downtime.net/assets/zdt/apprise/{TYPE}-{XY}{EXTENSION}"
)
asset.app_id = "{} / {} {}".format("cloudbender",
                                   __version__, "zero-downtime.net")

apobj = apprise.Apprise(asset=asset)
apobj.add(WEBHOOK_URL)


def get_alias(account_id):
    """resolves aws account_id to account alias and caches for lifetime of lambda function"""
    if RESOLVE_ACCOUNT:
        try:
            if account_id not in account_aliases:
                iam = boto3.client("iam")
                account_aliases[account_id] = iam.list_account_aliases()[
                    "AccountAliases"
                ][0]

            return account_aliases[account_id]

        except (KeyError, IndexError):
            logger.warning("Could not resolve IAM account alias")
            pass

    return account_id


def handler(event, context):
    logger.debug(json.dumps({"aws.event": event}))  # sort_keys=True, indent=4

    (region, account_id) = context.invoked_function_arn.split(":")[3:5]

    sns = event["Records"][0]["Sns"]

    # Guess what we have, try to parse as json first
    try:
        msg = json.loads(sns["Message"])

    except json.decoder.JSONDecodeError:
        msg = {}
        pass

    body = ""
    title = ""
    msg_type = apprise.NotifyType.INFO

    # CloudWatch Alarm ?
    if "AlarmName" in msg:
        title = "AWS Cloudwatch Alarm"

        # Discard NewStateValue == OK && OldStateValue == INSUFFICIENT_DATA as these are triggered by installing new Alarms and only cause confusion
        if msg["NewStateValue"] == "OK" and msg["OldStateValue"] == "INSUFFICIENT_DATA":
            logger.info(
                "Discarding Cloudwatch Metrics Alarm as state is OK and previous state was insufficient data, most likely new alarm being installed"
            )
            return 0

        body = msg["AlarmDescription"]

        msg_context = "{account} - {region} -> <https://{region}.console.aws.amazon.com/cloudwatch/home?region={region}#alarmsV2:alarm/{alarm}|Alarm Details>".format(
            region=region,
            alarm=msg["AlarmName"],
            account=get_alias(msg["AWSAccountId"]),
        )

        msg_type = apprise.NotifyType.WARNING
        try:
            notify_map = {
                "ok": apprise.NotifyType.SUCCESS,
                "alarm": apprise.NotifyType.FAILURE,
                "insufficient_data": apprise.NotifyType.INFO,
            }
            msg_type = notify_map[msg["NewStateValue"].lower()]

            # Reduce severity for CPUCredit Alarms to Warning
            if msg_type == apprise.NotifyType.FAILURE:
                if msg["Trigger"]["MetricName"] == "CPUSurplusCreditBalance":
                    msg_type = apprise.NotifyType.WARNING

        except KeyError:
            pass

        body = body + "\n\n_{}_".format(msg_context)

    elif "Source" in msg and msg["Source"] == "CloudBender":
        title = "AWS EC2 - CloudBender"

        try:
            msg_context = "{account} - {region} - {host} ({instance}) <https://{region}.console.aws.amazon.com/ec2/home?region={region}#AutoScalingGroupDetails:id={asg};view=activity|{artifact} ASG>".format(
                account=get_alias(msg["AWSAccountId"]),
                region=msg["Region"],
                asg=msg["Asg"],
                instance=msg["Instance"],
                host=msg["Hostname"],
                artifact=msg["Artifact"],
            )
        except KeyError:
            msg_context = "{account} - {region}".format(
                account=get_alias(msg["AWSAccountId"]), region=msg["Region"]
            )

        msg_type = apprise.NotifyType.WARNING
        try:
            notify_map = {
                "warning": apprise.NotifyType.WARNING,
                "error": apprise.NotifyType.FAILURE,
                "info": apprise.NotifyType.INFO,
                "success": apprise.NotifyType.SUCCESS,
            }
            msg_type = notify_map[msg["Level"].lower()]
        except KeyError:
            pass

        if "Subject" in msg and msg["Subject"]:
            title = msg["Subject"]

        body = ""
        if "Message" in msg and msg["Message"]:
            body = msg["Message"]

        if "Attachment" in msg and msg["Attachment"]:
            body = body + "\n```{}```".format(msg["Attachment"])

        body = body + "\n\n_{}_".format(msg_context)

    elif "receiver" in msg and msg["receiver"] == "alerthub-notifications":

        for alert in msg["alerts"]:

            # First msg_type
            msg_type = apprise.NotifyType.WARNING
            try:
                if alert["status"] == "resolved":
                    msg_type = apprise.NotifyType.SUCCESS
                else:
                    if alert["labels"]["severity"] == "critical":
                        msg_type = apprise.NotifyType.FAILURE
            except KeyError:
                pass

            # set title to Alertname
            try:
                title = alert["labels"]["alertname"]
            except KeyError:
                title = "Alertmanager"

            # assemble message body
            try:
                body = "{}\n{}".format(
                    alert["annotations"]["summary"], alert["annotations"]["description"]
                )

                if alert["status"] == "resolved":
                    body = body + "\nDuration: {}".format(
                        humanize.time.precisedelta(
                            dateutil.parser.parse(alert["startsAt"])
                            - dateutil.parser.parse(alert["endsAt"])
                        )
                    )
                else:
                    if "runbook_url" in alert["annotations"]:
                        body = body + " <{}|Runbook>".format(
                            alert["annotations"]["runbook_url"]
                        )
                    if "generatorURL" in alert["annotations"]:
                        body = body + " <{}|Source>".format(
                            alert["annotations"]["generatorURL"]
                        )
            except KeyError:
                body = "Unknown Alert:\n{}".format(alert)

            try:
                msg_context = "{account} - {region} - <{alert_manager_link}/#/alerts?receiver=alerthub-notifications|{cluster}>".format(
                    cluster=alert["labels"]["clusterName"],
                    region=alert["labels"]["awsRegion"],
                    account=get_alias(alert["labels"]["awsAccount"]),
                    alert_manager_link=msg["externalURL"],
                )
                body = body + "\n\n_{}_".format(msg_context)
            except KeyError:
                pass

    # ElasticCache snapshot notifications
    elif "ElastiCache:SnapshotComplete" in msg:
        title = "ElastiCache Snapshot complete."
        body = "Snapshot taken on {}".format(
            msg["ElastiCache:SnapshotComplete"])

    # ElasticCache replacement notifications
    elif "ElastiCache:NodeReplacementScheduled" in msg:
        title = "ElastiCache node replacement scheduled"
        body = "{} will be replaced between {} and {}".format(
            msg["ElastiCache:NodeReplacementScheduled"], msg["Start Time"], msg["End Time"])

    # ElasticCache replacement notifications
    elif "ElastiCache:CacheNodeReplaceStarted" in msg:
        title = "ElastiCache fail over started"
        body = "for node {}".format(msg["ElastiCache:CacheNodeReplaceStarted"])

    # ElasticCache replacement notifications
    elif "ElastiCache:FailoverComplete" in msg:
        title = "ElastiCache fail over complete"
        body = "for node {}".format(msg["ElastiCache:FailoverComplete"])

    # known RDS events
    elif "Event Source" in msg and msg['Event Source'] in ["db-instance", "db-cluster-snapshot", "db-snapshot"]:
        try:
            title = msg["Event Message"]
            try:
                name = " ({}).".format(
                    msg["Tags"]["Name"])
            except (KeyError, IndexError):
                name = ""

            body = "RDS {}: <{}|{}>{}\n<{}|Event docs>".format(msg["Event Source"].replace("db-", ""),
                                                               msg["Identifier Link"], msg["Source ID"], name, msg["Event ID"])

        except KeyError:
            msg_type = apprise.NotifyType.WARNING
            body = sns["Message"]
            pass

    # Basic ASG events
    elif "Event" in msg and msg["Event"] in ["autoscaling:EC2_INSTANCE_TERMINATE", "autoscaling:EC2_INSTANCE_LAUNCH"]:
        title = msg["Description"]
        body = msg["Cause"]

        try:
            msg_context = "{account} - {region} - <https://{region}.console.aws.amazon.com/ec2/home?region={region}#AutoScalingGroupDetails:id={asg};view=activity|{asg} ASG>".format(
                region=region,
                account=get_alias(msg["AccountId"]),
                asg=msg["AutoScalingGroupName"],
            )
            body = body + "\n\n_{}_".format(msg_context)
        except KeyError:
            pass

    else:
        title = "Unknown message type"
        msg_type = apprise.NotifyType.WARNING
        body = sns["Message"]

    apobj.notify(body=body, title=title, notify_type=msg_type)

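For a quick local smoke test the handler can also be called directly, without SNS or the Lambda runtime: it only reads `invoked_function_arn` from the context and one SNS record from the event. A minimal sketch, assuming the packages from requirements.txt are installed; `FakeContext` is a hypothetical stand-in and `json://localhost` is just an apprise target that keeps the notification local:

```
import json
import os

# WEBHOOK_URL is read at import time, so set it before importing app
os.environ.setdefault("WEBHOOK_URL", "json://localhost")

import app  # noqa: E402


class FakeContext:
    # only invoked_function_arn is used, to derive region and account id
    invoked_function_arn = "arn:aws:lambda:us-west-2:123456789012:function:SNSAlertHub"


event = {
    "Records": [{
        "Sns": {
            "Message": json.dumps({
                "Source": "CloudBender",
                "AWSAccountId": "123456789012",
                "Region": "us-west-2",
                "Level": "Info",
                "Subject": "Test",
                "Message": "Hello",
            })
        }
    }]
}

app.handler(event, FakeContext())
```
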
@@ -1,3 +0,0 @@
pytest
flake8
codespell

@@ -3,7 +3,8 @@
 import argparse
 import boto3
 
-parser = argparse.ArgumentParser(description='Implement basic public ECR lifecycle policy')
+parser = argparse.ArgumentParser(
+    description='Implement basic public ECR lifecycle policy')
 parser.add_argument('--repo', dest='repositoryName', action='store', required=True,
                     help='Name of the public ECR repository')
 parser.add_argument('--keep', dest='keep', action='store', default=10, type=int,
@@ -15,11 +16,12 @@ args = parser.parse_args()
 
 client = boto3.client('ecr-public', region_name='us-east-1')
 
-images = client.describe_images(repositoryName=args.repositoryName)["imageDetails"]
+images = client.describe_images(repositoryName=args.repositoryName)[
+    "imageDetails"]
 
 untagged = []
 kept = 0
 
 # actual Image
 # imageManifestMediaType: 'application/vnd.oci.image.manifest.v1+json'
 # image Index
@@ -31,30 +33,31 @@ for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
     # if registry uses image index all actual images will be untagged anyways
     if 'imageTags' not in image:
         untagged.append({"imageDigest": image['imageDigest']})
-        #print("Delete untagged image {}".format(image["imageDigest"]))
+        # print("Delete untagged image {}".format(image["imageDigest"]))
         continue
 
     # check for dev tags
     if args.delete_dev:
-        _delete=True
+        _delete = True
         for tag in image["imageTags"]:
             # Look for at least one tag NOT being a SemVer dev tag
             if "-" not in tag:
-                _delete=False
+                _delete = False
         if _delete:
             print("Deleting development image {}".format(image["imageTags"]))
             untagged.append({"imageDigest": image['imageDigest']})
             continue
 
     if kept < args.keep:
-        kept=kept+1
+        kept = kept+1
         print("Keeping tagged image {}".format(image["imageTags"]))
         continue
     else:
         print("Deleting tagged image {}".format(image["imageTags"]))
         untagged.append({"imageDigest": image['imageDigest']})
 
-deleted_images = client.batch_delete_image(repositoryName=args.repositoryName, imageIds=untagged)
+deleted_images = client.batch_delete_image(
+    repositoryName=args.repositoryName, imageIds=untagged)
 
 if deleted_images["imageIds"]:
     print("Deleted images: {}".format(deleted_images["imageIds"]))

@@ -1,9 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "config:recommended",
    ":label(renovate)",
    ":semanticCommits"
  ],
  "prHourlyLimit": 0
}

@@ -1,4 +0,0 @@
boto3==1.26.154
apprise==1.4.0
humanize==4.6.0
awslambdaric==2.0.4

@@ -1,2 +0,0 @@
[pytest]
addopts = --color=no

@@ -1,82 +0,0 @@
#!/usr/bin/env python
import json
import subprocess
import requests

from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry


s = requests.Session()
retries = Retry(
    total=3, backoff_factor=1, status_forcelist=[502, 503, 504], allowed_methods="POST"
)
s.mount("http://", HTTPAdapter(max_retries=retries))


class Test:
    @classmethod
    def setup_class(cls):
        cls.p = subprocess.Popen(
            "aws-lambda-rie python -m awslambdaric app.handler", shell=True
        )

    @classmethod
    def teardown_class(cls):
        cls.p.terminate()

    def send_event(self, event):
        self.p.poll()
        assert self.p.returncode is None, "aws-lambda-rie not running"
        s.post(
            "http://localhost:8080/2015-03-31/functions/function/invocations",
            json=event,
        )

    # Cloudwatch Alarm
    def test_cloudwatch_alarm(self):
        event = json.loads(
r' { "Records": [ { "EventSource": "aws:sns", "EventVersion": "1.0", "EventSubscriptionArn": "arn:aws:sns:us-west-2:123456789012:AlertHub:626fe212-7605-47f5-85ed-d77e68f448b9", "Sns": { "Type": "Notification", "MessageId": "a45aa76b-1a4e-55fc-9c26-fdfa56068892", "TopicArn": "arn:aws:sns:us-west-2:123456789012:AlertHub", "Subject": "ALARM: \"example-bastion-CPUCreditsAlarm-18414G67CHF2I\" in US West (Oregon)", "Message": "{\"AlarmName\":\"example-bastion-CPUCreditsAlarm-18414G67CHF2I\",\"AlarmDescription\":\"At least one instance/member of example-bastion.Asg in us-west-2 starts to consume surplus CPUCredits. Additional charges might incur after 24h! <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances-monitoring-cpu-credits.html|(AWS Docs)>\",\"AWSAccountId\":\"123456789012\",\"AlarmConfigurationUpdatedTimestamp\":\"2022-02-24T09:03:10.154+0000\",\"NewStateValue\":\"ALARM\",\"NewStateReason\":\"Threshold Crossed: 1 out of the last 1 datapoints [0.0 (24/02/22 08:59:00)] was less than the threshold (5.0) (minimum 1 datapoint for OK -> ALARM transition).\",\"StateChangeTime\":\"2022-02-24T09:04:08.879+0000\",\"Region\":\"US West (Oregon)\",\"AlarmArn\":\"arn:aws:cloudwatch:us-west-2:123456789012:alarm:example-bastion-CPUCreditsAlarm-18414G67CHF2I\",\"OldStateValue\":\"OK\",\"Trigger\":{\"MetricName\":\"CPUSurplusCreditBalance\",\"Namespace\":\"AWS/EC2\",\"StatisticType\":\"Statistic\",\"Statistic\":\"SUM\",\"Unit\":null,\"Dimensions\":[{\"value\":\"example-bastion-BastionAsg-1K4RJ9QCQF2P1\",\"name\":\"AutoScalingGroupName\"}],\"Period\":300,\"EvaluationPeriods\":1,\"DatapointsToAlarm\":1,\"ComparisonOperator\":\"LessThanThreshold\",\"Threshold\":5.0,\"TreatMissingData\":\"notBreaching\",\"EvaluateLowSampleCountPercentile\":\"\"}}", "Timestamp": "2022-02-24T09:04:08.941Z", "SignatureVersion": "1", "Signature": "eESZxAoYVNuaNd6yxFqrIRTi7vRR9+JFUiTkifp6GmWPr+TwwS85y3kBZqsQXsN9935cIC+S0POdrF2s0raVcBcZ+Ggzi6SE3PAcd+uZfV5zgCOHZGA5AABCsa5fHmmJvqNgDoJQFXTcDriBBoNCbtHhbPPN4O9W1uXFmg+jYNox3kWycgGDvsbsrS4kA7sPwqxJAtQSkY72jvCAnCa9M66hIgX3qR/RcUSSv9BQrELOpbuVAQ40l0LSwB8n2rPeNDNc95YbRLXSRCAAKM2Gd3+l0G4mQkoTozoPA752mIef5tz3bp8MdM4lKG4pjUuPRDKj6N+yuwmTiyq0mYDvSw==", "SigningCertUrl": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-7ff5318490ec183fbaddaa2a969abfda.pem", "UnsubscribeUrl": "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:123456789012:AlertHub:626fe212-7605-47f5-85ed-d77e68f448b9", "MessageAttributes": {} } } ] }'
        )
        self.send_event(event)

    # CloudBender launch event
    def test_cloudbender(self):
        event = json.loads(
r' { "Records": [ { "EventSource": "aws:sns", "EventVersion": "1.0", "EventSubscriptionArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub:0c04d2e7-32ec-4933-b913-84c7172e6d90", "Sns": { "Type": "Notification", "MessageId": "25bb2fd0-3221-5c07-aea1-76acf75017c3", "TopicArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub", "Subject": null, "Message": "{\n \"Source\": \"CloudBender\",\n \"AWSAccountId\": \"123456789012\",\n \"Region\": \"eu-central-1\",\n \"Artifact\": \"bastion\",\n \"Hostname\": \"bastion\",\n \"Asg\": \"zdt-bastion-BastionAsg-UABA2PIZV4TI\",\n \"Instance\": \"i-0d9f08855cdfcd740\",\n \"Subject\": \"Test\",\n \"Level\": \"Info\",\n \"Message\": \"Hello\",\n \"Attachment\": \"\",\n \"Emoji\": \"\"\n}\n", "Timestamp": "2021-07-29T15:03:13.318Z", "SignatureVersion": "1", "Signature": "OhLoCz8RWazyZ+ZHK03QLby6M3jmdtZvLWoNFygAHIaljQ0ZHsd6mc4TskDTnqpUpCtd/iIl+TLIPN8hYyflbLk2/cN3LXXWcSQ0GWqQ/bWIEhej54oCmUgZjIzrVfRlgz7mlUkhRnjQoRWYpcXRycQczMWuF2DCeIDP6v3ON53BxR8NdCeQXiquwoFlHaAaZIviRoUMqwp2Cl1T0NaBLeL9zmsdPvJF6EaXRbu3rqC1hdrA6E+nV2lzYNKg09POxA9JVpURmMEq3AC4tXm1Gu73PWQgWgoDSOQx+SOjMrbMeCAqf5R6typBV1BRDsGPkNkt9n4oto0FR9iyDmuWog==", "SigningCertUrl": "https://sns.eu-central-1.amazonaws.com/SimpleNotificationService-010a507c1833636cd94bdb98bd93083a.pem", "UnsubscribeUrl": "https://sns.eu-central-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:eu-central-1:123456789012:AlertHub:0c04d2e7-32ec-4933-b913-84c7172e6d90", "MessageAttributes": {} } } ] }'
        )
        self.send_event(event)

    # Alertmanager
    def test_alertmanager(self):
        event = json.loads(
r' { "Records": [ { "EventSource": "aws:sns", "EventVersion": "1.0", "EventSubscriptionArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub:0e7ce1ba-c3e4-4264-bae1-4eb71c91235a", "Sns": { "Type": "Notification", "MessageId": "10ae86eb-9ddc-5c2f-806c-df6ecb6bde42", "TopicArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub", "Subject": null, "Message": "{\"receiver\":\"alerthub-notifications\",\"status\":\"firing\",\"alerts\":[{\"status\":\"firing\",\"labels\":{\"alertname\":\"KubeVersionMismatch\",\"awsRegion\":\"eu-central-1\",\"clusterName\":\"test\",\"prometheus\":\"monitoring/metrics-kube-prometheus-st-prometheus\",\"severity\":\"warning\"},\"annotations\":{\"description\":\"There are 2 different semantic versions of Kubernetes components running.\",\"runbook_url\":\"https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeversionmismatch\",\"summary\":\"Different semantic versions of Kubernetes components running.\"},\"startsAt\":\"2021-08-04T13:17:40.31Z\",\"endsAt\":\"0001-01-01T00:00:00Z\",\"generatorURL\":\"https://prometheus/graph?g0.expr=count%28count+by%28git_version%29+%28label_replace%28kubernetes_build_info%7Bjob%21~%22kube-dns%7Ccoredns%22%7D%2C+%22git_version%22%2C+%22%241%22%2C+%22git_version%22%2C+%22%28v%5B0-9%5D%2A.%5B0-9%5D%2A%29.%2A%22%29%29%29+%3E+1\\u0026g0.tab=1\",\"fingerprint\":\"5f94d4a22730c666\"}],\"groupLabels\":{},\"commonLabels\":{\"alertname\":\"KubeVersionMismatch\",\"awsRegion\":\"eu-central-1\",\"clusterName\":\"test\",\"prometheus\":\"monitoring/metrics-kube-prometheus-st-prometheus\",\"severity\":\"warning\"},\"commonAnnotations\":{\"description\":\"There are 2 different semantic versions of Kubernetes components running.\",\"runbook_url\":\"https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeversionmismatch\",\"summary\":\"Different semantic versions of Kubernetes components running.\"},\"externalURL\":\"https://alertmanager\",\"version\":\"4\",\"groupKey\":\"{}:{}\",\"truncatedAlerts\":0}\n", "Timestamp": "2021-08-05T03:01:11.233Z", "SignatureVersion": "1", "Signature": "pSUYO7LDIfzCbBrp/S2HXV3/yzls3vfYy+2di6HsKG8Mf+CV97RLnen15ieAo3eKA8IfviZIzyREasbF0cwfUeruHPbW1B8kO572fDyV206zmUxvR63r6oM6OyLv9XKBmvyYHKawkOgHZHEMP3v1wMIIHK2W5KbJtXoUcks5DVamooVb9iFF58uqTf+Ccy31bOL4tFyMR9nr8NU55vEIlGEVno8A9Q21TujdZTg0V0BmRgPafcic96udWungjmfhZ005378N32u2hlLj6BRneTpHHSXHBw4wKZreKpX+INZwiZ4P8hzVfgRvAIh/4gXN9+0UJSHgnsaqUcLDNoLZTQ==", "SigningCertUrl": "https://sns.eu-central-1.amazonaws.com/SimpleNotificationService-010a507c1833636cd94bdb98bd93083a.pem", "UnsubscribeUrl": "https://sns.eu-central-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:eu-central-1:123456789012:AlertHub:0e7ce1ba-c3e4-4264-bae1-4eb71c91235a", "MessageAttributes": {} } } ] }'
        )
        self.send_event(event)

    # Alertmanager Multiple
    def test_alertmanager_multi(self):
        event = json.loads(
r' { "Records": [ { "EventSource": "aws:sns", "EventVersion": "1.0", "EventSubscriptionArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub:0e7ce1ba-c3e4-4264-bae1-4eb71c91235a", "Sns": { "Type": "Notification", "MessageId": "10ae86eb-9ddc-5c2f-806c-df6ecb6bde42", "TopicArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub", "Subject": null, "Message": "{\"receiver\":\"alerthub-notifications\",\"status\":\"resolved\",\"alerts\":[{\"status\":\"resolved\",\"labels\":{\"alertname\":\"KubeDeploymentReplicasMismatch\",\"awsAccount\":\"123456789012\",\"awsRegion\":\"us-west-2\",\"clusterName\":\"test-cluster\",\"container\":\"kube-state-metrics\",\"deployment\":\"example-job\",\"endpoint\":\"http\",\"instance\":\"10.244.202.71:8080\",\"job\":\"kube-state-metrics\",\"namespace\":\"default\",\"pod\":\"metrics-kube-state-metrics-56546f44c7-h57jx\",\"prometheus\":\"monitoring/metrics-kube-prometheus-st-prometheus\",\"service\":\"metrics-kube-state-metrics\",\"severity\":\"warning\"},\"annotations\":{\"description\":\"Deployment default/example-job has not matched the expected number of replicas for longer than 15 minutes.\",\"runbook_url\":\"https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentreplicasmismatch\",\"summary\":\"Deployment has not matched the expected number of replicas.\"},\"startsAt\":\"2021-09-29T12:36:11.394Z\",\"endsAt\":\"2021-09-29T14:51:11.394Z\",\"generatorURL\":\"https://prometheus.dev.example.com/graph?g0.expr=%28kube_deployment_spec_replicas%7Bjob%3D%22kube-state-metrics%22%7D+%3E+kube_deployment_status_replicas_available%7Bjob%3D%22kube-state-metrics%22%7D%29+and+%28changes%28kube_deployment_status_replicas_updated%7Bjob%3D%22kube-state-metrics%22%7D%5B10m%5D%29+%3D%3D+0%29\\u0026g0.tab=1\",\"fingerprint\":\"59ad2f1a4567b43b\"},{\"status\":\"firing\",\"labels\":{\"alertname\":\"KubeVersionMismatch\",\"awsRegion\":\"eu-central-1\",\"clusterName\":\"test\",\"prometheus\":\"monitoring/metrics-kube-prometheus-st-prometheus\",\"severity\":\"warning\"},\"annotations\":{\"description\":\"There are 2 different semantic versions of Kubernetes components running.\",\"runbook_url\":\"https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeversionmismatch\",\"summary\":\"Different semantic versions of Kubernetes components running.\"},\"startsAt\":\"2021-08-04T13:17:40.31Z\",\"endsAt\":\"0001-01-01T00:00:00Z\",\"generatorURL\":\"https://prometheus/graph?g0.expr=count%28count+by%28git_version%29+%28label_replace%28kubernetes_build_info%7Bjob%21~%22kube-dns%7Ccoredns%22%7D%2C+%22git_version%22%2C+%22%241%22%2C+%22git_version%22%2C+%22%28v%5B0-9%5D%2A.%5B0-9%5D%2A%29.%2A%22%29%29%29+%3E+1\\u0026g0.tab=1\",\"fingerprint\":\"5f94d4a22730c666\"}],\"groupLabels\":{\"job\":\"kube-state-metrics\"},\"commonLabels\":{\"alertname\":\"KubeDeploymentReplicasMismatch\",\"awsAccount\":\"123456789012\",\"awsRegion\":\"us-west-2\",\"clusterName\":\"test-cluster\",\"container\":\"kube-state-metrics\",\"deployment\":\"example-job\",\"endpoint\":\"http\",\"instance\":\"10.244.202.71:8080\",\"job\":\"kube-state-metrics\",\"namespace\":\"default\",\"pod\":\"metrics-kube-state-metrics-56546f44c7-h57jx\",\"prometheus\":\"monitoring/metrics-kube-prometheus-st-prometheus\",\"service\":\"metrics-kube-state-metrics\",\"severity\":\"warning\"},\"commonAnnotations\":{\"description\":\"Deployment default/example-job has not matched the expected number of replicas for longer than 15 
minutes.\",\"runbook_url\":\"https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentreplicasmismatch\",\"summary\":\"Deployment has not matched the expected number of replicas.\"},\"externalURL\":\"https://alertmanager.dev.example.com\",\"version\":\"4\",\"groupKey\":\"{}:{job=\\\"kube-state-metrics\\\"}\",\"truncatedAlerts\":0}\n", "Timestamp": "2021-08-05T03:01:11.233Z", "SignatureVersion": "1", "Signature": "pSUYO7LDIfzCbBrp/S2HXV3/yzls3vfYy+2di6HsKG8Mf+CV97RLnen15ieAo3eKA8IfviZIzyREasbF0cwfUeruHPbW1B8kO572fDyV206zmUxvR63r6oM6OyLv9XKBmvyYHKawkOgHZHEMP3v1wMIIHK2W5KbJtXoUcks5DVamooVb9iFF58uqTf+Ccy31bOL4tFyMR9nr8NU55vEIlGEVno8A9Q21TujdZTg0V0BmRgPafcic96udWungjmfhZ005378N32u2hlLj6BRneTpHHSXHBw4wKZreKpX+INZwiZ4P8hzVfgRvAIh/4gXN9+0UJSHgnsaqUcLDNoLZTQ==", "SigningCertUrl": "https://sns.eu-central-1.amazonaws.com/SimpleNotificationService-010a507c1833636cd94bdb98bd93083a.pem", "UnsubscribeUrl": "https://sns.eu-central-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:eu-central-1:123456789012:AlertHub:0e7ce1ba-c3e4-4264-bae1-4eb71c91235a", "MessageAttributes": {} } } ] }'
        )
        self.send_event(event)

    # ElastiCache snapshot
    def test_elasticache_snapshot(self):
        event = json.loads(
r' {"Records": [{"EventSource": "aws:sns", "EventVersion": "1.0", "EventSubscriptionArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub:0e7ce1ba-c3e4-4264-bae1-4eb71c91235a", "Sns": {"Type": "Notification", "MessageId": "10ae86eb-9ddc-5c2f-806c-df6ecb6bde42", "TopicArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub", "Subject": null, "Message": "{\"ElastiCache:SnapshotComplete\":\"redis-prod-0002-001\"}" }}]}'
        )
        self.send_event(event)

    # RDS
    def test_rds_event(self):
        event = json.loads(
r' {"Records": [{"Event Source":"db-instance","Event Time":"2023-06-15 06:13:18.237","Identifier Link":"https://console.aws.amazon.com/rds/home?region=us-west-2#dbinstance:id=vrv6b014c5jbdf","Source ID":"vrv6b014c5jbdf","Source ARN":"arn:aws:rds:us-west-2:123456789012:db:vrv6b014c5jbdf","Event ID":"http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html#RDS-EVENT-0001","Event Message":"Backing up DB instance","Tags":{"aws:cloudformation:stack-name":"postgres-rds","aws:cloudformation:stack-id":"arn:aws:cloudformation:us-west-2:123456789012:stack/postgres-rds/c7382650-46a7-11ea-bd3f-064fbe1c973c","Conglomerate":"test","aws:cloudformation:logical-id":"RdsDBInstance","Artifact":"postgres-rds","Name":"postgres-rds.DBInstance"}}]}'
        )
        self.send_event(event)

    def test_asg(self):
        event = json.loads(
r' {"Records": [{"EventSource": "aws:sns", "EventVersion": "1.0", "EventSubscriptionArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub:0e7ce1ba-c3e4-4264-bae1-4eb71c91235a", "Sns": {"Type": "Notification", "MessageId": "10ae86eb-9ddc-5c2f-806c-df6ecb6bde42", "TopicArn": "arn:aws:sns:eu-central-1:123456789012:AlertHub", "Subject": null, "Message": "{\"Origin\":\"AutoScalingGroup\",\"Destination\":\"EC2\",\"Progress\":50,\"AccountId\":\"123456789012\",\"Description\":\"Terminating EC2 instance: i-023ca42b188ffd91d\",\"RequestId\":\"1764cac3-224b-46bf-8bed-407a5b868e63\",\"EndTime\":\"2023-05-15T08:51:16.195Z\",\"AutoScalingGroupARN\":\"arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:4a4fb6e3-22b4-487b-8335-3904f02ff9fd:autoScalingGroupName/powerbi\",\"ActivityId\":\"1764cac3-224b-46bf-8bed-407a5b868e63\",\"StartTime\":\"2023-05-15T08:50:14.145Z\",\"Service\":\"AWS Auto Scaling\",\"Time\":\"2023-05-15T08:51:16.195Z\",\"EC2InstanceId\":\"i-023ca42b188ffd91d\",\"StatusCode\":\"InProgress\",\"StatusMessage\":\"\",\"Details\":{\"Subnet ID\":\"subnet-fe2d6189\",\"Availability Zone\":\"us-west-2a\"},\"AutoScalingGroupName\":\"powerbi\",\"Cause\":\"At 2023-05-15T08:50:03Z the scheduled action end executed. Setting min size from 1 to 0. Setting desired capacity from 1 to 0. At 2023-05-15T08:50:03Z a scheduled action update of AutoScalingGroup constraints to min: 0, max: 1, desired: 0 changing the desired capacity from 1 to 0. At 2023-05-15T08:50:13Z an instance was taken out of service in response to a difference between desired and actual capacity, shrinking the capacity from 1 to 0. At 2023-05-15T08:50:14Z instance i-023ca42b188ffd91d was selected for termination.\",\"Event\":\"autoscaling:EC2_INSTANCE_TERMINATE\"}" }}]}'
        )
        self.send_event(event)