kube/rosdump/application.yml

apiVersion: v1
kind: ConfigMap
metadata:
  name: rosdump-config
data:
  script.sh: |
    #!/bin/bash
    set -e
    if [ -d rosdump ]; then
      echo "Pulling Git repo"
      cd rosdump
      git pull
    else
      echo "Cloning Git repo"
      git clone git@git.k-space.ee:k-space/rosdump.git
      cd rosdump
    fi
    # Drop all previously exported configs so targets removed from the
    # list below also disappear from the repository
    git rm *.k-space.ee
    for target in $(cat /config/targets | grep -v '^#'); do
      echo "Exporting configuration for $target"
      # Drop the export header comments (serial number, timestamped
      # "by RouterOS" line) before writing the dump
      ssh rosdump@$target '/export' | grep -v '^# serial number =' | grep -v '^#.* by RouterOS' > $target
      git add $target
    done
    if [[ `git status --porcelain` ]]; then
      echo "Attempting Git check in"
      git commit -m "$(git diff --cached --shortstat)"
      git push
    else
      echo "No changes to commit"
    fi
  targets: |
    router.mgmt.k-space.ee
    sw_core01.mgmt.k-space.ee
    sw_core02.mgmt.k-space.ee
    sw_mgmt.mgmt.k-space.ee
    sw_poe.mgmt.k-space.ee
    sw_ha.mgmt.k-space.ee
    sw_cyber.mgmt.k-space.ee
    sw_chaos.mgmt.k-space.ee
    sw_asocial.mgmt.k-space.ee
    sw_kitchen.mgmt.k-space.ee
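# Each target above needs a local account matching the ssh call in script.sh.
# A rough sketch of the RouterOS side, assuming the public half of
# ssh_identity has already been uploaded to the device as rosdump.pub
# (file and user names are placeholders, not taken from this repo):
#
#   /user add name=rosdump group=read
#   /user ssh-keys import public-key-file=rosdump.pub user=rosdump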
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: rosdump-cronjob
spec:
  schedule: "0 * * * *"
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      activeDeadlineSeconds: 300
      template:
        spec:
          nodeSelector:
            dedicated: monitoring
          tolerations:
            - key: dedicated
              operator: Equal
              value: monitoring
              effect: NoSchedule
          restartPolicy: OnFailure
          containers:
            - name: rosdump
              image: harbor.k-space.ee/k-space/microscript-base
              imagePullPolicy: Always
              args:
                - bash
                - /config/script.sh
              volumeMounts:
                - name: config
                  mountPath: /config
          volumes:
            - name: config
              projected:
                sources:
                  - secret:
                      name: rosdump-secrets
                      items:
                        - key: ssh_identity
                          path: ssh_identity
                          mode: 0600
                  - configMap:
                      name: rosdump-known-hosts
                      items:
                        - key: ssh_known_hosts
                          path: ssh_known_hosts
                  - configMap:
                      name: rosdump-config
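# The projected volume above references a Secret (rosdump-secrets) and a
# ConfigMap (rosdump-known-hosts) that are not defined in this file. One way
# to create them out of band, assuming the SSH private key and a pre-scanned
# known_hosts file exist locally (the paths below are placeholders):
#
#   kubectl create secret generic rosdump-secrets \
#     --from-file=ssh_identity=./id_ed25519
#   kubectl create configmap rosdump-known-hosts \
#     --from-file=ssh_known_hosts=./known_hosts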
---
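# Egress for all pods in this namespace is limited to the gitea namespace
# (the git clone/push over SSH) and the two address ranges below, presumably
# the management networks behind the *.mgmt.k-space.ee targets; other egress
# is blocked unless another policy in the namespace allows it.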
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: rosdump
spec:
  podSelector: {}
  policyTypes:
    - Egress
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: gitea
        - ipBlock:
            cidr: 172.23.0.0/24
        - ipBlock:
            cidr: 100.102.1.0/24
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: rosdump
spec:
  groups:
    - name: rosdump
      rules:
        - alert: MikrotikBackupsBroken
          expr: absent(kube_cronjob_status_last_successful_time{cronjob="rosdump-cronjob"}) or time() - kube_cronjob_status_last_successful_time{cronjob="rosdump-cronjob"} > 3600
          for: 4h
          labels:
            severity: warning
          annotations:
            summary: Mikrotik backups are broken
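# The alert above fires once the CronJob has had no successful run for more
# than an hour, sustained for 4 hours (or the metric is missing from
# kube-state-metrics entirely). For debugging, a run can be triggered by hand;
# the job name below is arbitrary:
#
#   kubectl create job --from=cronjob/rosdump-cronjob rosdump-manual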