Compare commits

1 commit

Commit 6b635b6dc7 (2022-11-04 11:39:36 +02:00): proxmox: first attempt to move to ingressroute

41 changed files with 30950 additions and 6621 deletions

View File

@@ -155,8 +155,7 @@ these should be handled by `tls:` section in Ingress.
 ## Cluster formation
-Created Ubuntu 22.04 VM-s on Proxmox with local storage.
-Added some ARM64 workers by using Ubuntu 22.04 server on Raspberry Pi.
+Create Ubuntu 20.04 VM-s on Proxmox with local storage.
 After machines have booted up and you can reach them via SSH:
@@ -174,13 +173,6 @@ net.ipv4.conf.all.accept_redirects = 0
 net.bridge.bridge-nf-call-iptables = 1
 net.ipv4.ip_forward = 1
 net.bridge.bridge-nf-call-ip6tables = 1
-# Elasticsearch needs this
-vm.max_map_count = 524288
-# Bump inotify limits to make sure
-fs.inotify.max_user_instances=1280
-fs.inotify.max_user_watches=655360
 EOF
 sysctl --system
@@ -194,23 +186,32 @@ nameserver 8.8.8.8
 EOF
 # Disable multipathd as Longhorn handles that itself
-systemctl mask multipathd snapd
-systemctl disable --now multipathd snapd bluetooth ModemManager hciuart wpa_supplicant packagekit
+systemctl mask multipathd
+systemctl disable multipathd
+systemctl stop multipathd
+# Disable Snapcraft
+systemctl mask snapd
+systemctl disable snapd
+systemctl stop snapd
 # Permit root login
 sed -i -e 's/PermitRootLogin no/PermitRootLogin without-password/' /etc/ssh/sshd_config
 systemctl reload ssh
-cat ~ubuntu/.ssh/authorized_keys > /root/.ssh/authorized_keys
+cat << EOF > /root/.ssh/authorized_keys
+sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBD4/e9SWYWYoNZMkkF+NirhbmHuUgjoCap42kAq0pLIXFwIqgVTCre03VPoChIwBClc8RspLKqr5W3j0fG8QwnQAAAAEc3NoOg== lauri@lauri-x13
+EOF
 userdel -f ubuntu
-apt-get install -yqq linux-image-generic
-apt-get remove -yq cloud-init linux-image-*-kvm
+apt-get remove -yq cloud-init
 ```
-Install packages:
+Install packages, for Raspbian set `OS=Debian_11`
 ```bash
-OS=xUbuntu_22.04
-VERSION=1.24
+OS=xUbuntu_20.04
+VERSION=1.23
 cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
 deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /
 EOF
@@ -218,26 +219,17 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cr
 deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /
 EOF
-rm -fv /etc/apt/trusted.gpg
-curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | gpg --dearmor > /etc/apt/trusted.gpg.d/libcontainers.gpg
-curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | gpg --dearmor > /etc/apt/trusted.gpg.d/libcontainers-cri-o.gpg
-curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg > /etc/apt/trusted.gpg.d/packages-cloud-google.gpg
+curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
+curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers-cri-o.gpg add -
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
 echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
 apt-get update
-apt-get install -yqq --allow-change-held-packages apt-transport-https curl cri-o cri-o-runc kubelet=1.24.10-00 kubectl=1.24.10-00 kubeadm=1.24.10-00
-cat << \EOF > /etc/containers/registries.conf
-unqualified-search-registries = ["docker.io"]
-# To pull Docker images from a mirror uncomment following
-#[[registry]]
-#prefix = "docker.io"
-#location = "mirror.gcr.io"
-EOF
-sudo systemctl restart crio
+apt-get install -yqq apt-transport-https curl cri-o cri-o-runc kubelet=1.23.5-00 kubectl=1.23.5-00 kubeadm=1.23.5-00
 sudo systemctl daemon-reload
 sudo systemctl enable crio --now
 apt-mark hold kubelet kubeadm kubectl
+sed -i -e 's/unqualified-search-registries = .*/unqualified-search-registries = ["docker.io"]/' /etc/containers/registries.conf
 ```
 On master:
@@ -248,16 +240,6 @@ kubeadm init --token-ttl=120m --pod-network-cidr=10.244.0.0/16 --control-plane-e
 For the `kubeadm join` command specify FQDN via `--node-name $(hostname -f)`.
-Set AZ labels:
-```
-for j in $(seq 1 9); do
-  for t in master mon worker storage; do
-    kubectl label nodes ${t}${j}.kube.k-space.ee topology.kubernetes.io/zone=node${j}
-  done
-done
-```
 After forming the cluster add taints:
 ```bash
@@ -265,7 +247,7 @@ for j in $(seq 1 9); do
   kubectl label nodes worker${j}.kube.k-space.ee node-role.kubernetes.io/worker=''
 done
-for j in $(seq 1 4); do
+for j in $(seq 1 3); do
   kubectl taint nodes mon${j}.kube.k-space.ee dedicated=monitoring:NoSchedule
   kubectl label nodes mon${j}.kube.k-space.ee dedicated=monitoring
 done
@@ -276,26 +258,15 @@ for j in $(seq 1 4); do
 done
 ```
-On Raspberry Pi you need to take additional steps:
-* Manually enable cgroups by appending
-  `cgroup_memory=1 cgroup_enable=memory` to `/boot/cmdline.txt`,
-* Disable swap with `swapoff -a; apt-get purge -y dphys-swapfile`
-* For mounting Longhorn volumes on Raspbian install `open-iscsi`
 For `arm64` nodes add suitable taint to prevent scheduling non-multiarch images on them:
 ```bash
 kubectl taint nodes worker9.kube.k-space.ee arch=arm64:NoSchedule
 ```
-For door controllers:
-```
-for j in ground front back; do
-  kubectl taint nodes door-${j}.kube.k-space.ee dedicated=door:NoSchedule
-  kubectl label nodes door-${j}.kube.k-space.ee dedicated=door
-  kubectl taint nodes door-${j}.kube.k-space.ee arch=arm64:NoSchedule
-done
-```
-To reduce wear on storage:
-```
-echo StandardOutput=null >> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
-systemctl daemon-reload
-systemctl restart kubelet
-```
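
As a side note, the `kubeadm join` flow referenced above is: generate the command on the master, then run it on each node with the FQDN appended. A minimal sketch (the endpoint, token and hash below are placeholders, not values from this repository):

```bash
# On the master: print a join command with a fresh bootstrap token
kubeadm token create --print-join-command
# On each joining node: run the printed command, adding the node name flag
kubeadm join master.kube.k-space.ee:6443 \
    --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --node-name $(hostname -f)
```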

View File

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: logmower
namespace: argocd
spec:
project: default
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: logmower
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: logmower
syncPolicy:
syncOptions:
- CreateNamespace=true

View File

@@ -1,16 +1,7 @@
 To apply changes:
 ```
-kubectl apply -n camtiler \
-  -f application.yml \
-  -f persistence.yml \
-  -f mongoexpress.yml \
-  -f mongodb-support.yml \
-  -f camera-tiler.yml \
-  -f logmower.yml \
-  -f ingress.yml \
-  -f network-policies.yml \
-  -f networkpolicy-base.yml
+kubectl apply -n camtiler -f application.yml -f persistence.yml -f mongoexpress.yml -f mongodb-support.yml -f networkpolicy-base.yml -f minio-support.yml
 ```
 To deploy changes:
@@ -24,16 +15,15 @@ To initialize secrets:
 ```
 kubectl create secret generic -n camtiler mongodb-application-readwrite-password --from-literal="password=$(cat /dev/urandom | base64 | head -c 30)"
 kubectl create secret generic -n camtiler mongodb-application-readonly-password --from-literal="password=$(cat /dev/urandom | base64 | head -c 30)"
-kubectl create secret generic -n camtiler minio-secrets \
+kubectl create secret generic -n camtiler minio-secret \
-  --from-literal=accesskey=application \
-  --from-literal=secretkey=$(cat /dev/urandom | base64 | head -c 30)
-kubectl create secret generic -n camtiler minio-env-configuration \
-  --from-literal="MINIO_BROWSER=off" \
   --from-literal="MINIO_ROOT_USER=root" \
-  --from-literal="MINIO_ROOT_PASSWORD=$(cat /dev/urandom | base64 | head -c 30)"
+  --from-literal="MINIO_ROOT_PASSWORD=$(cat /dev/urandom | base64 | head -c 30)" \
+  --from-literal="MINIO_STORAGE_CLASS_STANDARD=EC:4"
 kubectl -n camtiler create secret generic camera-secrets \
   --from-literal=username=... \
   --from-literal=password=...
 ```
-To restart all deployments:
-```
-for j in $(kubectl get deployments -n camtiler -o name); do kubectl rollout restart -n camtiler $j; done
-```
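
As a quick sanity check after initializing the secrets, one can confirm everything the manifests reference actually exists; a minimal sketch (secret names as on the right-hand side of this diff):

```bash
# Exits non-zero if any of the referenced secrets is missing
kubectl get secret -n camtiler \
    mongodb-application-readwrite-password \
    mongodb-application-readonly-password \
    minio-secret \
    camera-secrets
```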

View File

@@ -1,4 +1,388 @@
 ---
apiVersion: apps/v1
kind: Deployment
metadata:
name: camtiler
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: camtiler
template:
metadata:
labels:
app.kubernetes.io/name: camtiler
component: camtiler
spec:
serviceAccountName: camtiler
containers:
- name: camtiler
image: harbor.k-space.ee/k-space/camera-tiler:latest
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
ports:
- containerPort: 5001
name: "http"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: log-viewer-frontend
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: log-viewer-frontend
template:
metadata:
labels:
app.kubernetes.io/name: log-viewer-frontend
spec:
containers:
- name: log-viewer-frontend
image: harbor.k-space.ee/k-space/log-viewer-frontend:latest
# securityContext:
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: log-viewer-backend
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
spec:
revisionHistoryLimit: 0
replicas: 3
selector:
matchLabels:
app.kubernetes.io/name: log-viewer-backend
template:
metadata:
labels:
app.kubernetes.io/name: log-viewer-backend
spec:
containers:
- name: log-backend-backend
image: harbor.k-space.ee/k-space/log-viewer:latest
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
env:
- name: MONGO_URI
valueFrom:
secretKeyRef:
name: mongodb-application-readwrite
key: connectionString.standard
- name: MINIO_BUCKET
value: application
- name: MINIO_HOSTNAME
value: cams-s3.k-space.ee
- name: MINIO_PORT
value: "443"
- name: MINIO_SCHEME
value: "https"
- name: MINIO_SECRET_KEY
valueFrom:
secretKeyRef:
name: minio-secret
key: secretkey
- name: MINIO_ACCESS_KEY
valueFrom:
secretKeyRef:
name: minio-secret
key: accesskey
---
apiVersion: v1
kind: Service
metadata:
name: log-viewer-frontend
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: log-viewer-frontend
ports:
- protocol: TCP
port: 3003
---
apiVersion: v1
kind: Service
metadata:
name: log-viewer-backend
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: log-viewer-backend
ports:
- protocol: TCP
port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: camtiler
labels:
component: camtiler
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: camtiler
ports:
- protocol: TCP
port: 5001
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: camtiler
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: camtiler
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: camtiler
subjects:
- kind: ServiceAccount
name: camtiler
apiGroup: ""
roleRef:
kind: Role
name: camtiler
apiGroup: ""
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: camtiler
annotations:
kubernetes.io/ingress.class: traefik
# This tells Traefik this Ingress object is associated with the
# https:// entrypoint
# Global http:// to https:// redirect is enabled in
# ../traefik/values.yml using `globalArguments`
traefik.ingress.kubernetes.io/router.entrypoints: websecure
# Following enables Authelia intercepting middleware
# which makes sure user is authenticated and then
# proceeds to inject Remote-User header for the application
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
# Following tells external-dns to add CNAME entry which makes
# cams.k-space.ee point to same IP address as traefik.k-space.ee
# The A record for traefik.k-space.ee is created via annotation
# added in ../traefik/ingress.yml
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: cams.k-space.ee
http:
paths:
- pathType: Prefix
path: "/tiled"
backend:
service:
name: camtiler
port:
number: 5001
- pathType: Prefix
path: "/events"
backend:
service:
name: log-viewer-backend
port:
number: 3002
- pathType: Prefix
path: "/"
backend:
service:
name: log-viewer-frontend
port:
number: 3003
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: camera-motion-detect
spec:
podSelector:
matchLabels:
component: camdetect
policyTypes:
- Ingress
- Egress
ingress:
- from:
- podSelector:
matchLabels:
component: camtiler
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: prometheus-operator
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
egress:
- to:
- ipBlock:
# Permit access to cameras outside the cluster
cidr: 100.102.0.0/16
- to:
- podSelector:
matchLabels:
app: mongodb-svc
ports:
- port: 27017
- to:
- podSelector:
matchLabels:
v1.min.io/tenant: minio
ports:
- port: 9000
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: camera-tiler
spec:
podSelector:
matchLabels:
component: camtiler
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
component: camdetect
ports:
- port: 5000
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: prometheus-operator
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: log-viewer-backend
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: log-viewer-backend
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
app: mongodb-svc
- to:
# Minio access via Traefik's public endpoint
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: log-viewer-frontend
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: log-viewer-frontend
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: cams-s3.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: minio
port:
number: 80
tls:
- hosts:
- "*.k-space.ee"
---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
@@ -89,13 +473,12 @@ spec:
 metadata:
 name: foobar
 labels:
-app.kubernetes.io/name: foobar
-component: camera-motion-detect
+component: camdetect
 spec:
 type: ClusterIP
 selector:
 app.kubernetes.io/name: foobar
-component: camera-motion-detect
+component: camdetect
 ports:
 - protocol: TCP
 port: 80
@@ -110,15 +493,14 @@ spec:
 keel.sh/policy: force
 keel.sh/trigger: poll
 spec:
-revisionHistoryLimit: 0
 replicas: 1
-# Make sure we do not congest the network during rollout
 strategy:
 type: RollingUpdate
 rollingUpdate:
-# Swap following two with replicas: 2
-maxSurge: 1
-maxUnavailable: 0
+maxSurge: 0
+maxUnavailable: 1
 selector:
 matchLabels:
 app.kubernetes.io/name: foobar
@@ -126,25 +508,18 @@ spec:
 metadata:
 labels:
 app.kubernetes.io/name: foobar
-component: camera-motion-detect
+component: camdetect
 spec:
 containers:
-- name: camera-motion-detect
+- name: camdetect
 image: harbor.k-space.ee/k-space/camera-motion-detect:latest
-startupProbe:
-httpGet:
-path: /healthz
-port: 5000
-initialDelaySeconds: 2
-periodSeconds: 180
-timeoutSeconds: 60
 readinessProbe:
 httpGet:
 path: /readyz
 port: 5000
-initialDelaySeconds: 60
-periodSeconds: 60
-timeoutSeconds: 5
+initialDelaySeconds: 10
+periodSeconds: 180
+timeoutSeconds: 60
 ports:
 - containerPort: 5000
 name: "http"
@@ -154,7 +529,7 @@ spec:
 cpu: "200m"
 limits:
 memory: "256Mi"
-cpu: "4000m"
+cpu: "1"
 securityContext:
 readOnlyRootFilesystem: true
 runAsNonRoot: true
@@ -182,13 +557,13 @@ spec:
 - name: AWS_SECRET_ACCESS_KEY
 valueFrom:
 secretKeyRef:
-name: minio-secrets
-key: MINIO_ROOT_PASSWORD
+name: minio-secret
+key: secretkey
 - name: AWS_ACCESS_KEY_ID
 valueFrom:
 secretKeyRef:
-name: minio-secrets
-key: MINIO_ROOT_USER
+name: minio-secret
+key: accesskey
 # Make sure 2+ pods of same camera are scheduled on different hosts
 affinity:
@@ -196,7 +571,7 @@ spec:
 requiredDuringSchedulingIgnoredDuringExecution:
 - labelSelector:
 matchExpressions:
-- key: app.kubernetes.io/name
+- key: app
 operator: In
 values:
 - foobar
@@ -210,7 +585,18 @@ spec:
 labelSelector:
 matchLabels:
 app.kubernetes.io/name: foobar
-component: camera-motion-detect
+component: camdetect
+---
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+name: camtiler
+spec:
+selector: {}
+podMetricsEndpoints:
+- port: http
+podTargetLabels:
+- app.kubernetes.io/name
 ---
 apiVersion: monitoring.coreos.com/v1
 kind: PrometheusRule
@@ -221,21 +607,21 @@ spec:
 - name: cameras
 rules:
 - alert: CameraLost
-expr: rate(camtiler_frames_total{stage="downloaded"}[1m]) < 1
+expr: rate(camdetect_rx_frames_total[2m]) < 1
 for: 2m
 labels:
 severity: warning
 annotations:
 summary: Camera feed stopped
 - alert: CameraServerRoomMotion
-expr: rate(camtiler_events_total{app_kubernetes_io_name="server-room"}[30m]) > 0
+expr: camdetect_event_active {app="camdetect-server-room"} > 0
 for: 1m
 labels:
 severity: warning
 annotations:
 summary: Motion was detected in server room
 - alert: CameraSlowUploads
-expr: camtiler_queue_frames{stage="upload"} > 10
+expr: rate(camdetect_upload_dropped_frames_total[2m]) > 1
 for: 5m
 labels:
 severity: warning
@@ -243,20 +629,13 @@ spec:
 summary: Motion detect snapshots are piling up and
 not getting uploaded to S3
 - alert: CameraSlowProcessing
-expr: camtiler_queue_frames{stage="download"} > 10
+expr: rate(camdetect_download_dropped_frames_total[2m]) > 1
 for: 5m
 labels:
 severity: warning
 annotations:
 summary: Motion detection processing pipeline is not keeping up
 with incoming frames
-- alert: CameraResourcesThrottled
-expr: sum by (pod) (rate(container_cpu_cfs_throttled_periods_total{namespace="camtiler"}[1m])) > 0
-for: 5m
-labels:
-severity: warning
-annotations:
-summary: CPU limits are bottleneck
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -265,7 +644,6 @@ metadata:
 spec:
 target: http://user@workshop.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -274,7 +652,6 @@ metadata:
 spec:
 target: http://user@server-room.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -283,7 +660,6 @@ metadata:
 spec:
 target: http://user@printer.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -292,7 +668,6 @@ metadata:
 spec:
 target: http://user@chaos.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -301,7 +676,6 @@ metadata:
 spec:
 target: http://user@cyber.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -310,7 +684,6 @@ metadata:
 spec:
 target: http://user@kitchen.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -319,7 +692,6 @@ metadata:
 spec:
 target: http://user@back-door.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -328,4 +700,3 @@ metadata:
 spec:
 target: http://user@ground-door.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1

View File

@@ -1,98 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: camera-tiler
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: camtiler
component: camera-tiler
template:
metadata:
labels: *selectorLabels
spec:
serviceAccountName: camera-tiler
containers:
- name: camera-tiler
image: harbor.k-space.ee/k-space/camera-tiler:latest
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
ports:
- containerPort: 5001
name: "http"
resources:
requests:
memory: "200Mi"
cpu: "100m"
limits:
memory: "500Mi"
cpu: "4000m"
---
apiVersion: v1
kind: Service
metadata:
name: camera-tiler
labels:
app.kubernetes.io/name: camtiler
component: camera-tiler
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: camera-tiler
ports:
- protocol: TCP
port: 5001
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: camera-tiler
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: camera-tiler
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: camera-tiler
subjects:
- kind: ServiceAccount
name: camera-tiler
apiGroup: ""
roleRef:
kind: Role
name: camera-tiler
apiGroup: ""
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: camtiler
spec:
selector:
matchLabels:
app.kubernetes.io/name: camtiler
component: camera-tiler
podMetricsEndpoints:
- port: http
podTargetLabels:
- app.kubernetes.io/name
- component

View File

@@ -1,67 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: camtiler
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd,camtiler-redirect@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: cams.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: logmower-frontend
port:
number: 8080
- host: cam.k-space.ee
http:
paths:
- pathType: Prefix
path: "/tiled"
backend:
service:
name: camera-tiler
port:
number: 5001
- pathType: Prefix
path: "/m"
backend:
service:
name: camera-tiler
port:
number: 5001
- pathType: Prefix
path: "/events"
backend:
service:
name: logmower-eventsource
port:
number: 3002
- pathType: Prefix
path: "/"
backend:
service:
name: logmower-frontend
port:
number: 8080
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: redirect
spec:
redirectRegex:
regex: ^https://cams.k-space.ee/(.*)$
replacement: https://cam.k-space.ee/$1
permanent: false

View File

@@ -1,137 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-eventsource
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: camtiler
component: logmower-eventsource
template:
metadata:
labels: *selectorLabels
spec:
containers:
- name: logmower-eventsource
image: harbor.k-space.ee/k-space/logmower-eventsource
ports:
- containerPort: 3002
name: nodejs
env:
- name: MONGO_COLLECTION
value: eventlog
- name: MONGODB_HOST
valueFrom:
secretKeyRef:
name: mongodb-application-readonly
key: connectionString.standard
- name: BACKEND
value: 'camtiler'
- name: BACKEND_BROKER_URL
value: 'http://logmower-event-broker'
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-event-broker
spec:
revisionHistoryLimit: 0
replicas: 5
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: camtiler
component: logmower-event-broker
template:
metadata:
labels: *selectorLabels
spec:
containers:
- name: logmower-event-broker
image: harbor.k-space.ee/k-space/camera-event-broker
ports:
- containerPort: 3000
env:
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: minio-secrets
key: MINIO_ROOT_PASSWORD
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: minio-secrets
key: MINIO_ROOT_USER
- name: MINIO_BUCKET
value: 'application'
- name: MINIO_HOSTNAME
value: 'cams-s3.k-space.ee'
- name: MINIO_PORT
value: '443'
- name: MINIO_SCHEMA
value: 'https'
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-frontend
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: camtiler
component: logmower-frontend
template:
metadata:
labels: *selectorLabels
spec:
containers:
- name: logmower-frontend
image: harbor.k-space.ee/k-space/logmower-frontend
ports:
- containerPort: 8080
name: http
---
apiVersion: v1
kind: Service
metadata:
name: logmower-frontend
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: logmower-frontend
ports:
- protocol: TCP
port: 8080
---
apiVersion: v1
kind: Service
metadata:
name: logmower-eventsource
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: logmower-eventsource
ports:
- protocol: TCP
port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: logmower-event-broker
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: logmower-event-broker
ports:
- protocol: TCP
port: 80
targetPort: 3000

camtiler/minio-support.yml Symbolic link
View File

@@ -0,0 +1 @@
../shared/minio-support.yml

View File

@@ -1,199 +0,0 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: minio
labels:
app.kubernetes.io/name: minio
spec:
selector:
matchLabels:
app.kubernetes.io/name: minio
serviceName: minio-svc
replicas: 4
podManagementPolicy: Parallel
template:
metadata:
labels:
app.kubernetes.io/name: minio
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- minio
topologyKey: kubernetes.io/hostname
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
containers:
- name: minio
env:
- name: MINIO_PROMETHEUS_AUTH_TYPE
value: public
envFrom:
- secretRef:
name: minio-secrets
image: minio/minio:RELEASE.2022-12-12T19-27-27Z
args:
- server
- http://minio-{0...3}.minio-svc.camtiler.svc.cluster.local/data
- --address
- 0.0.0.0:9000
- --console-address
- 0.0.0.0:9001
ports:
- containerPort: 9000
name: http
- containerPort: 9001
name: console
readinessProbe:
httpGet:
path: /minio/health/ready
port: 9000
initialDelaySeconds: 2
periodSeconds: 5
resources:
requests:
cpu: 300m
memory: 1Gi
limits:
cpu: 4000m
memory: 2Gi
volumeMounts:
- name: minio-data
mountPath: /data
volumeClaimTemplates:
- metadata:
name: minio-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: '30Gi'
storageClassName: minio
---
apiVersion: v1
kind: Service
metadata:
name: minio
spec:
sessionAffinity: ClientIP
type: ClusterIP
ports:
- port: 80
targetPort: 9000
protocol: TCP
name: http
selector:
app.kubernetes.io/name: minio
---
kind: Service
apiVersion: v1
metadata:
name: minio-svc
spec:
selector:
app.kubernetes.io/name: minio
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: http
port: 9000
- name: console
port: 9001
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: minio
spec:
selector:
matchLabels:
app.kubernetes.io/name: minio
podMetricsEndpoints:
- port: http
path: /minio/v2/metrics/node
podTargetLabels:
- app.kubernetes.io/name
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: minio
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
port: minio
path: /minio/v2/metrics/cluster
selector:
matchLabels:
app.kubernetes.io/name: minio
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: cams-s3.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: minio-svc
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: minio
spec:
groups:
- name: minio
rules:
- alert: MinioClusterDiskOffline
expr: minio_cluster_disk_offline_total > 0
for: 0m
labels:
severity: critical
annotations:
summary: Minio cluster disk offline (instance {{ $labels.instance }})
description: "Minio cluster disk is offline"
- alert: MinioNodeDiskOffline
expr: minio_cluster_nodes_offline_total > 0
for: 0m
labels:
severity: critical
annotations:
summary: Minio node disk offline (instance {{ $labels.instance }})
description: "Minio cluster node disk is offline"
- alert: MinioDiskSpaceUsage
expr: disk_storage_available / disk_storage_total * 100 < 10
for: 0m
labels:
severity: warning
annotations:
summary: Minio disk space usage (instance {{ $labels.instance }})
description: "Minio available free space is low (< 10%)"

View File

@@ -1,192 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: camera-motion-detect
spec:
podSelector:
matchLabels:
component: camera-motion-detect
policyTypes:
- Ingress
- Egress
ingress:
- from:
- podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: camera-tiler
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: prometheus-operator
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
egress:
- to:
- ipBlock:
# Permit access to cameras outside the cluster
cidr: 100.102.0.0/16
- to:
- podSelector:
matchLabels:
app: mongodb-svc
ports:
- port: 27017
- to:
- podSelector:
matchLabels:
app.kubernetes.io/name: minio
ports:
- port: 9000
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: camera-tiler
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: camera-tiler
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
component: camera-motion-detect
ports:
- port: 5000
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: prometheus-operator
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-eventsource
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: logmower-eventsource
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
app: mongodb-svc
- podSelector:
matchLabels:
component: logmower-event-broker
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-event-broker
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: logmower-event-broker
policyTypes:
- Ingress
- Egress
egress:
- to:
# Minio access via Traefik's public endpoint
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
ingress:
- from:
- podSelector:
matchLabels:
component: logmower-eventsource
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-frontend
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: logmower-frontend
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: minio
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: minio
policyTypes:
- Ingress
- Egress
egress:
- ports:
- port: http
to:
- podSelector:
matchLabels:
app.kubernetes.io/name: minio
ingress:
- ports:
- port: http
from:
- podSelector: {}
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: prometheus-operator
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus

View File

@@ -7,10 +7,9 @@ spec:
 additionalMongodConfig:
 systemLog:
 quiet: true
-members: 2
-arbiters: 1
+members: 3
 type: ReplicaSet
-version: "6.0.3"
+version: "5.0.9"
 security:
 authentication:
 modes: ["SCRAM"]
@@ -28,7 +27,7 @@ spec:
 passwordSecretRef:
 name: mongodb-application-readonly-password
 roles:
-- name: read
+- name: readOnly
 db: application
 scramCredentialsSecretName: mongodb-application-readonly
 statefulSet:
@@ -36,24 +35,6 @@ spec:
 logLevel: WARN
 template:
 spec:
-containers:
-- name: mongod
-resources:
-requests:
-cpu: 100m
-memory: 512Mi
-limits:
-cpu: 500m
-memory: 1Gi
-volumeMounts:
-- name: journal-volume
-mountPath: /data/journal
-- name: mongodb-agent
-resources:
-requests:
-cpu: 1m
-memory: 100Mi
-limits: {}
 affinity:
 podAntiAffinity:
 requiredDuringSchedulingIgnoredDuringExecution:
@@ -74,21 +55,8 @@ spec:
 volumeClaimTemplates:
 - metadata:
 name: logs-volume
-labels:
-usecase: logs
 spec:
-storageClassName: mongo
+storageClassName: local-path
-accessModes:
-- ReadWriteOnce
-resources:
-requests:
-storage: 100Mi
-- metadata:
-name: journal-volume
-labels:
-usecase: journal
-spec:
-storageClassName: mongo
 accessModes:
 - ReadWriteOnce
 resources:
@@ -96,12 +64,67 @@ spec:
 storage: 512Mi
 - metadata:
 name: data-volume
-labels:
-usecase: data
 spec:
-storageClassName: mongo
+storageClassName: local-path
 accessModes:
 - ReadWriteOnce
 resources:
 requests:
 storage: 2Gi
---
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
name: minio
annotations:
prometheus.io/path: /minio/prometheus/metrics
prometheus.io/port: "9000"
prometheus.io/scrape: "true"
spec:
credsSecret:
name: minio-secret
buckets:
- name: application
requestAutoCert: false
users:
- name: minio-user-0
pools:
- name: pool-0
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: v1.min.io/tenant
operator: In
values:
- minio
- key: v1.min.io/pool
operator: In
values:
- pool-0
topologyKey: kubernetes.io/hostname
resources:
requests:
cpu: '1'
memory: 512Mi
servers: 4
volumesPerServer: 1
volumeClaimTemplate:
metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: '30Gi'
storageClassName: local-path
status: {}
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule

View File

@@ -2,9 +2,9 @@ Before applying replace the secret with the actual one.
 For debugging add `- --log-level=debug`:
 ```
-wget https://raw.githubusercontent.com/kubernetes-sigs/external-dns/master/docs/contributing/crd-source/crd-manifest.yaml -O crd.yml
-kubectl apply -n external-dns -f application.yml -f crd.yml
+kubectl apply -n external-dns -f external-dns.yml
 ```
 Insert TSIG secret:
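
The TSIG command itself is outside this hunk; a minimal sketch of what it could look like, assuming the deployment reads flags from `EXTERNAL_DNS_*` environment variables via the `tsig-secret` it references through `envFrom` (the variable name is illustrative, not taken from this repository):

```bash
# Hypothetical: external-dns picks up flag values from EXTERNAL_DNS_* env vars
kubectl create secret generic -n external-dns tsig-secret \
    --from-literal=EXTERNAL_DNS_RFC2136_TSIG_SECRET="<base64-tsig-key>"
```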

View File

@@ -24,20 +24,6 @@ rules:
 - get
 - list
 - watch
-- apiGroups:
-- externaldns.k8s.io
-resources:
-- dnsendpoints
-verbs:
-- get
-- watch
-- list
-- apiGroups:
-- externaldns.k8s.io
-resources:
-- dnsendpoints/status
-verbs:
-- update
 ---
 apiVersion: v1
 kind: ServiceAccount
@@ -77,7 +63,7 @@ spec:
 serviceAccountName: external-dns
 containers:
 - name: external-dns
-image: k8s.gcr.io/external-dns/external-dns:v0.13.1
+image: k8s.gcr.io/external-dns/external-dns:v0.10.2
 envFrom:
 - secretRef:
 name: tsig-secret

View File

@@ -35,7 +35,7 @@ data:
 TRIVY_ADAPTER_URL: "http://harbor-trivy:8080"
 REGISTRY_STORAGE_PROVIDER_NAME: "filesystem"
 WITH_CHARTMUSEUM: "false"
-LOG_LEVEL: "warning"
+LOG_LEVEL: "info"
 CONFIG_PATH: "/etc/core/app.conf"
 CHART_CACHE_DRIVER: "redis"
 _REDIS_URL_CORE: "redis://harbor-redis:6379/0?idle_timeout_seconds=30"

View File

@@ -1,165 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: descheduler
namespace: kube-system
labels:
app.kubernetes.io/name: descheduler
---
apiVersion: v1
kind: ConfigMap
metadata:
name: descheduler
namespace: kube-system
labels:
app.kubernetes.io/name: descheduler
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
LowNodeUtilization:
enabled: true
params:
nodeResourceUtilizationThresholds:
targetThresholds:
cpu: 50
memory: 50
pods: 50
thresholds:
cpu: 20
memory: 20
pods: 20
RemoveDuplicates:
enabled: true
RemovePodsHavingTooManyRestarts:
enabled: true
params:
podsHavingTooManyRestarts:
includingInitContainers: true
podRestartThreshold: 100
RemovePodsViolatingInterPodAntiAffinity:
enabled: true
RemovePodsViolatingNodeAffinity:
enabled: true
params:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingNodeTaints:
enabled: true
RemovePodsViolatingTopologySpreadConstraint:
enabled: true
params:
includeSoftConstraints: false
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler
labels:
app.kubernetes.io/name: descheduler
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["descheduler"]
verbs: ["get", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: descheduler
labels:
app.kubernetes.io/name: descheduler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: descheduler
subjects:
- kind: ServiceAccount
name: descheduler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: descheduler
namespace: kube-system
labels:
app.kubernetes.io/name: descheduler
spec:
replicas: 2
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: descheduler
template:
metadata:
labels: *selectorLabels
spec:
priorityClassName: system-cluster-critical
serviceAccountName: descheduler
containers:
- name: descheduler
image: "k8s.gcr.io/descheduler/descheduler:v0.25.1"
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- 5m
- "--v"
- "3"
- --leader-elect=true
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
resources:
requests:
cpu: 500m
memory: 256Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: descheduler

View File

@@ -159,9 +159,7 @@ spec:
 spec:
 automountServiceAccountToken: true
 containers:
-- image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.7.0
-args:
-- --metric-labels-allowlist=pods=[*]
+- image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.6.0
 livenessProbe:
 httpGet:
 path: /healthz
@@ -310,6 +308,14 @@ spec:
 annotations:
 summary: Kubernetes Volume out of disk space (instance {{ $labels.instance }})
 description: "Volume is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+- alert: KubernetesVolumeFullInFourDays
+expr: predict_linear(kubelet_volume_stats_available_bytes[6h], 4 * 24 * 3600) < 0
+for: 0m
+labels:
+severity: critical
+annotations:
+summary: Kubernetes Volume full in four days (instance {{ $labels.instance }})
+description: "{{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 - alert: KubernetesPersistentvolumeError
 expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending", job="kube-state-metrics"} > 0
 for: 0m
@@ -423,13 +429,21 @@ spec:
 summary: Kubernetes DaemonSet rollout stuck (instance {{ $labels.instance }})
 description: "Some Pods of DaemonSet are not scheduled or not ready\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 - alert: KubernetesDaemonsetMisscheduled
-expr: sum by (namespace, daemonset) (kube_daemonset_status_number_misscheduled) > 0
+expr: kube_daemonset_status_number_misscheduled > 0
 for: 1m
 labels:
 severity: critical
 annotations:
 summary: Kubernetes DaemonSet misscheduled (instance {{ $labels.instance }})
 description: "Some DaemonSet Pods are running where they are not supposed to run\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+- alert: KubernetesCronjobTooLong
+expr: time() - kube_cronjob_next_schedule_time > 3600
+for: 0m
+labels:
+severity: warning
+annotations:
+summary: Kubernetes CronJob too long (instance {{ $labels.instance }})
+description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than 1h to complete.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 - alert: KubernetesJobSlowCompletion
 expr: kube_job_spec_completions - kube_job_status_succeeded > 0
 for: 12h

View File

@@ -1,491 +0,0 @@
---
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
name: logmower-readwrite-password
spec:
mapping:
- key: password
value: "%(password)s"
---
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
name: logmower-readonly-password
spec:
mapping:
- key: password
value: "%(password)s"
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: logmower-mongodb
spec:
additionalMongodConfig:
systemLog:
quiet: true
members: 2
arbiters: 1
type: ReplicaSet
version: "6.0.3"
security:
authentication:
modes: ["SCRAM"]
users:
- name: readwrite
db: application
passwordSecretRef:
name: logmower-readwrite-password
roles:
- name: readWrite
db: application
scramCredentialsSecretName: logmower-readwrite
- name: readonly
db: application
passwordSecretRef:
name: logmower-readonly-password
roles:
- name: read
db: application
scramCredentialsSecretName: logmower-readonly
statefulSet:
spec:
logLevel: WARN
template:
spec:
containers:
- name: mongod
resources:
requests:
cpu: 100m
memory: 1Gi
limits:
cpu: 4000m
memory: 1Gi
volumeMounts:
- name: journal-volume
mountPath: /data/journal
- name: mongodb-agent
resources:
requests:
cpu: 1m
memory: 100Mi
limits: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- logmower-mongodb-svc
topologyKey: kubernetes.io/hostname
nodeSelector:
dedicated: monitoring
tolerations:
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
volumeClaimTemplates:
- metadata:
name: logs-volume
labels:
usecase: logs
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
- metadata:
name: journal-volume
labels:
usecase: journal
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 512Mi
- metadata:
name: data-volume
labels:
usecase: data
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: logmower-shipper
spec:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 50%
selector:
matchLabels:
app: logmower-shipper
template:
metadata:
labels:
app: logmower-shipper
spec:
serviceAccountName: logmower-shipper
containers:
- name: logmower-shipper
image: harbor.k-space.ee/k-space/logmower-shipper-prototype:latest
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MONGO_URI
valueFrom:
secretKeyRef:
name: logmower-mongodb-application-readwrite
key: connectionString.standard
ports:
- containerPort: 8000
name: metrics
securityContext:
readOnlyRootFilesystem: true
command:
- /app/log_shipper.py
- --parse-json
- --normalize-log-level
- --stream-to-log-level
- --merge-top-level
- --max-collection-size
- "10000000000"
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
volumes:
- name: etcmachineid
hostPath:
path: /etc/machine-id
- name: varlog
hostPath:
path: /var/log
tolerations:
- operator: "Exists"
effect: "NoSchedule"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: logging-logmower-shipper
subjects:
- kind: ServiceAccount
name: logmower-shipper
namespace: logmower
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: logmower-shipper
labels:
app: logmower-shipper
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-shipper
spec:
podSelector:
matchLabels:
app: logmower-shipper
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: prometheus-operator
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ports:
- port: 27017
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-eventsource
spec:
podSelector:
matchLabels:
app: logmower-eventsource
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-frontend
spec:
podSelector:
matchLabels:
app: logmower-frontend
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: logmower-shipper
spec:
selector:
matchLabels:
app: logmower-shipper
podMetricsEndpoints:
- port: metrics
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: logmower-shipper
spec:
groups:
- name: logmower-shipper
rules:
- alert: LogmowerSingleInsertionErrors
annotations:
summary: Logmower shipper is having issues submitting log records
to database
expr: rate(logmower_insertion_error_count_total[30m]) > 0
for: 0m
labels:
severity: warning
- alert: LogmowerBulkInsertionErrors
annotations:
summary: Logmower shipper is having issues submitting log records
to database
expr: rate(logmower_bulk_insertion_error_count_total[30m]) > 0
for: 0m
labels:
severity: warning
- alert: LogmowerHighDatabaseLatency
annotations:
summary: Database operations are slow
expr: histogram_quantile(0.95, logmower_database_operation_latency_bucket) > 10
for: 1m
labels:
severity: warning
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: logmower
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: log.k-space.ee
http:
paths:
- pathType: Prefix
path: "/events"
backend:
service:
name: logmower-eventsource
port:
number: 3002
- pathType: Prefix
path: "/"
backend:
service:
name: logmower-frontend
port:
number: 8080
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: v1
kind: Service
metadata:
name: logmower-eventsource
spec:
type: ClusterIP
selector:
app: logmower-eventsource
ports:
- protocol: TCP
port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: logmower-frontend
spec:
type: ClusterIP
selector:
app: logmower-frontend
ports:
- protocol: TCP
port: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-frontend
spec:
selector:
matchLabels:
app: logmower-frontend
template:
metadata:
labels:
app: logmower-frontend
spec:
containers:
- name: logmower-frontend
image: harbor.k-space.ee/k-space/logmower-frontend
ports:
- containerPort: 8080
name: http
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
resources:
limits:
memory: 50Mi
requests:
cpu: 1m
memory: 20Mi
volumeMounts:
- name : nginx-cache
mountPath: /var/cache/nginx/
- name : nginx-config
mountPath: /var/config/nginx/
- name: var-run
mountPath: /var/run/
volumes:
- emptyDir: {}
name: nginx-cache
- emptyDir: {}
name: nginx-config
- emptyDir: {}
name: var-run
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-eventsource
spec:
selector:
matchLabels:
app: logmower-eventsource
template:
metadata:
labels:
app: logmower-eventsource
spec:
containers:
- name: logmower-eventsource
image: harbor.k-space.ee/k-space/logmower-eventsource
ports:
- containerPort: 3002
name: nodejs
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
resources:
limits:
cpu: 500m
memory: 200Mi
requests:
cpu: 10m
memory: 100Mi
env:
- name: MONGODB_HOST
valueFrom:
secretKeyRef:
name: logmower-mongodb-application-readonly
key: connectionString.standard
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-mongodb
spec:
podSelector:
matchLabels:
app: logmower-mongodb-svc
policyTypes:
- Ingress
- Egress
ingress:
- from:
- podSelector: {}
ports:
- port: 27017
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ports:
- port: 27017

View File

@@ -1 +0,0 @@
../mongodb-operator/mongodb-support.yml

View File

@@ -1,47 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-mongoexpress
spec:
revisionHistoryLimit: 0
replicas: 1
selector:
matchLabels:
app: logmower-mongoexpress
template:
metadata:
labels:
app: logmower-mongoexpress
spec:
containers:
- name: mongoexpress
image: mongo-express
ports:
- name: mongoexpress
containerPort: 8081
env:
- name: ME_CONFIG_MONGODB_URL
valueFrom:
secretKeyRef:
name: logmower-mongodb-application-readonly
key: connectionString.standard
- name: ME_CONFIG_MONGODB_ENABLE_ADMIN
value: "true"
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-mongoexpress
spec:
podSelector:
matchLabels:
app: logmower-mongoexpress
policyTypes:
- Egress
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ports:
- port: 27017
@ -1 +0,0 @@
../shared/networkpolicy-base.yml
@ -1,8 +1,8 @@
# Longhorn distributed block storage system
The manifest was fetched from
-https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/deploy/longhorn.yaml
+https://raw.githubusercontent.com/longhorn/longhorn/v1.2.4/deploy/longhorn.yaml
-and then heavily modified as per `changes.diff`
+and then heavily modified.
To deploy Longhorn use following:
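The deploy command itself falls outside this hunk; a minimal sketch, assuming the modified manifest is kept as `application.yml` (the name used by `changes.diff` below) and Longhorn's stock `longhorn-system` namespace:

```
kubectl create namespace longhorn-system
kubectl -n longhorn-system apply -f application.yml
```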
File diff suppressed because it is too large
@ -1,92 +0,0 @@
--- ref 2023-02-20 11:15:07.340650467 +0200
+++ application.yml 2023-02-19 18:38:05.059234209 +0200
@@ -60,14 +60,14 @@
storageclass.kubernetes.io/is-default-class: "true"
provisioner: driver.longhorn.io
allowVolumeExpansion: true
- reclaimPolicy: "Delete"
+ reclaimPolicy: "Retain"
volumeBindingMode: Immediate
parameters:
- numberOfReplicas: "3"
+ numberOfReplicas: "2"
staleReplicaTimeout: "30"
fromBackup: ""
- fsType: "ext4"
- dataLocality: "disabled"
+ fsType: "xfs"
+ dataLocality: "best-effort"
---
# Source: longhorn/templates/crds.yaml
apiVersion: apiextensions.k8s.io/v1
@@ -3869,6 +3869,11 @@
app.kubernetes.io/version: v1.4.0
app: longhorn-manager
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: storage
+ effect: NoSchedule
initContainers:
- name: wait-longhorn-admission-webhook
image: longhornio/longhorn-manager:v1.4.0
@@ -3968,6 +3973,10 @@
app.kubernetes.io/version: v1.4.0
app: longhorn-driver-deployer
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: storage
initContainers:
- name: wait-longhorn-manager
image: longhornio/longhorn-manager:v1.4.0
@@ -4037,6 +4046,11 @@
app.kubernetes.io/version: v1.4.0
app: longhorn-recovery-backend
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: storage
+ effect: NoSchedule
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
@@ -4103,6 +4117,11 @@
app.kubernetes.io/version: v1.4.0
app: longhorn-ui
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: storage
+ effect: NoSchedule
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
@@ -4166,6 +4185,11 @@
app.kubernetes.io/version: v1.4.0
app: longhorn-conversion-webhook
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: storage
+ effect: NoSchedule
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
@@ -4226,6 +4250,11 @@
app.kubernetes.io/version: v1.4.0
app: longhorn-admission-webhook
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: storage
+ effect: NoSchedule
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
@ -1,158 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: doorboy-proxy
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
spec:
revisionHistoryLimit: 0
replicas: 3
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: doorboy-proxy
template:
metadata:
labels: *selectorLabels
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- doorboy-proxy
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- name: doorboy-proxy
image: harbor.k-space.ee/k-space/doorboy-proxy:latest
envFrom:
- secretRef:
name: doorboy-api
env:
- name: MONGO_URI
valueFrom:
secretKeyRef:
name: mongo-application-readwrite
key: connectionString.standard
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
ports:
- containerPort: 5000
name: "http"
resources:
requests:
memory: "200Mi"
cpu: "100m"
limits:
memory: "500Mi"
cpu: "1"
---
apiVersion: v1
kind: Service
metadata:
name: doorboy-proxy
spec:
selector:
app.kubernetes.io/name: doorboy-proxy
ports:
- protocol: TCP
name: http
port: 5000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: doorboy-proxy
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: doorboy-proxy.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: doorboy-proxy
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: doorboy-proxy
spec:
selector:
matchLabels:
app.kubernetes.io/name: doorboy-proxy
podMetricsEndpoints:
- port: http
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kdoorpi
spec:
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: kdoorpi
template:
metadata:
labels: *selectorLabels
spec:
containers:
- name: kdoorpi
image: harbor.k-space.ee/k-space/kdoorpi:latest
env:
- name: KDOORPI_API_ALLOWED
value: https://doorboy-proxy.k-space.ee/allowed
- name: KDOORPI_API_LONGPOLL
value: https://doorboy-proxy.k-space.ee/longpoll
- name: KDOORPI_API_SWIPE
value: http://172.21.99.98/swipe
- name: KDOORPI_DOOR
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KDOORPI_API_KEY
valueFrom:
secretKeyRef:
name: doorboy-api
key: DOORBOY_SECRET
- name: KDOORPI_UID_SALT
valueFrom:
secretKeyRef:
name: doorboy-uid-hash-salt
key: KDOORPI_UID_SALT
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
nodeSelector:
dedicated: door
tolerations:
- key: dedicated
operator: Equal
value: door
effect: NoSchedule
- key: arch
operator: Equal
value: arm64
effect: NoSchedule
meta-operator/README.md Normal file
@ -0,0 +1,11 @@
# meta-operator
The meta-operator enables creating operators without building any binaries or
Docker images.
For an example operator declaration, see `keydb.yml`.
```
kubectl create namespace meta-operator
kubectl apply -f application.yml -f keydb.yml
```
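Once applied, declared operators are visible like any other cluster-scoped resource, for example (using the `clusteroperators` plural and the `keydb` instance defined in this repo):

```
kubectl get clusteroperators
kubectl describe clusteroperator keydb
```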
@ -0,0 +1,220 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusteroperators.codemowers.io
spec:
group: codemowers.io
names:
plural: clusteroperators
singular: clusteroperator
kind: ClusterOperator
shortNames:
- clusteroperator
scope: Cluster
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
resource:
type: object
properties:
group:
type: string
version:
type: string
plural:
type: string
secret:
type: object
properties:
name:
type: string
enabled:
type: boolean
structure:
type: array
items:
type: object
properties:
key:
type: string
value:
type: string
services:
type: array
items:
type: object
x-kubernetes-preserve-unknown-fields: true
deployments:
type: array
items:
type: object
x-kubernetes-preserve-unknown-fields: true
statefulsets:
type: array
items:
type: object
x-kubernetes-preserve-unknown-fields: true
configmaps:
type: array
items:
type: object
x-kubernetes-preserve-unknown-fields: true
customresources:
type: array
items:
type: object
x-kubernetes-preserve-unknown-fields: true
required: ["spec"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: meta-operator
namespace: meta-operator
labels:
app.kubernetes.io/name: meta-operator
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: meta-operator
template:
metadata:
labels:
app.kubernetes.io/name: meta-operator
spec:
serviceAccountName: meta-operator
containers:
- name: meta-operator
image: harbor.k-space.ee/k-space/meta-operator
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterOperator
metadata:
name: meta
spec:
resource:
group: codemowers.io
version: v1alpha1
plural: clusteroperators
secret:
enabled: false
deployments:
- apiVersion: apps/v1
kind: Deployment
metadata:
name: foobar-operator
labels:
app.kubernetes.io/name: foobar-operator
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: foobar-operator
template:
metadata:
labels:
app.kubernetes.io/name: foobar-operator
spec:
serviceAccountName: meta-operator
containers:
- name: meta-operator
image: harbor.k-space.ee/k-space/meta-operator
command:
- /meta-operator.py
- --target
- foobar
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: meta-operator
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
- services
verbs:
- create
- get
- patch
- update
- delete
- list
- apiGroups:
- apps
resources:
- deployments
- statefulsets
verbs:
- create
- delete
- list
- update
- patch
- apiGroups:
- codemowers.io
resources:
- bindzones
- clusteroperators
- keydbs
verbs:
- get
- list
- watch
- apiGroups:
- k-space.ee
resources:
- cams
verbs:
- get
- list
- watch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: meta-operator
namespace: meta-operator
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: meta-operator
subjects:
- kind: ServiceAccount
name: meta-operator
namespace: meta-operator
roleRef:
kind: ClusterRole
name: meta-operator
apiGroup: rbac.authorization.k8s.io
meta-operator/keydb.yml Normal file
@ -0,0 +1,253 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: keydbs.codemowers.io
spec:
group: codemowers.io
names:
plural: keydbs
singular: keydb
kind: KeyDBCluster
shortNames:
- keydb
scope: Namespaced
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
replicas:
type: integer
description: Replica count
required: ["spec"]
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterOperator
metadata:
name: keydb
spec:
resource:
group: codemowers.io
version: v1alpha1
plural: keydbs
secret:
enabled: true
name: foobar-secrets
structure:
- key: REDIS_PASSWORD
value: "%s"
- key: REDIS_URI
value: "redis://:%s@foobar"
configmaps:
- apiVersion: v1
kind: ConfigMap
metadata:
name: foobar-scripts
labels:
app.kubernetes.io/name: foobar
data:
entrypoint.sh: |
#!/bin/bash
set -euxo pipefail
host="$(hostname)"
port="6379"
replicas=()
for node in {0..2}; do
if [ "${host}" != "redis-${node}" ]; then
replicas+=("--replicaof redis-${node}.redis-headless ${port}")
fi
done
exec keydb-server /etc/keydb/redis.conf \
--active-replica "yes" \
--multi-master "yes" \
--appendonly "no" \
--bind "0.0.0.0" \
--port "${port}" \
--protected-mode "no" \
--server-threads "2" \
--masterauth "${REDIS_PASSWORD}" \
--requirepass "${REDIS_PASSWORD}" \
"${replicas[@]}"
ping_readiness_local.sh: |-
#!/bin/bash
set -e
[[ -n "${REDIS_PASSWORD}" ]] && export REDISCLI_AUTH="${REDIS_PASSWORD}"
response="$(
timeout -s 3 "${1}" \
keydb-cli \
-h localhost \
-p 6379 \
ping
)"
if [ "${response}" != "PONG" ]; then
echo "${response}"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
set -e
[[ -n "${REDIS_PASSWORD}" ]] && export REDISCLI_AUTH="${REDIS_PASSWORD}"
response="$(
timeout -s 3 "${1}" \
keydb-cli \
-h localhost \
-p 6379 \
ping
)"
if [ "${response}" != "PONG" ] && [[ ! "${response}" =~ ^.*LOADING.*$ ]]; then
echo "${response}"
exit 1
fi
cleanup_tempfiles.sh: |-
#!/bin/bash
set -e
find /data/ -type f \( -name "temp-*.aof" -o -name "temp-*.rdb" \) -mmin +60 -delete
services:
- apiVersion: v1
kind: Service
metadata:
name: foobar-headless
labels:
app.kubernetes.io/name: foobar
spec:
type: ClusterIP
clusterIP: None
ports:
- name: redis
port: 6379
protocol: TCP
targetPort: redis
selector:
app.kubernetes.io/name: foobar
- apiVersion: v1
kind: Service
metadata:
name: foobar
labels:
app.kubernetes.io/name: foobar
annotations:
{}
spec:
type: ClusterIP
ports:
- name: redis
port: 6379
protocol: TCP
targetPort: redis
- name: exporter
port: 9121
protocol: TCP
targetPort: exporter
selector:
app.kubernetes.io/name: foobar
sessionAffinity: ClientIP
statefulsets:
- apiVersion: apps/v1
kind: StatefulSet
metadata:
name: foobar
labels:
app.kubernetes.io/name: foobar
spec:
replicas: 3
serviceName: foobar-headless
selector:
matchLabels:
app.kubernetes.io/name: foobar
template:
metadata:
labels:
app.kubernetes.io/name: foobar
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- 'foobar'
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- name: redis
image: eqalpha/keydb:x86_64_v6.3.1
imagePullPolicy: Always
command:
- /scripts/entrypoint.sh
ports:
- name: redis
containerPort: 6379
protocol: TCP
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /scripts/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /scripts/ping_readiness_local.sh 1
startupProbe:
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 2
failureThreshold: 24
exec:
command:
- sh
- -c
- /scripts/ping_readiness_local.sh 1
resources:
{}
securityContext:
{}
volumeMounts:
- name: foobar-scripts
mountPath: /scripts
- name: foobar-data
mountPath: /data
envFrom:
- secretRef:
name: foobar-secrets
- name: exporter
image: quay.io/oliver006/redis_exporter
ports:
- name: exporter
containerPort: 9121
envFrom:
- secretRef:
name: foobar-secrets
securityContext:
{}
volumes:
- name: foobar-scripts
configMap:
name: foobar-scripts
defaultMode: 0755
- name: foobar-data
emptyDir: {}
@ -1,9 +0,0 @@
# Nyancat server deployment
Something silly for a change.
To connect, use:
```
telnet nyancat.k-space.ee
```
@ -1,49 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nyancat
namespace: nyancat
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: nyancat
template:
metadata:
labels:
app.kubernetes.io/name: nyancat
spec:
containers:
- name: nyancat
image: harbor.k-space.ee/k-space/nyancat-server:latest
command:
- onenetd
- -v1
- "0"
- "2323"
- nyancat
- -I
- --telnet
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65534
---
apiVersion: v1
kind: Service
metadata:
name: nyancat
namespace: nyancat
annotations:
metallb.universe.tf/address-pool: eenet
external-dns.alpha.kubernetes.io/hostname: nyancat.k-space.ee
spec:
type: LoadBalancer
externalTrafficPolicy: Local
selector:
app.kubernetes.io/name: nyancat
ports:
- protocol: TCP
port: 23
targetPort: 2323
@ -1,11 +0,0 @@
# Raw file based local PV-s
We currently use only the `rawfile-localpv` portion of OpenEBS.
The manifests were rendered using the Helm template from https://github.com/openebs/rawfile-localpv
and subsequently modified.
```
kubectl create namespace openebs
kubectl apply -n openebs -f rawfile.yaml
```
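Once the driver is up, volumes are requested through the storage classes defined at the bottom of `rawfile.yaml`; a sketch using the `rawfile-xfs` class:

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-rawfile
spec:
  storageClassName: rawfile-xfs
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```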
@ -1,404 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rawfile-csi-driver
namespace: openebs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-provisioner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csistoragecapacities"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-broker
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-resizer
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-provisioner
subjects:
- kind: ServiceAccount
name: rawfile-csi-driver
namespace: openebs
roleRef:
kind: ClusterRole
name: rawfile-csi-provisioner
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-broker
subjects:
- kind: ServiceAccount
name: rawfile-csi-driver
namespace: openebs
roleRef:
kind: ClusterRole
name: rawfile-csi-broker
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-resizer
subjects:
- kind: ServiceAccount
name: rawfile-csi-driver
namespace: openebs
roleRef:
kind: ClusterRole
name: rawfile-csi-resizer
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: rawfile-csi-controller
namespace: openebs
labels:
app.kubernetes.io/name: rawfile-csi
component: controller
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: rawfile-csi
component: controller
clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
name: rawfile-csi-node
namespace: openebs
labels:
app.kubernetes.io/name: rawfile-csi
component: node
spec:
type: ClusterIP
ports:
- name: metrics
port: 9100
targetPort: metrics
protocol: TCP
selector:
app.kubernetes.io/name: rawfile-csi
component: node
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: rawfile-csi-node
namespace: openebs
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: "100%"
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: rawfile-csi
component: node
template:
metadata:
labels: *selectorLabels
spec:
serviceAccount: rawfile-csi-driver
priorityClassName: system-node-critical
tolerations:
- operator: "Exists"
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet
type: DirectoryOrCreate
- name: data-dir
hostPath:
path: /var/csi/rawfile
type: DirectoryOrCreate
containers:
- name: csi-driver
image: "harbor.k-space.ee/k-space/rawfile-localpv:latest"
imagePullPolicy: Always
securityContext:
privileged: true
env:
- name: PROVISIONER_NAME
value: "rawfile.csi.openebs.io"
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: "harbor.k-space.ee/k-space/rawfile-localpv"
- name: IMAGE_TAG
value: "latest"
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
ports:
- name: metrics
containerPort: 9100
- name: csi-probe
containerPort: 9808
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: mountpoint-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: data-dir
mountPath: /data
resources:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
- name: node-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
imagePullPolicy: IfNotPresent
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --health-port=9809
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
ports:
- containerPort: 9809
name: healthz
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 5
timeoutSeconds: 5
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
cpu: 500m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
- name: external-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
imagePullPolicy: IfNotPresent
args:
- "--csi-address=$(ADDRESS)"
- "--feature-gates=Topology=true"
- "--strict-topology"
- "--immediate-topology=false"
- "--timeout=120s"
- "--enable-capacity=true"
- "--capacity-ownerref-level=1" # DaemonSet
- "--node-deployment=true"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- name: socket-dir
mountPath: /csi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: rawfile-csi-controller
namespace: openebs
spec:
replicas: 1
serviceName: rawfile-csi
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: rawfile-csi
component: controller
template:
metadata:
labels: *selectorLabels
spec:
serviceAccount: rawfile-csi-driver
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: Equal
value: "true"
effect: NoSchedule
volumes:
- name: socket-dir
emptyDir: {}
containers:
- name: csi-driver
image: "harbor.k-space.ee/k-space/rawfile-localpv"
imagePullPolicy: Always
args:
- csi-driver
- --disable-metrics
env:
- name: PROVISIONER_NAME
value: "rawfile.csi.openebs.io"
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: "harbor.k-space.ee/k-space/rawfile-localpv"
- name: IMAGE_TAG
value: "latest"
volumeMounts:
- name: socket-dir
mountPath: /csi
ports:
- name: csi-probe
containerPort: 9808
resources:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
- name: external-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
imagePullPolicy: IfNotPresent
args:
- "--csi-address=$(ADDRESS)"
- "--handle-volume-inuse-error=false"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: rawfile.csi.openebs.io
spec:
attachRequired: false
podInfoOnMount: true
fsGroupPolicy: File
storageCapacity: true
volumeLifecycleModes:
- Persistent
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rawfile-ext4
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "ext4"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rawfile-xfs
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
@ -1 +0,0 @@
bundle.yml
@ -1,7 +1,7 @@
# Prometheus operator # Prometheus operator
``` ```
curl -L https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.61.1/bundle.yaml | sed -e 's/namespace: default/namespace: prometheus-operator/g' > bundle.yml curl -L https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.59.0/bundle.yaml | sed -e 's/namespace: default/namespace: prometheus-operator/g' > bundle.yml
kubectl create namespace prometheus-operator kubectl create namespace prometheus-operator
kubectl apply --server-side -n prometheus-operator -f bundle.yml kubectl apply --server-side -n prometheus-operator -f bundle.yml
kubectl delete -n prometheus-operator configmap snmp-exporter kubectl delete -n prometheus-operator configmap snmp-exporter

View File

@ -7,14 +7,7 @@ metadata:
    app.kubernetes.io/name: alertmanager
spec:
  route:
-    routes:
-      - continue: false
-        receiver: slack-notifications
-        matchers:
-          - matchType: "="
-            name: severity
-            value: critical
-    receiver: 'null'
+    receiver: 'slack-notifications'
  receivers:
    - name: 'slack-notifications'
      slackConfigs:
@ -40,12 +33,9 @@ kind: Alertmanager
metadata:
  name: alertmanager
spec:
-  alertmanagerConfigMatcherStrategy:
-    type: None
-  alertmanagerConfigNamespaceSelector: {}
-  alertmanagerConfigSelector: {}
-  alertmanagerConfiguration:
-    name: alertmanager
+  alertmanagerConfigSelector:
+    matchLabels:
+      app.kubernetes.io/name: alertmanager
  secrets:
    - slack-secrets
  nodeSelector:
@ -104,7 +94,7 @@ spec:
  probeSelector: {}
  ruleNamespaceSelector: {}
  ruleSelector: {}
-  retentionSize: 8GB
+  retentionSize: 80GB
  storage:
    volumeClaimTemplate:
      spec:
@ -112,7 +102,7 @@ spec:
        - ReadWriteOnce
        resources:
          requests:
-            storage: 10Gi
+            storage: 100Gi
      storageClassName: local-path
---
apiVersion: v1
prometheus-operator/bundle.yml Normal file
File diff suppressed because it is too large
@ -87,13 +87,7 @@ spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-                - key: app
-                  operator: In
-                  values:
-                    - mikrotik-exporter
-            topologyKey: "kubernetes.io/hostname"
+          - topologyKey: "kubernetes.io/hostname"
---
kind: Service
apiVersion: v1
@ -4,13 +4,11 @@ kind: Probe
metadata:
  name: nodes-proxmox
spec:
-  scrapeTimeout: 30s
  targets:
    staticConfig:
      static:
        - nas.mgmt.k-space.ee:9100
        - pve1.proxmox.infra.k-space.ee:9100
-        - pve2.proxmox.infra.k-space.ee:9100
        - pve8.proxmox.infra.k-space.ee:9100
        - pve9.proxmox.infra.k-space.ee:9100
  relabelingConfigs:
@ -88,37 +86,37 @@ spec:
        summary: Host memory under memory pressure (instance {{ $labels.instance }})
        description: The node is under heavy memory pressure. High rate of major page faults
    - alert: HostUnusualNetworkThroughputIn
-      expr: sum by (instance) (rate(node_network_receive_bytes_total[2m])) > 800e+06
+      expr: sum by (instance) (rate(node_network_receive_bytes_total[2m])) > 160e+06
      for: 1h
      labels:
        severity: warning
      annotations:
        summary: Host unusual network throughput in (instance {{ $labels.instance }})
-        description: Host network interfaces are probably receiving too much data (> 800 MB/s)
+        description: Host network interfaces are probably receiving too much data (> 160 MB/s)
    - alert: HostUnusualNetworkThroughputOut
-      expr: sum by (instance) (rate(node_network_transmit_bytes_total[2m])) > 800e+06
+      expr: sum by (instance) (rate(node_network_transmit_bytes_total[2m])) > 160e+06
      for: 1h
      labels:
        severity: warning
      annotations:
        summary: Host unusual network throughput out (instance {{ $labels.instance }})
-        description: Host network interfaces are probably sending too much data (> 800 MB/s)
+        description: Host network interfaces are probably sending too much data (> 160 MB/s)
    - alert: HostUnusualDiskReadRate
-      expr: sum by (instance) (rate(node_disk_read_bytes_total[2m])) > 500e+06
+      expr: sum by (instance) (rate(node_disk_read_bytes_total[2m])) > 50000000
      for: 1h
      labels:
        severity: warning
      annotations:
        summary: Host unusual disk read rate (instance {{ $labels.instance }})
-        description: Disk is probably reading too much data (> 500 MB/s)
+        description: Disk is probably reading too much data (> 50 MB/s)
    - alert: HostUnusualDiskWriteRate
-      expr: sum by (instance) (rate(node_disk_written_bytes_total[2m])) > 500e+06
+      expr: sum by (instance) (rate(node_disk_written_bytes_total[2m])) > 50000000
      for: 1h
      labels:
        severity: warning
      annotations:
        summary: Host unusual disk write rate (instance {{ $labels.instance }})
-        description: Disk is probably writing too much data (> 500 MB/s)
+        description: Disk is probably writing too much data (> 50 MB/s)
    # Please add ignored mountpoints in node_exporter parameters like
    # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
    # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
@ -363,13 +361,11 @@ kind: PodMonitor
metadata:
  name: node-exporter
spec:
  selector:
    matchLabels:
      app: node-exporter
  podMetricsEndpoints:
    - port: web
-      scrapeTimeout: 30s
      relabelings:
        - sourceLabels: [__meta_kubernetes_pod_node_name]
          targetLabel: node
@ -406,10 +402,9 @@ spec:
        - --path.rootfs=/host/root
        - --no-collector.wifi
        - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)
-        - --collector.netclass.ignored-devices=^(veth|cali|vxlan|cni|vnet|tap|lo|wg)
-        - --collector.netdev.device-exclude=^(veth|cali|vxlan|cni|vnet|tap|lo|wg)
-        - --collector.diskstats.ignored-devices=^(sr[0-9][0-9]*)$
-        image: prom/node-exporter:v1.5.0
+        - --collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15})$
+        - --collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15})$
+        image: prom/node-exporter:v1.3.1
        resources:
          limits:
            cpu: 50m
@ -1,55 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mongo
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: minio
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: prometheus
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: postgres
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mysql
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
@ -805,6 +805,11 @@ spec:
                node appears to use the IP of the ingress node; this requires a
                permissive L2 network. [Default: Tunnel]'
              type: string
+            bpfHostConntrackBypass:
+              description: 'BPFHostConntrackBypass Controls whether to bypass Linux
+                conntrack in BPF mode for workloads and services. [Default: true
+                - bypass Linux conntrack]'
+              type: boolean
            bpfKubeProxyEndpointSlicesEnabled:
              description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls
                whether Felix's embedded kube-proxy accepts EndpointSlices or not.
@ -1337,8 +1342,8 @@
              type: boolean
            vxlanEnabled:
              description: 'VXLANEnabled overrides whether Felix should create the
-                VXLAN tunnel device for VXLAN networking. Optional as Felix determines
-                this based on the existing IP pools. [Default: nil (unset)]'
+                VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix
+                determines this based on the existing IP pools. [Default: nil (unset)]'
              type: boolean
            vxlanMTU:
              description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel
@ -2771,7 +2776,7 @@
                for internal use only.'
              type: boolean
            natOutgoing:
-              description: When nat-outgoing is true, packets sent from Calico networked
+              description: When natOutgoing is true, packets sent from Calico networked
                containers in this pool to destinations outside of this pool will
                be masqueraded.
              type: boolean
@ -9646,6 +9651,14 @@ spec:
                  type: string
                type: object
              type: array
+            fipsMode:
+              description: 'FIPSMode uses images and features only that are using
+                FIPS 140-2 validated cryptographic modules and standards. Default:
+                Disabled'
+              enum:
+              - Enabled
+              - Disabled
+              type: string
            flexVolumePath:
              description: FlexVolumePath optionally specifies a custom path for
                FlexVolume. If not specified, FlexVolume will be enabled by default.
@ -9689,7 +9702,7 @@
            kubeletVolumePluginPath:
              description: 'KubeletVolumePluginPath optionally specifies enablement
                of Calico CSI plugin. If not specified, CSI will be enabled by default.
-                If set to "None", CSI will be disabled. Default: /var/lib/kubelet'
+                If set to ''None'', CSI will be disabled. Default: /var/lib/kubelet'
              type: string
            kubernetesProvider:
              description: KubernetesProvider specifies a particular provider of
@ -15798,6 +15811,14 @@ spec:
                    type: string
                  type: object
                type: array
+              fipsMode:
+                description: 'FIPSMode uses images and features only that are
+                  using FIPS 140-2 validated cryptographic modules and standards.
+                  Default: Disabled'
+                enum:
+                - Enabled
+                - Disabled
+                type: string
              flexVolumePath:
                description: FlexVolumePath optionally specifies a custom path
                  for FlexVolume. If not specified, FlexVolume will be enabled
@ -15840,8 +15861,9 @@
                type: array
              kubeletVolumePluginPath:
                description: 'KubeletVolumePluginPath optionally specifies enablement
-                  of Calico CSI plugin. If not specified, CSI will be enabled by default.
-                  If set to "None", CSI will be disabled. Default: /var/lib/kubelet'
+                  of Calico CSI plugin. If not specified, CSI will be enabled
+                  by default. If set to ''None'', CSI will be disabled. Default:
+                  /var/lib/kubelet'
                type: string
              kubernetesProvider:
                description: KubernetesProvider specifies a particular provider
@ -1,3 +1,34 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: proxmox
spec:
entryPoints:
- https
routes:
- kind: Rule
match: Host(`pve.k-space.ee`)
priority: 10
middlewares:
- name: proxmox-redirect
- name: traefik-sso@kubernetescrd
- name: traefik-proxmox-redirect@kubernetescrd
services:
- kind: Service
name: pve1
passHostHeader: true
port: 8006
responseForwarding:
flushInterval: 1ms
scheme: https
serversTransport: proxmox-servers-transport
tls:
secretName: pve
domains:
- main: pve.k-space.ee
sans:
- "*.k-space.ee"
apiVersion: traefik.containo.us/v1alpha1
kind: ServersTransport
metadata:
@ -56,98 +87,6 @@ data:
RWRmRHIzNTBpZkRCQkVuL3RvL3JUczFOVjhyOGpjcG14a2MzNjlSQXp3TmJiRVkKMVE9PQotLS0t
LUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
---
apiVersion: v1
kind: Service
metadata:
name: pve1
annotations:
traefik.ingress.kubernetes.io/service.serverstransport: traefik-proxmox-servers-transport@kubernetescrd
spec:
type: ExternalName
externalName: pve1.proxmox.infra.k-space.ee
ports:
- name: https
port: 8006
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: pve8
annotations:
traefik.ingress.kubernetes.io/service.serverstransport: traefik-proxmox-servers-transport@kubernetescrd
spec:
type: ExternalName
externalName: pve8.proxmox.infra.k-space.ee
ports:
- name: https
port: 8006
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: pve9
annotations:
traefik.ingress.kubernetes.io/service.serverstransport: traefik-proxmox-servers-transport@kubernetescrd
spec:
type: ExternalName
externalName: pve9.proxmox.infra.k-space.ee
ports:
- name: https
port: 8006
protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pve
annotations:
kubernetes.io/ingress.class: traefik
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd,traefik-proxmox-redirect@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
rules:
- host: proxmox.k-space.ee
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: whoami
port:
number: 80
- host: pve.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: pve1
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve8
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve9
port:
number: 8006
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
@ -8,10 +8,29 @@ websecure:
providers:
  kubernetesCRD:
    enabled: true
+    namespaces:
+      - traefik
+      - authelia
  kubernetesIngress:
    allowEmptyServices: true
    allowExternalNameServices: true
+    namespaces:
+      - argocd
+      - authelia
+      - camtiler
+      - drone
+      - elastic-system
+      - etherpad
+      - freescout
+      - grafana
+      - harbor
+      - kubernetes-dashboard
+      - logging
+      - longhorn-system
+      - phpmyadmin
+      - prometheus-operator
+      - wildduck
deployment:
  replicas: 2