Compare commits

..

50 Commits

Author SHA1 Message Date
e4dfde9562 argo docs 2 2024-12-15 06:34:47 +02:00
a82193f059 add argocd-image-updater 2024-12-15 06:28:42 +02:00
68a75b8389 migrate OIDC codemowers.io/v1alpha1 to v1beta1 2024-12-15 05:39:41 +02:00
5368fe90eb argo: add localhost callback for CLI login 2024-12-15 05:39:41 +02:00
cded6fde3f fixup argo docs 2024-12-15 05:39:41 +02:00
402ff86fde grafana: disable non-oauth login 2024-12-15 01:46:22 +02:00
272f60ab73 monitoring: mikrotik-exporter fix 2024-11-22 08:16:12 +02:00
9bcad2481b monitoring: Update node-exporter 2024-11-22 05:59:34 +02:00
c04a7b7f67 monitoring: Update mikrotik-exporter 2024-11-22 05:59:08 +02:00
c23fa07c5e monitoring: Update mikrotik-exporter 2024-11-19 15:48:31 +02:00
c1822888ec dont compile discourse assets 2024-10-25 14:44:27 +03:00
e26cac6d86 add discourse 2024-10-25 14:35:20 +03:00
d7ba4bc90e upgrade cnpg 2024-10-25 14:03:50 +03:00
da4df6c21d frigate: move storage to dedicated nfs share and offload transcoding to separate go2rtc deployment 2024-10-19 13:51:13 +03:00
2964034cd3 fix rosdump scheduling 2024-10-18 18:45:42 +03:00
ae525380b1 fix gitea oidc reg 2024-10-18 18:44:27 +03:00
4b9c3ad394 monitoring: Temporarily disable monitoring of core switches 2024-10-15 10:07:28 +03:00
dbebb39749 gitea: Bump version 2024-10-02 08:15:20 +03:00
6f15e45402 freeswitch: fix network policy 2024-10-01 22:32:16 +03:00
36bf431259 freeswitch: fix network policy 2024-10-01 20:27:08 +03:00
c14a313c57 frigate: enable recording and use openvino 2024-09-29 23:06:41 +03:00
15a2fd9375 add frigate 2024-09-29 21:34:31 +03:00
5bd6cf2317 freeswitch: add gitignore 2024-09-29 19:05:42 +03:00
407f691152 add freeswitch 2024-09-29 19:05:42 +03:00
e931f490c2 asterisk: update network policy 2024-09-29 19:05:42 +03:00
b96e8d16a6 expose harbor via traefik 2024-09-29 19:05:42 +03:00
15d4d44be7 expose traefik via ingress 2024-09-29 19:05:42 +03:00
52ce6eab0a expose harbor via traefik 2024-09-29 19:04:51 +03:00
e89d045f38 goredirect: add nopath env var 2024-09-13 21:54:49 +03:00
7e70315514 monitoring: Fix snmp-exporter 2024-09-12 22:15:10 +03:00
af5a048bcd replace ups 2024-09-12 21:54:46 +03:00
0005219f81 monitoring: Fix mikrotik-exporter formatting 2024-09-12 21:48:43 +03:00
813bb32e48 monitoring: Update UPS-es 2024-09-12 21:47:20 +03:00
0efae7baf9 unschedule harbor from storage nodes 2024-09-12 19:48:51 +03:00
be90b4e266 monitoring: Update mikrotik-exporter 2024-09-09 22:19:46 +03:00
999d17c384 rosdump: Use codemowers/git image 2024-09-09 08:45:21 +03:00
bacef8d438 remove logmower 2024-09-08 23:54:32 +03:00
60d1ba9b18 monitoring: Bump mikrotik-exporter again 2024-09-06 12:10:45 +03:00
dcb80e6638 monitoring: Bump mikrotik-exporter 2024-09-06 11:55:49 +03:00
95e0f97db2 grafana: Specify OIDC scopes on Grafana side 2024-09-05 09:32:34 +03:00
f5a7b44ae6 grafana: Add groups OIDC scope 2024-09-05 09:29:16 +03:00
be7e1d9459 grafana: Assign editor role for hackerspace members 2024-09-05 09:23:41 +03:00
cd807ebcde grafana: Allow OIDC assignment to admin role 2024-09-05 09:04:02 +03:00
eaac7f61a7 monitoring: Pin specific mikrotik-exporter image 2024-09-04 23:29:37 +03:00
a0d5a585e4 add and configure calico ippool 2024-09-04 23:12:35 +03:00
1f8f288f95 monitoring: Update Mikrotik exporter 2024-09-04 22:33:15 +03:00
9de1881647 monitoring: Enable Prometheus admin API 2024-09-04 22:28:01 +03:00
28904cdd63 make calico use ipip encapsulation 2024-09-04 22:27:36 +03:00
0df188db36 monitoring: README syntax fix 2024-09-04 07:12:56 +03:00
a42b79b5ac monitoring: Add doc.crds.dev ref 2024-09-04 07:12:21 +03:00
36 changed files with 2044 additions and 977 deletions

View File

@@ -1,65 +1,8 @@
 # Workflow
 Most applications in our Kubernetes cluster are managed by ArgoCD.
 Most notably operators are NOT managed by ArgoCD.
-Adding to `applications/`: `kubectl apply -f newapp.yaml`
-# Deployment
-To deploy ArgoCD:
-```bash
-helm repo add argo-cd https://argoproj.github.io/argo-helm
-kubectl create secret -n argocd generic argocd-secret # Initialize empty secret for sessions
-helm template -n argocd --release-name k6 argo-cd/argo-cd --include-crds -f values.yaml > argocd.yml
-kubectl apply -f argocd.yml -f application-extras.yml -n argocd
-kubectl -n argocd rollout restart deployment/k6-argocd-redis
-kubectl -n argocd rollout restart deployment/k6-argocd-repo-server
-kubectl -n argocd rollout restart deployment/k6-argocd-server
-kubectl -n argocd rollout restart deployment/k6-argocd-notifications-controller
-kubectl -n argocd rollout restart statefulset/k6-argocd-application-controller
-kubectl label -n argocd secret oidc-client-argocd-owner-secrets app.kubernetes.io/part-of=argocd
-```
-# Setting up Git secrets
-Generate SSH key to access Gitea:
-```
-ssh-keygen -t ecdsa -f id_ecdsa -C argocd.k-space.ee -P ''
-kubectl -n argocd create secret generic gitea-kube \
-  --from-literal=type=git \
-  --from-literal=url=git@git.k-space.ee:k-space/kube \
-  --from-file=sshPrivateKey=id_ecdsa
-kubectl -n argocd create secret generic gitea-kube-staging \
-  --from-literal=type=git \
-  --from-literal=url=git@git.k-space.ee:k-space/kube-staging \
-  --from-file=sshPrivateKey=id_ecdsa
-kubectl -n argocd create secret generic gitea-kube-members \
-  --from-literal=type=git \
-  --from-literal=url=git@git.k-space.ee:k-space/kube-members \
-  --from-file=sshPrivateKey=id_ecdsa
-kubectl -n argocd create secret generic gitea-members \
-  --from-literal=type=git \
-  --from-literal=url=git@git.k-space.ee:k-space/kube-members \
-  --from-file=sshPrivateKey=id_ecdsa
-kubectl label -n argocd secret gitea-kube argocd.argoproj.io/secret-type=repository
-kubectl label -n argocd secret gitea-kube-staging argocd.argoproj.io/secret-type=repository
-kubectl label -n argocd secret gitea-kube-members argocd.argoproj.io/secret-type=repository
-kubectl label -n argocd secret gitea-members argocd.argoproj.io/secret-type=repository
-rm -fv id_ecdsa
-```
-Have Gitea admin reset password for user `argocd` and log in with that account.
-Add the SSH key for user `argocd` from file `id_ecdsa.pub`.
-Delete any other SSH keys associated with Gitea user `argocd`.
-# Managing applications
-To update apps:
+## Managing applications
+Update apps (see TODO below):
 ```
 for j in asterisk bind camtiler etherpad freescout gitea grafana hackerspace nextcloud nyancat rosdump traefik wiki wildduck woodpecker; do
@@ -70,6 +13,10 @@ kind: Application
 metadata:
   name: $j
   namespace: argocd
+  annotations:
+    # Works with only Kustomize and Helm. Kustomize is easy, see https://github.com/argoproj-labs/argocd-image-updater/tree/master/manifests/base for an example.
+    argocd-image-updater.argoproj.io/image-list: TODO:^2 # semver 2.*.*
+    argocd-image-updater.argoproj.io/write-back-method: git
 spec:
   project: k-space.ee
   source:
@@ -88,3 +35,22 @@ EOF
 done
 find applications -name "*.yaml" -exec kubectl apply -n argocd -f {} \;
 ```
+### Repository secrets
+1. Generate keys locally with `ssh-keygen -f argo`
+2. Add `argo.pub` in `git.k-space.ee/<your>/<repo>` → Settings → Deploy keys
+3. Add `argo` (private key) at https://argocd.k-space.ee/settings/repos along with referenced repo.
+## Argo Deployment
+To deploy ArgoCD itself:
+```bash
+helm repo add argo-cd https://argoproj.github.io/argo-helm
+kubectl create secret -n argocd generic argocd-secret # Empty secret for sessions
+helm template -n argocd --release-name k6 argo-cd/argo-cd --include-crds -f values.yaml > argocd.yml
+kubectl apply -f argocd.yml -f application-extras.yml -f redis.yaml -f monitoring.yml -n argocd
+kubectl label -n argocd secret oidc-client-argocd-owner-secrets app.kubernetes.io/part-of=argocd
+kubectl -n argocd rollout restart deployment/k6-argocd-redis deployment/k6-argocd-repo-server deployment/k6-argocd-server deployment/k6-argocd-notifications-controller statefulset/k6-argocd-application-controller
+```
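For reference, a rendered Application from the loop above might look like the following once the image-updater `TODO` is filled in. This is only a sketch: the `nyancat` app name and the `harbor.k-space.ee/k-space/nyancat` image path are hypothetical placeholders, not values from this commit.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: nyancat
  namespace: argocd
  annotations:
    # alias=image:constraint — track semver 2.x tags of a hypothetical image
    argocd-image-updater.argoproj.io/image-list: app=harbor.k-space.ee/k-space/nyancat:^2
    # commit the bumped tag back to Git instead of patching the live Application
    argocd-image-updater.argoproj.io/write-back-method: git
spec:
  project: k-space.ee
  source:
    repoURL: git@git.k-space.ee:k-space/kube.git
    path: nyancat
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: nyancat
```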

View File

@@ -9,6 +9,7 @@ spec:
   uri: https://argocd.k-space.ee
   redirectUris:
     - https://argocd.k-space.ee/auth/callback
+    - http://localhost:8085/auth/callback
   allowedGroups:
     - k-space:kubernetes:admins
   grantTypes:
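The `localhost:8085` redirect URI added above is what enables browser-based SSO for the `argocd` CLI, which starts a temporary callback listener on that port during login. A sketch of the resulting flow, assuming the CLI's default SSO port:

```bash
# Opens the browser for OIDC login; the CLI listens on localhost:8085
# for the callback registered above.
argocd login argocd.k-space.ee --sso --sso-port 8085
```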

View File

@@ -2,17 +2,17 @@
 apiVersion: argoproj.io/v1alpha1
 kind: Application
 metadata:
-  name: logmower
+  name: argocd-image-updater
   namespace: argocd
 spec:
   project: k-space.ee
   source:
-    repoURL: 'git@git.k-space.ee:k-space/kube.git'
-    path: logmower
-    targetRevision: HEAD
+    repoURL: 'https://github.com/argoproj-labs/argocd-image-updater.git'
+    path: manifests/base
+    targetRevision: stable
   destination:
     server: 'https://kubernetes.default.svc'
-    namespace: logmower
+    namespace: argocd
   syncPolicy:
     automated:
       prune: true

View File

@@ -69,6 +69,10 @@ configs:
       p, role:developers, applications, action/apps/Deployment/restart, default/camtiler, allow
       p, role:developers, applications, sync, default/camtiler, allow
       p, role:developers, applications, update, default/camtiler, allow
+      # argocd-image-updater
+      p, role:image-updater, applications, get, */*, allow
+      p, role:image-updater, applications, update, */*, allow
+      g, image-updater, role:image-updater
   cm:
     admin.enabled: "false"
     resource.customizations: |
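The `g, image-updater, role:image-updater` grant assumes a local ArgoCD account named `image-updater` that authenticates with an API token. That account is not part of this diff; a sketch of what it typically looks like in the Helm values, plus token generation:

```yaml
configs:
  cm:
    # hypothetical: declare the local account the RBAC grant above refers to
    accounts.image-updater: apiKey
```

```bash
# Generate the API token argocd-image-updater authenticates with
argocd account generate-token --account image-updater
```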

View File

@@ -32,14 +32,8 @@ spec:
           cidr: 172.20.8.241/32 # Erki A
     - from:
       - ipBlock:
-          cidr: 195.222.16.36/32 # Elisa SIP
-    - from:
-      - ipBlock:
-          cidr: 195.222.16.38/32 # Elisa SIP
+          cidr: 212.47.211.10/32 # Elisa SIP
   egress:
     - to:
       - ipBlock:
-          cidr: 195.222.16.36/32 # Elisa SIP
-    - to:
-      - ipBlock:
-          cidr: 195.222.16.38/32 # Elisa SIP
+          cidr: 212.47.211.10/32 # Elisa SIP

View File

@@ -4,5 +4,5 @@ To deploy:
 ```
 kubectl apply --server-side -f \
-  https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.23/releases/cnpg-1.23.2.yaml
+  https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml
 ```
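After applying, the operator rollout can be verified; a sketch assuming the upstream manifest's default namespace and deployment name:

```bash
kubectl -n cnpg-system rollout status deployment/cnpg-controller-manager
```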

382
discourse/application.yaml Normal file
View File

@@ -0,0 +1,382 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: discourse
annotations:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- "*.k-space.ee"
secretName:
rules:
- host: "discourse.k-space.ee"
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: discourse
port:
name: http
---
apiVersion: v1
kind: Service
metadata:
name: discourse
spec:
type: ClusterIP
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
selector:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: discourse
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: discourse
annotations:
reloader.stakater.com/auto: "true"
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
spec:
serviceAccountName: discourse
securityContext:
fsGroup: 0
fsGroupChangePolicy: Always
initContainers:
containers:
- name: discourse
image: docker.io/bitnami/discourse:3.3.2-debian-12-r0
imagePullPolicy: "IfNotPresent"
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
env:
- name: BITNAMI_DEBUG
value: "true"
- name: DISCOURSE_USERNAME
valueFrom:
secretKeyRef:
name: discourse-password
key: username
- name: DISCOURSE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-password
key: password
- name: DISCOURSE_PORT_NUMBER
value: "8080"
- name: DISCOURSE_EXTERNAL_HTTP_PORT_NUMBER
value: "80"
- name: DISCOURSE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgresql
key: password
- name: POSTGRESQL_CLIENT_CREATE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: POSTGRESQL_CLIENT_POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-redis
key: redis-password
envFrom:
- configMapRef:
name: discourse
- secretRef:
name: discourse-email
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: 500
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
httpGet:
path: /srv/status
port: http
initialDelaySeconds: 100
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
resources:
limits:
cpu: "6.0"
ephemeral-storage: 2Gi
memory: 12288Mi
requests:
cpu: "1.0"
ephemeral-storage: 50Mi
memory: 3072Mi
volumeMounts:
- name: discourse-data
mountPath: /bitnami/discourse
subPath: discourse
- name: sidekiq
image: docker.io/bitnami/discourse:3.3.2-debian-12-r0
imagePullPolicy: "IfNotPresent"
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
command:
- /opt/bitnami/scripts/discourse/entrypoint.sh
args:
- /opt/bitnami/scripts/discourse-sidekiq/run.sh
env:
- name: BITNAMI_DEBUG
value: "true"
- name: DISCOURSE_USERNAME
valueFrom:
secretKeyRef:
name: discourse-password
key: username
- name: DISCOURSE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-password
key: password
- name: DISCOURSE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgresql
key: password
- name: DISCOURSE_POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-redis
key: redis-password
envFrom:
- configMapRef:
name: discourse
- secretRef:
name: discourse-email
livenessProbe:
exec:
command: ["/bin/sh", "-c", "pgrep -f ^sidekiq"]
initialDelaySeconds: 500
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command: ["/bin/sh", "-c", "pgrep -f ^sidekiq"]
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
resources:
limits:
cpu: 750m
ephemeral-storage: 2Gi
memory: 768Mi
requests:
cpu: 500m
ephemeral-storage: 50Mi
memory: 512Mi
volumeMounts:
- name: discourse-data
mountPath: /bitnami/discourse
subPath: discourse
volumes:
- name: discourse-data
persistentVolumeClaim:
claimName: discourse-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: discourse-data
namespace: discourse
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "3Gi"
storageClassName: "proxmox-nas"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: discourse
namespace: discourse
data:
DISCOURSE_HOST: "discourse.k-space.ee"
DISCOURSE_SKIP_INSTALL: "yes"
DISCOURSE_PRECOMPILE_ASSETS: "no"
DISCOURSE_SITE_NAME: "K-Space Discourse"
DISCOURSE_USERNAME: "k-space"
DISCOURSE_EMAIL: "dos4dev@k-space.ee"
DISCOURSE_REDIS_HOST: "discourse-redis"
DISCOURSE_REDIS_PORT_NUMBER: "6379"
DISCOURSE_DATABASE_HOST: "discourse-postgres-rw"
DISCOURSE_DATABASE_PORT_NUMBER: "5432"
DISCOURSE_DATABASE_NAME: "discourse"
DISCOURSE_DATABASE_USER: "discourse"
POSTGRESQL_CLIENT_DATABASE_HOST: "discourse-postgres-rw"
POSTGRESQL_CLIENT_DATABASE_PORT_NUMBER: "5432"
POSTGRESQL_CLIENT_POSTGRES_USER: "postgres"
POSTGRESQL_CLIENT_CREATE_DATABASE_NAME: "discourse"
POSTGRESQL_CLIENT_CREATE_DATABASE_EXTENSIONS: "hstore,pg_trgm"
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: discourse
namespace: discourse
spec:
displayName: Discourse
uri: https://discourse.k-space.ee
redirectUris:
- https://discourse.k-space.ee/auth/oidc/callback
allowedGroups:
- k-space:floor
- k-space:friends
grantTypes:
- authorization_code
- refresh_token
responseTypes:
- code
availableScopes:
- openid
- profile
pkce: false
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: discourse-redis
namespace: discourse
spec:
size: 32
mapping:
- key: redis-password
value: "%(plaintext)s"
- key: REDIS_URI
value: "redis://:%(plaintext)s@discourse-redis"
---
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
name: discourse-redis
namespace: discourse
spec:
authentication:
passwordFromSecret:
key: redis-password
name: discourse-redis
replicas: 3
resources:
limits:
cpu: 1000m
memory: 1Gi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: discourse-redis
app.kubernetes.io/part-of: dragonfly
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: discourse-postgres
namespace: discourse
spec:
instances: 1
enableSuperuserAccess: true
bootstrap:
initdb:
database: discourse
owner: discourse
secret:
name: discourse-postgresql
dataChecksums: true
encoding: 'UTF8'
storage:
size: 10Gi
storageClass: postgres
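The manifest above references secrets it does not create: `discourse-password` and `discourse-email` are expected to pre-exist, as is `discourse-postgresql` referenced by both the Deployment and the CNPG bootstrap (`discourse-redis` comes from the SecretClaim). A hedged sketch of creating the manual ones; the SMTP key names are assumptions based on Bitnami's `DISCOURSE_SMTP_*` variables and the host/port values are placeholders — check them against the Bitnami image docs:

```bash
kubectl -n discourse create secret generic discourse-password \
  --from-literal=username=k-space \
  --from-literal=password="$(openssl rand -base64 24)"
# Assumed Bitnami SMTP variable names; adjust to the real mail relay
kubectl -n discourse create secret generic discourse-email \
  --from-literal=DISCOURSE_SMTP_HOST=smtp.example.com \
  --from-literal=DISCOURSE_SMTP_PORT=587 \
  --from-literal=DISCOURSE_SMTP_USER=discourse \
  --from-literal=DISCOURSE_SMTP_PASSWORD=changeme
```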

View File

@@ -1,9 +1,8 @@
 ---
-apiVersion: codemowers.io/v1alpha1
-kind: OIDCGWMiddlewareClient
+apiVersion: codemowers.cloud/v1beta1
+kind: OIDCMiddlewareClient
 metadata:
-  name: sso
-  namespace: etherpad
+  name: etherpad
 spec:
   displayName: Etherpad
   uri: 'https://pad.k-space.ee/'

1
freeswitch/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
PASSWORDS.xml

View File

@@ -0,0 +1,14 @@
<include>
<X-PRE-PROCESS cmd="set" data="default_password=">
<X-PRE-PROCESS cmd="set" data="ipcall_password="/>
<X-PRE-PROCESS cmd="set" data="1000_password="/>
<X-PRE-PROCESS cmd="set" data="1001_password="/>
<X-PRE-PROCESS cmd="set" data="1002_password="/>
<X-PRE-PROCESS cmd="set" data="1003_password="/>
<X-PRE-PROCESS cmd="set" data="1004_password="/>
<X-PRE-PROCESS cmd="set" data="1005_password="/>
<X-PRE-PROCESS cmd="set" data="1006_password="/>
<X-PRE-PROCESS cmd="set" data="1007_password="/>
<X-PRE-PROCESS cmd="set" data="1008_password="/>
<X-PRE-PROCESS cmd="set" data="1009_password="/>
</include>

3
freeswitch/README.md Normal file
View File

@@ -0,0 +1,3 @@
```
kubectl -n freeswitch create secret generic freeswitch-passwords --from-file freeswitch/PASSWORDS.xml
```
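The PASSWORDS.xml template shown earlier ships with empty password attributes; one way to fill them in before creating the secret, assuming the template is saved as `freeswitch/PASSWORDS.xml` (a sketch using GNU sed):

```bash
# Fill each empty *_password= attribute with a random value
for user in default ipcall 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009; do
  sed -i "s|${user}_password=|${user}_password=$(openssl rand -hex 12)|" \
    freeswitch/PASSWORDS.xml
done
```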

567
freeswitch/application.yaml Normal file
View File

@@ -0,0 +1,567 @@
apiVersion: v1
kind: Service
metadata:
name: freeswitch
namespace: freeswitch
annotations:
external-dns.alpha.kubernetes.io/hostname: freeswitch.k-space.ee
metallb.universe.tf/address-pool: eenet
metallb.universe.tf/ip-allocated-from-pool: eenet
spec:
ports:
- name: sip-internal-udp
protocol: UDP
port: 5060
targetPort: 5060
nodePort: 31787
- name: sip-nat-udp
protocol: UDP
port: 5070
targetPort: 5070
nodePort: 32241
- name: sip-external-udp
protocol: UDP
port: 5080
targetPort: 5080
nodePort: 31354
- name: sip-data-10000
protocol: UDP
port: 10000
targetPort: 10000
nodePort: 30786
- name: sip-data-10001
protocol: UDP
port: 10001
targetPort: 10001
nodePort: 31788
- name: sip-data-10002
protocol: UDP
port: 10002
targetPort: 10002
nodePort: 30247
- name: sip-data-10003
protocol: UDP
port: 10003
targetPort: 10003
nodePort: 32389
- name: sip-data-10004
protocol: UDP
port: 10004
targetPort: 10004
nodePort: 30723
- name: sip-data-10005
protocol: UDP
port: 10005
targetPort: 10005
nodePort: 30295
- name: sip-data-10006
protocol: UDP
port: 10006
targetPort: 10006
nodePort: 30782
- name: sip-data-10007
protocol: UDP
port: 10007
targetPort: 10007
nodePort: 32165
- name: sip-data-10008
protocol: UDP
port: 10008
targetPort: 10008
nodePort: 30282
- name: sip-data-10009
protocol: UDP
port: 10009
targetPort: 10009
nodePort: 31325
- name: sip-data-10010
protocol: UDP
port: 10010
targetPort: 10010
nodePort: 31234
selector:
app: freeswitch
type: LoadBalancer
externalTrafficPolicy: Local
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
internalTrafficPolicy: Cluster
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: freeswitch-sounds
namespace: freeswitch
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
storageClassName: longhorn
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: freeswitch
namespace: freeswitch
labels:
app: freeswitch
annotations:
reloader.stakater.com/auto: "true"
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: freeswitch
template:
metadata:
labels:
app: freeswitch
spec:
volumes:
- name: config
configMap:
name: freeswitch-config
defaultMode: 420
- name: directory
configMap:
name: freeswitch-directory
defaultMode: 420
- name: sounds
persistentVolumeClaim:
claimName: freeswitch-sounds
- name: passwords
secret:
secretName: freeswitch-passwords
containers:
- name: freeswitch
image: dheaps/freeswitch
env:
- name: SOUND_TYPES
value: en-us-callie
- name: SOUND_RATES
value: "32000"
resources: {}
volumeMounts:
- name: config
mountPath: /etc/freeswitch/sip_profiles/external/ipcall.xml
subPath: ipcall.xml
- name: config
mountPath: /etc/freeswitch/dialplan/default/00_outbound_ipcall.xml
subPath: 00_outbound_ipcall.xml
- name: config
mountPath: /etc/freeswitch/dialplan/public.xml
subPath: dialplan.xml
- name: config
mountPath: /etc/freeswitch/autoload_configs/switch.conf.xml
subPath: switch.xml
- name: config
mountPath: /etc/freeswitch/vars.xml
subPath: vars.xml
- name: passwords
mountPath: /etc/freeswitch/PASSWORDS.xml
subPath: PASSWORDS.xml
- name: directory
mountPath: /etc/freeswitch/directory/default
- name: sounds
mountPath: /usr/share/freeswitch/sounds
---
apiVersion: v1
kind: ConfigMap
metadata:
name: freeswitch-config
namespace: freeswitch
data:
dialplan.xml: |
<!--
NOTICE:
This context is usually accessed via the external sip profile listening on port 5080.
It is recommended to have separate inbound and outbound contexts. Not only for security
but clearing up why you would need to do such a thing. You don't want outside un-authenticated
callers hitting your default context which allows dialing calls thru your providers and results
in Toll Fraud.
-->
<!-- http://wiki.freeswitch.org/wiki/Dialplan_XML -->
<include>
<context name="public">
<extension name="unloop">
<condition field="${unroll_loops}" expression="^true$"/>
<condition field="${sip_looped_call}" expression="^true$">
<action application="deflect" data="${destination_number}"/>
</condition>
</extension>
<!--
Tag anything pass thru here as an outside_call so you can make sure not
to create any routing loops based on the conditions that it came from
the outside of the switch.
-->
<extension name="outside_call" continue="true">
<condition>
<action application="set" data="outside_call=true"/>
<action application="export" data="RFC2822_DATE=${strftime(%a, %d %b %Y %T %z)}"/>
</condition>
</extension>
<extension name="call_debug" continue="true">
<condition field="${call_debug}" expression="^true$" break="never">
<action application="info"/>
</condition>
</extension>
<extension name="public_extensions">
<condition field="destination_number" expression="^(10[01][0-9])$">
<action application="transfer" data="$1 XML default"/>
</condition>
</extension>
<extension name="public_conference_extensions">
<condition field="destination_number" expression="^(3[5-8][01][0-9])$">
<action application="transfer" data="$1 XML default"/>
</condition>
</extension>
<!--
You can place files in the public directory to get included.
-->
<X-PRE-PROCESS cmd="include" data="public/*.xml"/>
<!--
If you have made it this far lets challenge the caller and if they authenticate
lets try what they dialed in the default context. (commented out by default)
-->
<!-- TODO:
<extension name="check_auth" continue="true">
<condition field="${sip_authorized}" expression="^true$" break="never">
<anti-action application="respond" data="407"/>
</condition>
</extension>
-->
<extension name="transfer_to_default">
<condition>
<!-- TODO: proper ring grouping -->
<action application="bridge" data="user/1004@freeswitch.k-space.ee,user/1003@freeswitch.k-space.ee,sofia/gateway/ipcall/53543824"/>
</condition>
</extension>
</context>
</include>
ipcall.xml: |
<include>
<gateway name="ipcall">
<param name="proxy" value="sip.ipcall.ee"/>
<param name="register" value="true"/>
<param name="realm" value="sip.ipcall.ee"/>
<param name="username" value="6659652"/>
<param name="password" value="$${ipcall_password}"/>
<param name="from-user" value="6659652"/>
<param name="from-domain" value="sip.ipcall.ee"/>
<param name="extension" value="ring_group/default"/>
</gateway>
</include>
00_outbound_ipcall.xml: |
<extension name="outbound">
<!-- TODO: check toll_allow ? -->
<condition field="destination_number" expression="^(\d+)$">
<action application="set" data="sip_invite_domain=sip.ipcall.ee"/>
<action application="bridge" data="sofia/gateway/ipcall/${destination_number}"/>
</condition>
</extension>
switch.xml: |
<configuration name="switch.conf" description="Core Configuration">
<cli-keybindings>
<key name="1" value="help"/>
<key name="2" value="status"/>
<key name="3" value="show channels"/>
<key name="4" value="show calls"/>
<key name="5" value="sofia status"/>
<key name="6" value="reloadxml"/>
<key name="7" value="console loglevel 0"/>
<key name="8" value="console loglevel 7"/>
<key name="9" value="sofia status profile internal"/>
<key name="10" value="sofia profile internal siptrace on"/>
<key name="11" value="sofia profile internal siptrace off"/>
<key name="12" value="version"/>
</cli-keybindings>
<default-ptimes>
</default-ptimes>
<settings>
<param name="colorize-console" value="true"/>
<param name="dialplan-timestamps" value="false"/>
<param name="max-db-handles" value="50"/>
<param name="db-handle-timeout" value="10"/>
<param name="max-sessions" value="1000"/>
<param name="sessions-per-second" value="30"/>
<param name="loglevel" value="debug"/>
<param name="mailer-app" value="sendmail"/>
<param name="mailer-app-args" value="-t"/>
<param name="dump-cores" value="yes"/>
<param name="rtp-start-port" value="10000"/>
<param name="rtp-end-port" value="10010"/>
</settings>
</configuration>
vars.xml: |
<include>
<X-PRE-PROCESS cmd="set" data="disable_system_api_commands=true"/>
<X-PRE-PROCESS cmd="set" data="sound_prefix=$${sounds_dir}/en/us/callie"/>
<X-PRE-PROCESS cmd="set" data="domain=freeswitch.k-space.ee"/>
<X-PRE-PROCESS cmd="set" data="domain_name=$${domain}"/>
<X-PRE-PROCESS cmd="set" data="hold_music=local_stream://moh"/>
<X-PRE-PROCESS cmd="set" data="use_profile=external"/>
<X-PRE-PROCESS cmd="set" data="rtp_sdes_suites=AEAD_AES_256_GCM_8|AEAD_AES_128_GCM_8|AES_CM_256_HMAC_SHA1_80|AES_CM_192_HMAC_SHA1_80|AES_CM_128_HMAC_SHA1_80|AES_CM_256_HMAC_SHA1_32|AES_CM_192_HMAC_SHA1_32|AES_CM_128_HMAC_SHA1_32|AES_CM_128_NULL_AUTH"/>
<X-PRE-PROCESS cmd="set" data="global_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8"/>
<X-PRE-PROCESS cmd="set" data="outbound_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8"/>
<X-PRE-PROCESS cmd="set" data="xmpp_client_profile=xmppc"/>
<X-PRE-PROCESS cmd="set" data="xmpp_server_profile=xmpps"/>
<X-PRE-PROCESS cmd="set" data="bind_server_ip=auto"/>
<X-PRE-PROCESS cmd="stun-set" data="external_rtp_ip=host:freeswitch.k-space.ee"/>
<X-PRE-PROCESS cmd="stun-set" data="external_sip_ip=host:freeswitch.k-space.ee"/>
<X-PRE-PROCESS cmd="set" data="unroll_loops=true"/>
<X-PRE-PROCESS cmd="set" data="outbound_caller_name=FreeSWITCH"/>
<X-PRE-PROCESS cmd="set" data="outbound_caller_id=0000000000"/>
<X-PRE-PROCESS cmd="set" data="call_debug=false"/>
<X-PRE-PROCESS cmd="set" data="console_loglevel=info"/>
<X-PRE-PROCESS cmd="set" data="default_areacode=372"/>
<X-PRE-PROCESS cmd="set" data="default_country=EE"/>
<X-PRE-PROCESS cmd="set" data="presence_privacy=false"/>
<X-PRE-PROCESS cmd="set" data="au-ring=%(400,200,383,417);%(400,2000,383,417)"/>
<X-PRE-PROCESS cmd="set" data="be-ring=%(1000,3000,425)"/>
<X-PRE-PROCESS cmd="set" data="ca-ring=%(2000,4000,440,480)"/>
<X-PRE-PROCESS cmd="set" data="cn-ring=%(1000,4000,450)"/>
<X-PRE-PROCESS cmd="set" data="cy-ring=%(1500,3000,425)"/>
<X-PRE-PROCESS cmd="set" data="cz-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="de-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="dk-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="dz-ring=%(1500,3500,425)"/>
<X-PRE-PROCESS cmd="set" data="eg-ring=%(2000,1000,475,375)"/>
<X-PRE-PROCESS cmd="set" data="es-ring=%(1500,3000,425)"/>
<X-PRE-PROCESS cmd="set" data="fi-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="fr-ring=%(1500,3500,440)"/>
<X-PRE-PROCESS cmd="set" data="hk-ring=%(400,200,440,480);%(400,3000,440,480)"/>
<X-PRE-PROCESS cmd="set" data="hu-ring=%(1250,3750,425)"/>
<X-PRE-PROCESS cmd="set" data="il-ring=%(1000,3000,400)"/>
<X-PRE-PROCESS cmd="set" data="in-ring=%(400,200,425,375);%(400,2000,425,375)"/>
<X-PRE-PROCESS cmd="set" data="jp-ring=%(1000,2000,420,380)"/>
<X-PRE-PROCESS cmd="set" data="ko-ring=%(1000,2000,440,480)"/>
<X-PRE-PROCESS cmd="set" data="pk-ring=%(1000,2000,400)"/>
<X-PRE-PROCESS cmd="set" data="pl-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="ro-ring=%(1850,4150,475,425)"/>
<X-PRE-PROCESS cmd="set" data="rs-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="ru-ring=%(800,3200,425)"/>
<X-PRE-PROCESS cmd="set" data="sa-ring=%(1200,4600,425)"/>
<X-PRE-PROCESS cmd="set" data="tr-ring=%(2000,4000,450)"/>
<X-PRE-PROCESS cmd="set" data="uk-ring=%(400,200,400,450);%(400,2000,400,450)"/>
<X-PRE-PROCESS cmd="set" data="us-ring=%(2000,4000,440,480)"/>
<X-PRE-PROCESS cmd="set" data="bong-ring=v=-7;%(100,0,941.0,1477.0);v=-7;>=2;+=.1;%(1400,0,350,440)"/>
<X-PRE-PROCESS cmd="set" data="beep=%(1000,0,640)"/>
<X-PRE-PROCESS cmd="set" data="sit=%(274,0,913.8);%(274,0,1370.6);%(380,0,1776.7)"/>
<X-PRE-PROCESS cmd="set" data="df_us_ssn=(?!219099999|078051120)(?!666|000|9\d{2})\d{3}(?!00)\d{2}(?!0{4})\d{4}"/>
<X-PRE-PROCESS cmd="set" data="df_luhn=?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|6(?:011|5[0-9]{2})[0-9]{12}|(?:2131|1800|35\d{3})\d{11}"/>
<XX-PRE-PROCESS cmd="set" data="digits_dialed_filter=(($${df_luhn})|($${df_us_ssn}))"/>
<X-PRE-PROCESS cmd="set" data="default_provider=sip.ipcall.ee"/>
<X-PRE-PROCESS cmd="set" data="default_provider_username="/>
<X-PRE-PROCESS cmd="set" data="default_provider_password="/>
<X-PRE-PROCESS cmd="set" data="default_provider_from_domain=sip.ipcall.ee"/>
<X-PRE-PROCESS cmd="set" data="default_provider_register=true"/>
<X-PRE-PROCESS cmd="set" data="default_provider_contact=1004"/>
<X-PRE-PROCESS cmd="set" data="sip_tls_version=tlsv1,tlsv1.1,tlsv1.2"/>
<X-PRE-PROCESS cmd="set" data="sip_tls_ciphers=ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"/>
<X-PRE-PROCESS cmd="set" data="internal_auth_calls=true"/>
<X-PRE-PROCESS cmd="set" data="internal_sip_port=5060"/>
<X-PRE-PROCESS cmd="set" data="internal_tls_port=5061"/>
<X-PRE-PROCESS cmd="set" data="internal_ssl_enable=false"/>
<X-PRE-PROCESS cmd="set" data="external_auth_calls=false"/>
<X-PRE-PROCESS cmd="set" data="external_sip_port=5080"/>
<X-PRE-PROCESS cmd="set" data="external_tls_port=5081"/>
<X-PRE-PROCESS cmd="set" data="external_ssl_enable=false"/>
<X-PRE-PROCESS cmd="set" data="rtp_video_max_bandwidth_in=3mb"/>
<X-PRE-PROCESS cmd="set" data="rtp_video_max_bandwidth_out=3mb"/>
<X-PRE-PROCESS cmd="set" data="suppress_cng=true"/>
<X-PRE-PROCESS cmd="set" data="rtp_liberal_dtmf=true"/>
<X-PRE-PROCESS cmd="set" data="video_mute_png=$${images_dir}/default-mute.png"/>
<X-PRE-PROCESS cmd="set" data="video_no_avatar_png=$${images_dir}/default-avatar.png"/>
<X-PRE-PROCESS cmd="include" data="PASSWORDS.xml"/>
</include>
---
apiVersion: v1
kind: ConfigMap
metadata:
name: freeswitch-directory
namespace: freeswitch
data:
1000.xml: |
<include>
<user id="1000">
<params>
<param name="password" value="$${1000_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1000"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1000"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1001.xml: |
<include>
<user id="1001">
<params>
<param name="password" value="$${1001_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1001"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1001"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1002.xml: |
<include>
<user id="1002">
<params>
<param name="password" value="$${1002_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1002"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1002"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1003.xml: |
<include>
<user id="1003">
<params>
<param name="password" value="$${1003_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1003"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value="Erki A"/>
<variable name="effective_caller_id_number" value="1003"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1004.xml: |
<include>
<user id="1004">
<params>
<param name="password" value="$${1004_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1004"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value="Erki A"/>
<variable name="effective_caller_id_number" value="1004"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1005.xml: |
<include>
<user id="1005">
<params>
<param name="password" value="$${1005_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1005"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1005"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1006.xml: |
<include>
<user id="1006">
<params>
<param name="password" value="$${1006_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1006"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1006"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1007.xml: |
<include>
<user id="1007">
<params>
<param name="password" value="$${1007_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1007"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1007"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1008.xml: |
<include>
<user id="1008">
<params>
<param name="password" value="$${1008_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1008"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1008"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1009.xml: |
<include>
<user id="1009">
<params>
<param name="password" value="$${1009_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1009"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1009"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
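Once the pod is up, the `ipcall` gateway registration defined in the ConfigMap above can be checked from the FreeSWITCH CLI; a sketch assuming the deployment name used in this file:

```bash
# Inside the freeswitch pod: show registration state of the ipcall gateway
kubectl -n freeswitch exec deploy/freeswitch -- fs_cli -x "sofia status gateway ipcall"
```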

View File

@@ -0,0 +1,49 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: freeswitch
spec:
podSelector:
matchLabels:
app: freeswitch
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
- from:
- ipBlock:
cidr: 100.101.0.0/16
- from:
- ipBlock:
cidr: 100.102.0.0/16
- from:
- ipBlock:
cidr: 81.90.125.224/32 # Lauri home
- from:
- ipBlock:
cidr: 172.20.8.241/32 # Erki A
- from:
- ipBlock:
cidr: 212.47.211.10/32 # Elisa SIP
- from:
- ipBlock:
cidr: 212.47.211.10/32 # Elisa SIP
egress:
- to:
- ipBlock:
cidr: 212.47.211.10/32 # Elisa SIP
- to:
- ipBlock:
cidr: 195.222.16.38/32 # Elisa SIP
- to:
ports:
- port: 53
protocol: UDP

4
frigate/README.md Normal file
View File

@@ -0,0 +1,4 @@
```
helm repo add blakeblackshear https://blakeblackshear.github.io/blakeshome-charts/
helm template -n frigate --release-name frigate blakeblackshear/frigate --include-crds -f values.yaml > application.yml
```
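The chart's `envFromSecrets` (see `values.yaml` below) requires both secrets to exist before install. A sketch with placeholder credentials; the MQTT key names mirror the `{FRIGATE_MQTT_USERNAME}`/`{FRIGATE_MQTT_PASSWORD}` substitutions in the Frigate config, and `FRIGATE_RTSP_PASSWORD` is an assumption based on Frigate's environment-variable convention:

```bash
kubectl -n frigate create secret generic frigate-mqtt-credentials \
  --from-literal=FRIGATE_MQTT_USERNAME=frigate \
  --from-literal=FRIGATE_MQTT_PASSWORD="$(openssl rand -hex 16)"
kubectl -n frigate create secret generic frigate-rstp-credentials \
  --from-literal=FRIGATE_RTSP_PASSWORD="$(openssl rand -hex 16)"
```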

279
frigate/application.yml Normal file
View File

@@ -0,0 +1,279 @@
---
# Source: frigate/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: frigate
namespace: frigate
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/managed-by: Helm
data:
config.yml: |
mqtt:
host: frigate-mqtt
port: 1883
topic_prefix: frigate
client_id: frigate
user: '{FRIGATE_MQTT_USERNAME}'
password: '{FRIGATE_MQTT_PASSWORD}'
stats_interval: 60
detectors:
# coral:
# type: edgetpu
# device: usb
#cpu1:
#type: cpu
ov:
type: openvino
device: CPU
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:
enabled: True
retain:
days: 3
mode: motion
events:
retain:
default: 30
mode: motion
cameras:
server_room:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/server_room
roles:
- detect
- rtmp
- record
chaos:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/chaos
roles:
- detect
- rtmp
- record
cyber:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/cyber
roles:
- detect
- rtmp
- record
workshop:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/workshop
roles:
- detect
- rtmp
- record
---
# Source: frigate/templates/config-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-config
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "100Mi"
storageClassName: "frigate-config"
---
# Source: frigate/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: frigate
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/version: "0.14.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 5000
protocol: TCP
targetPort: http
- name: http-auth
port: 8971
protocol: TCP
targetPort: http-auth
- name: rtmp
port: 1935
protocol: TCP
targetPort: rtmp
- name: rtsp
port: 8554
protocol: TCP
targetPort: rtsp
selector:
app.kubernetes.io/name: frigate
app.kubernetes.io/instance: frigate
---
# Source: frigate/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: frigate
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/version: "0.14.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 3
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: frigate
app.kubernetes.io/instance: frigate
template:
metadata:
labels:
app.kubernetes.io/name: frigate
app.kubernetes.io/instance: frigate
annotations:
checksum/configmap: 9de9e29d499af45a0e7392032d64d26d8e13e12211971a307f201c97ac91f173
spec:
containers:
- name: frigate
image: "ghcr.io/blakeblackshear/frigate:0.14.0"
imagePullPolicy: IfNotPresent
securityContext:
{}
ports:
- name: http
containerPort: 5000
protocol: TCP
- name: http-auth
containerPort: 8971
protocol: TCP
- name: rtmp
containerPort: 1935
protocol: TCP
- name: rtsp
containerPort: 8554
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
scheme: HTTP
initialDelaySeconds: 30
failureThreshold: 5
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /
port: http
scheme: HTTP
initialDelaySeconds: 30
failureThreshold: 5
timeoutSeconds: 10
env:
envFrom:
- secretRef:
name: frigate-rstp-credentials
- secretRef:
name: frigate-mqtt-credentials
volumeMounts:
- mountPath: /config/config.yml
subPath: config.yml
name: configmap
- mountPath: /config
name: config
- mountPath: /data
name: data
- mountPath: /media
name: media
- name: dshm
mountPath: /dev/shm
- name: tmp
mountPath: /tmp
resources:
{}
volumes:
- name: configmap
configMap:
name: frigate
- name: config
persistentVolumeClaim:
claimName: frigate-config
- name: data
emptyDir: {}
- name: media
persistentVolumeClaim:
claimName: frigate-storage
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 4Gi
- name: tmp
emptyDir:
medium: Memory
sizeLimit: 4Gi
---
# Source: frigate/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: frigate
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/version: "0.14.0"
app.kubernetes.io/managed-by: Helm
annotations:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: frigate-frigate@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- "*.k-space.ee"
secretName:
rules:
- host: "frigate.k-space.ee"
http:
paths:
- path: /
pathType: "ImplementationSpecific"
backend:
service:
name: frigate
port:
name: http

10
frigate/auth.yml Normal file
View File

@@ -0,0 +1,10 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: frigate
spec:
displayName: Frigate
uri: 'https://frigate.k-space.ee/'
allowedGroups:
- k-space:legalmember

12
frigate/rabbitmq.yml Normal file
View File

@@ -0,0 +1,12 @@
apiVersion: rabbitmq.com/v1beta1
kind: RabbitmqCluster
metadata:
name: frigate-mqtt
spec:
replicas: 3
persistence:
storageClassName: rabbitmq
storage: 10Gi
rabbitmq:
additionalPlugins:
- rabbitmq_mqtt
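The RabbitMQ cluster operator publishes generated credentials in a `<name>-default-user` secret; a hedged sketch of reading them out to populate the MQTT credentials Frigate expects:

```bash
kubectl -n frigate get secret frigate-mqtt-default-user \
  -o jsonpath='{.data.username}' | base64 -d
kubectl -n frigate get secret frigate-mqtt-default-user \
  -o jsonpath='{.data.password}' | base64 -d
```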

28
frigate/storage-class.yml Normal file
View File

@@ -0,0 +1,28 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: frigate-config
provisioner: csi.proxmox.sinextra.dev
parameters:
cache: none
csi.storage.k8s.io/fstype: xfs
ssd: 'true'
storage: ks-pvs
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: frigate-data
provisioner: csi.proxmox.sinextra.dev
parameters:
cache: none
csi.storage.k8s.io/fstype: xfs
shared: 'true'
ssd: 'false'
storage: ks-pvs-nas
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer

32
frigate/storage.yml Normal file
View File

@@ -0,0 +1,32 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: frigate-storage
spec:
persistentVolumeReclaimPolicy: Retain
capacity:
storage: 1Ti
accessModes:
- ReadWriteMany
storageClassName: ""
nfs:
server: 172.21.0.7
path: /nas/k6/frigate
mountOptions:
- vers=4
- minorversion=1
- noac
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: frigate-storage
spec:
volumeName: frigate-storage
storageClassName: ""
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Ti

80
frigate/transcode.yml Normal file
View File

@@ -0,0 +1,80 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: go2rtc
labels:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc
template:
metadata:
labels:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- frigate
topologyKey: "kubernetes.io/hostname"
# nodeSelector:
# dedicated: nvr
# tolerations:
# - key: dedicated
# operator: Equal
# value: nvr
# effect: NoSchedule
containers:
- name: go2rtc
image: alexxit/go2rtc
ports:
- name: rtsp
containerPort: 8554
protocol: TCP
- name: api
containerPort: 1984
protocol: TCP
volumeMounts:
- mountPath: /config/go2rtc.yaml
subPath: config.yml
name: config
resources:
{}
volumes:
- name: config
secret:
secretName: go2rtc-config
items:
- key: config.yml
path: config.yml
---
apiVersion: v1
kind: Service
metadata:
name: go2rtc
labels:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc
spec:
type: ClusterIP
ipFamilyPolicy: SingleStack
ports:
- name: rtsp
port: 8554
protocol: TCP
targetPort: rtsp
selector:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc

178
frigate/values.yaml Normal file
View File

@@ -0,0 +1,178 @@
# Default values for frigate.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate
image:
# -- Docker registry/repository to pull the image from
repository: ghcr.io/blakeblackshear/frigate
# -- Overrides the default tag (appVersion) used in Chart.yaml ([Docker Hub](https://hub.docker.com/r/blakeblackshear/frigate/tags?page=1))
tag:
# -- Docker image pull policy
pullPolicy: IfNotPresent
# -- Docker image pull policy
imagePullSecrets: []
# -- additional ENV variables to set. Prefix with FRIGATE_ to target Frigate configuration values
env: {}
# TZ: UTC
# -- set environment variables from Secret(s)
envFromSecrets:
# secrets are required before `helm install`
- frigate-rstp-credentials
- frigate-mqtt-credentials
coral:
# -- enables the use of a Coral device
enabled: false
# -- path on the host to which to mount the Coral device
hostPath: /dev/bus/usb
gpu:
nvidia:
# -- Enables NVIDIA GPU compatibility. Must also use the "amd64nvidia" tagged image
enabled: false
# -- Overrides the default runtimeClassName
runtimeClassName:
# -- amount of shared memory to use for caching
shmSize: 4Gi
# -- use memory for tmpfs (mounted to /tmp)
tmpfs:
enabled: true
sizeLimit: 4Gi
# -- frigate configuration - see [Docs](https://docs.frigate.video/configuration/index) for more info
config: |
mqtt:
host: frigate-mqtt
port: 1883
topic_prefix: frigate
client_id: frigate
user: '{FRIGATE_MQTT_USERNAME}'
password: '{FRIGATE_MQTT_PASSWORD}'
stats_interval: 60
detectors:
# coral:
# type: edgetpu
# device: usb
#cpu1:
#type: cpu
ov:
type: openvino
device: CPU
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:
enabled: True
retain:
days: 3
mode: motion
events:
retain:
default: 30
mode: motion
cameras:
server_room:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/server_room
roles:
- detect
- rtmp
- record
chaos:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/chaos
roles:
- detect
- rtmp
- record
cyber:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/cyber
roles:
- detect
- rtmp
- record
workshop:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/workshop
roles:
- detect
- rtmp
- record
# Probes configuration
probes:
liveness:
enabled: true
initialDelaySeconds: 30
failureThreshold: 5
timeoutSeconds: 10
readiness:
enabled: true
initialDelaySeconds: 30
failureThreshold: 5
timeoutSeconds: 10
startup:
enabled: false
failureThreshold: 30
periodSeconds: 10
service:
type: ClusterIP
port: 5000
annotations: {}
labels: {}
loadBalancerIP:
ipFamilyPolicy: SingleStack
ipFamilies: []
ingress:
enabled: true
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: frigate-frigate@kubernetescrd
hosts:
- host: frigate.k-space.ee
paths:
- path: '/'
portName: http
tls:
- hosts:
- "*.k-space.ee"
persistence:
config:
enabled: true
storageClass: "frigate-config"
accessMode: ReadWriteOnce
size: 100Mi
skipuninstall: false
media:
enabled: true
existingClaim: "frigate-storage"
skipuninstall: true

View File

@@ -53,6 +53,7 @@ spec:
   availableScopes:
     - openid
     - profile
+  overrideIncomingScopes: true
   pkce: false
   secretRefreshPod:
     apiVersion: v1
@@ -121,7 +122,7 @@ spec:
         runAsNonRoot: true
       containers:
         - name: gitea
-          image: gitea/gitea:1.22.1-rootless
+          image: gitea/gitea:1.22.2-rootless
           imagePullPolicy: IfNotPresent
           securityContext:
             readOnlyRootFilesystem: true

View File

@@ -18,6 +18,7 @@ spec:
   availableScopes:
     - openid
     - profile
+    - groups
   tokenEndpointAuthMethod: none
 ---
 apiVersion: v1
@@ -49,14 +50,17 @@ data:
     root_url = https://%(domain)s/
     [auth]
     oauth_allow_insecure_email_lookup=true
+    [auth.basic]
+    enabled = false
     [auth.generic_oauth]
     name = OAuth
     icon = signin
     enabled = true
-    empty_scopes = false
+    scopes = openid profile groups
     allow_sign_up = true
     use_pkce = true
-    role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || 'Viewer'
+    role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || contains(groups[*], 'k-space:floor') && 'Editor' || Viewer
+    allow_assign_grafana_admin = true
     [security]
     disable_initial_admin_creation = true
 ---

View File

@@ -34,6 +34,8 @@ spec:
           value: https://inventory.k-space.ee/m/inventory/add-slug/%s
         - name: GOREDIRECT_FOUND
           value: https://inventory.k-space.ee/m/inventory/%s/view
+        - name: GOREDIRECT_NOPATH
+          value: https://inventory.k-space.ee/m/inventory
         - name: MONGO_URI
           valueFrom:
             secretKeyRef:

View File

@ -1,4 +1,125 @@
--- ---
# Source: harbor/templates/core/core-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: harbor-core
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.1"
type: Opaque
data:
secretKey: "bm90LWEtc2VjdXJlLWtleQ=="
secret: "ZW92VFRmU3RSNGR4R2FPYg=="
tls.key: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdm96RWMrWEJ2RVkvbVgxaVlLOXk5MjhoQ2V5VXBiVTRwUWY0VHZQUzE4alFObklqClNUNnJNUS96ejdRR2hvSHpPci90YjNDQnE1ZjY1NjdQd3VscVpjcUxKK1FydzlZZEJyQlhFMnRzWHhwei81UGcKT1pWdG05b2lUVDJJRUw1N0xVYVhlNjRheG5GbkVHTTdmY3JOdlFyZzhGWU1BdGJhV3puMHUyWUNQS0xKWG0wRwpIQlUrWXcxOHM2Q2o2ZG5uRUtITVBSNDF3VTNSU01aTlAxcWovc1dlVDFWMWZGdlV6OFNlOFJLOGZHUE9CQjBsClhHVDd1U0wzZTZUc1N1ZHZMMDV0cmR4QzJhbjNpMTBvM2FtMkh4d1EramRWZUx2cXNrU2daWnBGck9lZUxTc08KdmR4SFJ0azNIRlY4UFlwMVBHZWR3NFlNSDFiU1ZkTDgxZ1JBbFFJREFRQUJBb0lCQVFDb3hjZXZHNTVZSXlScgp6OG1WOTNCdHhrbE9IL1NkUFFENTh6QTB4Zk5HMnYvSWFabmxZeE4vOWpBdUZMYis1aFRJUVh5TkN1cGZVTktUCjZKcnVlWTJhc1NaRjQrdjduaWJCYU9rSE0wdWVoTENmZTV2TGhSVzdTQjYvcVROZUdVakNsbGJQM2lxcS80Q1kKM0R1eU5ERzhNZW1BRmNhM3NSTmdYOW5yTGZQblBZU0s5d3NFbkhSS0N1ZldBeGJBZEw3QTZYemFEM3k2Qy9uUApTSEtzSHprM3hNcXVaeFBRTUIvR3dvSWo1RXRMUmZjcGRBbjYzNE9GdG5KNG1JaVJnM3E4WXNSSThSNFRlbkExCjdmMzBWaHB4djd4cFRhZjJoNGRhKzNnS0RqR0U0aEhyOVVNUWVKYnUzTGJVeno1Q2Rtck1HQUYyczFDdWlKOTQKbUpwRjIxUGhBb0dCQU5ZTkhhRlFSRTFoVHhiMFY3ajc5RStKY1NtajNtMnNKUk9WWWRrMG5DOHJ3QjF0amMycQozTE0rV3hJZlBMcmRuMXU0SzBnUGlLVW1lcnNWQzVaVk80M3IxT2dTVXBUanFteXBJdkNOdHBjQXVTeDA1RkYzClhGVXFxL3JhYWNGcmg3Rzh0cVpWNFRFa3QxQXRvb3ZOQmJTT0Z2UDVicEFwWHRRY2FrM0dXRUo5QW9HQkFPUGsKbUh2LzllVWxRaVQ3eHBhQ3lKMm54OHJEdFpEMCs4WnFDVHlteVd4TnhXWWYwY3hQempsaU93T040c09uY3krdQp6K3ZuSWVUNEV5QW9WTUJ4dG9pdlZtNWwydGM1aDVYKzdzSFpFNDRtcWRvMW8rQTNNcXZDWThKYmxzaGNyMEZtCjYwUit0Q3NZOXhObkcwRDY4WXdXT0lHSjVLVmNOc0pnYVo2UGdQbjVBb0dBZjFOOTlKOFNnaFVJUHRkSjl6ZW8KdEVkRmtVZHc0RHd1TWVBQ1BIb1hEZGd3aDFETHNUT1o1T1dLdHRUdUF4b2gyU1ZNV2FrajExbDBXQ1RFcDhBNwpsTWhiOVZBdzMzQXR5RVUwQzNQQTBVSVU1R1VFQzVHODBzeDNVTmpyWDZyZkpOTGtzejBTbjNEL3VpbnJMczNlClB2RG1wbW9SWXArNVVPTTJPTjFyVXVFQ2dZQXl1d2VLeXY3MUpLY2x6bnAxdE9WdDJ4U3B1WWxvbkhDVHliVUcKc0dZMFd0ZXZsVFBCMUo0bXZndy9EVTNHbWpjVkRVZ3h6b0VJRklWRmFsVDdoaFRKTnFiNTZtaWQ4cXVSSTNBSQo3UWJpWlE3NGlXcFNsbGNNVHppZjRuMmRXcXlVMVpGWDdSQzBhMWxORTIzSXVGUWZDQzlKL3VWTVBrdmVWMUlMCnZ3eEtHUUtCZ0JvcFJCMHhUalJtY0tiN2FGeVFub2tycUNoWE5rd0phNTFaekRUNU10Wm1kUUdYTTZ2WkxwTnkKb3IvUENnVXBoRVRKWjFJVEZEUDNrZkJOUk83TzRlQ3kvWVdtZVROZ3M4VlJvZXpiNUFWVnNURHdyVWdJeUc2SQpGdG9CQmgrOGZvcUdaQXlxN3BLR3JNc201RVd4aXZiOStYVHc0UldhaWFXZWdOK20weUJxCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg=="
tls.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURIekNDQWdlZ0F3SUJBZ0lRUFJDbFZlWlV1bTRrWklGOUxNb21HVEFOQmdrcWhraUc5dzBCQVFzRkFEQWEKTVJnd0ZnWURWUVFERXc5b1lYSmliM0l0ZEc5clpXNHRZMkV3SGhjTk1qUXdPVEE0TWpFeE5UUXdXaGNOTWpVdwpPVEE0TWpFeE5UUXdXakFhTVJnd0ZnWURWUVFERXc5b1lYSmliM0l0ZEc5clpXNHRZMkV3Z2dFaU1BMEdDU3FHClNJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUMrak1SejVjRzhSaitaZldKZ3IzTDNieUVKN0pTbHRUaWwKQi9oTzg5TFh5TkEyY2lOSlBxc3hEL1BQdEFhR2dmTTZ2KzF2Y0lHcmwvcm5ycy9DNldwbHlvc241Q3ZEMWgwRwpzRmNUYTJ4ZkduUC9rK0E1bFcyYjJpSk5QWWdRdm5zdFJwZDdyaHJHY1djUVl6dDl5czI5Q3VEd1Znd0MxdHBiCk9mUzdaZ0k4b3NsZWJRWWNGVDVqRFh5em9LUHAyZWNRb2N3OUhqWEJUZEZJeGswL1dxUCt4WjVQVlhWOFc5VFAKeEo3eEVyeDhZODRFSFNWY1pQdTVJdmQ3cE94SzUyOHZUbTJ0M0VMWnFmZUxYU2pkcWJZZkhCRDZOMVY0dStxeQpSS0JsbWtXczU1NHRLdzY5M0VkRzJUY2NWWHc5aW5VOFo1M0RoZ3dmVnRKVjB2eldCRUNWQWdNQkFBR2pZVEJmCk1BNEdBMVVkRHdFQi93UUVBd0lDcERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUlLd1lCQlFVSEF3SXcKRHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVOElIQ01yTENBdEtUMHd4ZnZxS21sR1NzZ0UwdwpEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ2NsTXpUYmNreHlRMWEwNWFjRS8xbTlNQm5RV2sxQ2NPVitEeVRuCjR3S1RpZFg2NG5lT1Z5aUUrVzM1REtzZWpEYUpWekZpOFR6aUlsanFSbDYzVk15OWxTMFdPUUdQOWdhRVZ1NWYKR2VJemlqT1NKQXhwZ1RUcmZpQTRtbUllTTFqbVVpTml6bGxTa2s2b0NoNWRsQzNBaU1QTHpSbnNXT200VXlwLwpER0o4d0NmbGhxREd2Z3YrTEV5OUNUNVI2SFBmdVE0RGVlSHJDa1FuenhySlJZankrSTRmR3liSTdtSlYrb3dtCnNObnoxUlRzTmJqY2JldnV5a0RTaUNRdnRvMmtxU0l1MDdIRHpXVGVkbk9KamRvekpLU002UEZ1ZlJneDRhVVIKOEhtT25tckp5V0Zjem8yUlFXdTVHVCt1RFZEL3RVWXJTZzBOdjBDZEtrazBPWFU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
HARBOR_ADMIN_PASSWORD: "SGFyYm9yMTIzNDU="
REGISTRY_CREDENTIAL_PASSWORD: "aGFyYm9yX3JlZ2lzdHJ5X3Bhc3N3b3Jk"
CSRF_KEY: "Vlk1MFNhMFp4N3lNUUlOWXlZcVR3c0l4cDBCMUhhdWM="
---
# Source: harbor/templates/exporter/exporter-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: harbor-exporter
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.1"
type: Opaque
data:
HARBOR_ADMIN_PASSWORD: "SGFyYm9yMTIzNDU="
---
# Source: harbor/templates/jobservice/jobservice-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: "harbor-jobservice"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.1"
type: Opaque
data:
JOBSERVICE_SECRET: "Sk5JU3VIUHViZDYwTjlOUQ=="
REGISTRY_CREDENTIAL_PASSWORD: "aGFyYm9yX3JlZ2lzdHJ5X3Bhc3N3b3Jk"
---
# Source: harbor/templates/registry/registry-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: "harbor-registry"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.1"
type: Opaque
data:
REGISTRY_HTTP_SECRET: "ODV4RjZ5TkZlYnh0M3hkZA=="
REGISTRY_REDIS_PASSWORD: "TXZZY3VVMFJhSXUxU1g3ZlkxbTFKcmdMVVNhWkpqZ2U="
---
# Source: harbor/templates/registry/registry-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: "harbor-registry-htpasswd"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.1"
type: Opaque
data:
REGISTRY_HTPASSWD: "aGFyYm9yX3JlZ2lzdHJ5X3VzZXI6JDJhJDEwJDJiSTc1ZjlUeXdmYkZ1bnpNQk50NnVOQUNMVko4UjdDWmtrV2M2UzROUnlIMlZiR2RaNXVT"
---
# Source: harbor/templates/registry/registryctl-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: "harbor-registryctl"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.1"
type: Opaque
data:
---
# Source: harbor/templates/core/core-cm.yaml
apiVersion: v1
kind: ConfigMap
@@ -13,7 +134,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
data:
app.conf: |+
appname = Harbor
@@ -73,7 +194,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
data:
HTTP_PROXY: ""
HTTPS_PROXY: ""
@@ -114,7 +235,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
data:
CORE_URL: "http://harbor-core:80"
TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
@@ -144,7 +265,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
data:
config.yml: |+
#Server listening port
@@ -180,180 +301,6 @@ data:
# the max time for execution in running state without new task created
max_dangling_hours: 168
---
# Source: harbor/templates/nginx/configmap-https.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-nginx
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
data:
nginx.conf: |+
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 3096;
use epoll;
multi_accept on;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
proxy_http_version 1.1;
upstream core {
server "harbor-core:80";
}
upstream portal {
server "harbor-portal:80";
}
log_format timed_combined '[$time_local]:$remote_addr - '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'$request_time $upstream_response_time $pipe';
access_log /dev/stdout timed_combined;
map $http_x_forwarded_proto $x_forwarded_proto {
default $http_x_forwarded_proto;
"" $scheme;
}
server {
listen 8443 ssl;
listen [::]:8443 ssl;
# server_name harbordomain.com;
server_tokens off;
# SSL
ssl_certificate /etc/nginx/cert/tls.crt;
ssl_certificate_key /etc/nginx/cert/tls.key;
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
# Add extra headers
add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload";
add_header X-Frame-Options DENY;
add_header Content-Security-Policy "frame-ancestors 'none'";
location / {
proxy_pass http://portal/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; HttpOnly; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /api/ {
proxy_pass http://core/api/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /chartrepo/ {
proxy_pass http://core/chartrepo/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /c/ {
proxy_pass http://core/c/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /v1/ {
return 404;
}
location /v2/ {
proxy_pass http://core/v2/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_buffering off;
proxy_request_buffering off;
}
location /service/ {
proxy_pass http://core/service/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /service/notifications {
return 404;
}
}
server {
listen 8080;
listen [::]:8080;
#server_name harbordomain.com;
return 301 https://$host$request_uri;
}
}
---
# Source: harbor/templates/portal/configmap.yaml
apiVersion: v1
kind: ConfigMap
@@ -368,7 +315,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
data:
nginx.conf: |+
worker_processes auto;
@@ -419,7 +366,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
data:
config.yml: |+
version: 0.1
@@ -443,7 +390,7 @@ data:
delete:
enabled: true
redirect:
-disable: true
+disable: false
redis:
addr: dragonfly:6379
db: 2
@@ -495,7 +442,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
data:
---
# Source: harbor/templates/jobservice/jobservice-pvc.yaml
@@ -514,7 +461,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: jobservice
app.kubernetes.io/component: jobservice
spec:
@@ -539,7 +486,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
spec:
ports:
- name: http-web
@@ -566,7 +513,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
spec:
ports:
- name: http-metrics
@@ -590,7 +537,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
spec:
ports:
- name: http-jobservice
@@ -603,39 +550,6 @@ spec:
app: "harbor"
component: jobservice
---
# Source: harbor/templates/nginx/service.yaml
apiVersion: v1
kind: Service
metadata:
name: harbor
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/hostname: harbor.k-space.ee
metallb.universe.tf/address-pool: elisa
spec:
type: LoadBalancer
ports:
- name: http
port: 80
targetPort: 8080
- name: https
port: 443
targetPort: 8443
selector:
release: harbor
app: "harbor"
component: nginx
---
# Source: harbor/templates/portal/service.yaml
apiVersion: v1
kind: Service
@@ -650,7 +564,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
spec:
ports:
- port: 80
@@ -674,7 +588,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
spec:
ports:
- name: http-registry
@@ -703,7 +617,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: core
app.kubernetes.io/component: core
spec:
@@ -725,13 +639,13 @@ spec:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: core
app.kubernetes.io/component: core
annotations:
-checksum/configmap: 9ea7f1881e4fe5b908355ee28e246b67c8c498d2f719dd74a5536a51ee2d9865
-checksum/secret: 7827f00e118d39ccc4caad6df2df2125a0cef6b6ad9583cb30a6b17e62e1b934
-checksum/secret-jobservice: f6fcc2a7c9a0224eefa0b4ed2deed3fb22335c417d5645067efdc1341de26bc7
+checksum/configmap: 459defa5f990e3b5029d62cfdb86ca9a4191544419bdac39dac6eabc20a1d07c
+checksum/secret: cb3c7b9ca5ab79bbe000a4fc96165503da4a59cb40edc0e0b75b3f154910e171
+checksum/secret-jobservice: abd218ca34d4473a7f8e1f6ed12b0b5e85e4f0b1ce1f6127afdfc59c9853fe7d
spec:
securityContext:
runAsUser: 10000
@@ -740,7 +654,7 @@ spec:
terminationGracePeriodSeconds: 120
containers:
- name: core
-image: goharbor/harbor-core:v2.11.0
+image: goharbor/harbor-core:v2.11.1
imagePullPolicy: IfNotPresent
startupProbe:
httpGet:
@@ -830,13 +744,6 @@ spec:
secret:
- name: psc
emptyDir: {}
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/exporter/exporter-dpl.yaml
apiVersion: apps/v1
@@ -852,7 +759,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: exporter
app.kubernetes.io/component: exporter
spec:
@@ -874,12 +781,12 @@ spec:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: exporter
app.kubernetes.io/component: exporter
annotations:
-checksum/configmap: 79f5dcd087513f8f1d03fca430907faeb9bd7df805dbb251b750fb49ccb0f0b5
-checksum/secret: 55bad27b07dca97c644c9977eb8c3da9c08c8b8bbda2854878d2936a8da28508
+checksum/configmap: 7175588df9aea5ad07381b9e28514d0f3506380b511be090b7d2ddc40beb5ab0
+checksum/secret: be1b09e9e24f666fd357cca51bb49abd966708df0bd2e97078bf88db7ffddf85
spec:
securityContext:
runAsUser: 10000
@@ -887,7 +794,7 @@ spec:
automountServiceAccountToken: false
containers:
- name: exporter
-image: goharbor/harbor-exporter:v2.11.0
+image: goharbor/harbor-exporter:v2.11.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -944,7 +851,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: jobservice
app.kubernetes.io/component: jobservice
spec:
@@ -968,14 +875,14 @@ spec:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: jobservice
app.kubernetes.io/component: jobservice
annotations:
-checksum/configmap: 3a35bef831e58536bf86670117b43e2913a4c1a60d0e74d948559d7a7d564684
-checksum/configmap-env: 80e8b81abf755707210d6112ad65167a7d53088b209f63c603d308ef68c4cfad
-checksum/secret: 35297960a512675e6dcdff9d387587916f748c2c2ca2b5b8e5cbe5853488971b
-checksum/secret-core: 72ed9c9917dd47ba68b05273e113792198afa5e09a696689e1063fbaffc80034
+checksum/configmap: 5af691ab7fd728ad91fbd355f03ea709d69f58a32e405436cec9056617490bb3
+checksum/configmap-env: f86af5d5cdbf21c00a2721265d7db84c8cda8ef1b2ac4da29aff32dbdf0a875d
+checksum/secret: 28c5439858d9583576afb9f6fa5ec06f8cd0ddd5883de3d0bf500e043b1e9f37
+checksum/secret-core: 0fd20ee0eab72090f437861dd69bda563072baddf2c1be9c8ea9adae7cd4450a
spec:
securityContext:
runAsUser: 10000
@@ -984,7 +891,7 @@ spec:
terminationGracePeriodSeconds: 120
containers:
- name: jobservice
-image: goharbor/harbor-jobservice:v2.11.0
+image: goharbor/harbor-jobservice:v2.11.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -1036,110 +943,6 @@ spec:
- name: job-logs
persistentVolumeClaim:
claimName: harbor-jobservice
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/nginx/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-nginx
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
component: nginx
app.kubernetes.io/component: nginx
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: nginx
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
component: nginx
app.kubernetes.io/component: nginx
annotations:
checksum/configmap: 7114a5d89af834358c44d0e87c66e2c69da2e3dd545c02472a416c8a7857b983
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: nginx
image: "goharbor/nginx-photon:v2.11.0"
imagePullPolicy: "IfNotPresent"
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 1
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8080
- containerPort: 8443
volumeMounts:
- name: config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
- name: certificate
mountPath: /etc/nginx/cert
volumes:
- name: config
configMap:
name: harbor-nginx
- name: certificate
secret:
secretName: harbor-ingress
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/portal/deployment.yaml
apiVersion: apps/v1
@@ -1155,7 +958,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: portal
app.kubernetes.io/component: portal
spec:
@@ -1177,11 +980,11 @@ spec:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: portal
app.kubernetes.io/component: portal
annotations:
-checksum/configmap: d1b4818dc76aa5b382b435491e437f3c5f9795bf1fb019c82b003f75e7bc3d8f
+checksum/configmap: 24d858ac32ea0ba10f15274a5dc08a307a5bb9f3577cab5a58d086976c36aee5
spec:
securityContext:
runAsUser: 10000
@@ -1189,7 +992,7 @@ spec:
automountServiceAccountToken: false
containers:
- name: portal
-image: goharbor/harbor-portal:v2.11.0
+image: goharbor/harbor-portal:v2.11.1
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
@@ -1224,13 +1027,6 @@ spec:
- name: portal-config
configMap:
name: "harbor-portal"
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/registry/registry-dpl.yaml
apiVersion: apps/v1
@@ -1246,7 +1042,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: registry
app.kubernetes.io/component: registry
spec:
@@ -1270,14 +1066,14 @@ spec:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
component: registry
app.kubernetes.io/component: registry
annotations:
-checksum/configmap: b6973055b0a56022c00f9460283665c292d00f4ec15c0b36ae334781fd72ff93
-checksum/secret: b246f895959725e4164cb10bc8c1c5d4d50618736c48129c8ee233b126164339
-checksum/secret-jobservice: 37d8a246aaaed2ca66ea98c8e6b0fd875de5cb0cf2660abd7bda98fa6d630ccb
-checksum/secret-core: a3deaec6a79903eb0619162ab91a87581ae2da37bc3f894792a2f48912a2b7c8
+checksum/configmap: 275b555209ecc9f8ff34a171d588f4030db27ae049e605ccf3cfa3c75d1acb6d
+checksum/secret: 2e37b86bd1c3d83b57d2ed1d80dcbe1fc39a5e241fa8cb776dac29cbccf64448
+checksum/secret-jobservice: b153867cbce5e1d1b9ca0d2f4757c175d79e0f7d439cd472f35f28d764497d0b
+checksum/secret-core: 12550e5628a5bb979f1820737ebd09608aee707cfea2596b65edbeedc75d2558
spec:
securityContext:
runAsUser: 10000
@@ -1287,7 +1083,7 @@ spec:
terminationGracePeriodSeconds: 120
containers:
- name: registry
-image: goharbor/registry-photon:v2.11.0
+image: goharbor/registry-photon:v2.11.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -1333,7 +1129,7 @@ spec:
mountPath: /etc/registry/config.yml
subPath: config.yml
- name: registryctl
-image: goharbor/harbor-registryctl:v2.11.0
+image: goharbor/harbor-registryctl:v2.11.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -1403,13 +1199,82 @@ spec:
name: "harbor-registry"
- name: registry-data
emptyDir: {}
-nodeSelector:
-dedicated: storage
-tolerations:
-- effect: NoSchedule
-key: dedicated
-operator: Equal
-value: storage
+---
+# Source: harbor/templates/ingress/ingress.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+name: "harbor-ingress"
+labels:
+heritage: Helm
+release: harbor
+chart: harbor
+app: "harbor"
+app.kubernetes.io/instance: harbor
+app.kubernetes.io/name: harbor
+app.kubernetes.io/managed-by: Helm
+app.kubernetes.io/part-of: harbor
+app.kubernetes.io/version: "2.11.1"
+annotations:
+cert-manager.io/cluster-issuer: default
+external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
+ingress.kubernetes.io/proxy-body-size: "0"
+ingress.kubernetes.io/ssl-redirect: "true"
+kubernetes.io/ingress.class: traefik
+nginx.ingress.kubernetes.io/proxy-body-size: "0"
+nginx.ingress.kubernetes.io/ssl-redirect: "true"
+traefik.ingress.kubernetes.io/router.entrypoints: websecure
+traefik.ingress.kubernetes.io/router.tls: "true"
+spec:
+tls:
+- hosts:
+- "*.k-space.ee"
+rules:
+- http:
+paths:
+- path: /api/
+pathType: Prefix
+backend:
+service:
+name: harbor-core
+port:
+number: 80
+- path: /service/
+pathType: Prefix
+backend:
+service:
+name: harbor-core
+port:
+number: 80
+- path: /v2/
+pathType: Prefix
+backend:
+service:
+name: harbor-core
+port:
+number: 80
+- path: /chartrepo/
+pathType: Prefix
+backend:
+service:
+name: harbor-core
+port:
+number: 80
+- path: /c/
+pathType: Prefix
+backend:
+service:
+name: harbor-core
+port:
+number: 80
+- path: /
+pathType: Prefix
+backend:
+service:
+name: harbor-portal
+port:
+number: 80
+host: harbor.k-space.ee
---
# Source: harbor/templates/metrics/metrics-svcmon.yaml
apiVersion: monitoring.coreos.com/v1
@@ -1425,7 +1290,7 @@ metadata:
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
-app.kubernetes.io/version: "2.11.0"
+app.kubernetes.io/version: "2.11.1"
spec:
jobLabel: app.kubernetes.io/name
endpoints:
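
With the nginx LoadBalancer gone, Harbor is now served only through the Traefik Ingress above. A quick smoke test of the new routing, shown here as a hedged example rather than part of the rendered output (`/api/v2.0/systeminfo` and `/v2/` are standard Harbor endpoints):

```
# Portal and API should both answer via traefik.k-space.ee now
curl -s https://harbor.k-space.ee/api/v2.0/systeminfo
# Docker distribution API; anonymous requests should get a 401 challenge
curl -si https://harbor.k-space.ee/v2/ | head -n 5
```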

View File

@@ -1,21 +1,17 @@
expose:
-type: loadBalancer
+type: ingress
tls:
enabled: true
-certSource: secret
-secret:
-secretName: "harbor-ingress"
-loadBalancer:
-name: harbor
-ports:
-httpPort: 80
-httpsPort: 443
+ingress:
+hosts:
+core: harbor.k-space.ee
annotations:
cert-manager.io/cluster-issuer: default
-external-dns.alpha.kubernetes.io/hostname: harbor.k-space.ee
-metallb.universe.tf/address-pool: elisa
+kubernetes.io/ingress.class: traefik
+traefik.ingress.kubernetes.io/router.entrypoints: websecure
+traefik.ingress.kubernetes.io/router.tls: "true"
+external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
labels: {}
-sourceRanges: []
externalURL: https://harbor.k-space.ee
@@ -48,7 +44,7 @@ persistence:
# Refer to
# https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect
# for the detail.
-disableredirect: true
+disableredirect: false
type: s3
s3:
# Set an existing secret for S3 accesskey and secretkey
@@ -143,49 +139,3 @@ redis:
addr: "dragonfly:6379"
username: ""
password: "MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge"
nginx:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
portal:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
core:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
jobservice:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
registry:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
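
For reference, the rendered manifest earlier in this diff tracks this values file; a sketch of regenerating it, assuming the upstream goharbor chart repository (output filename and unpinned chart version are illustrative, pick the chart matching app version 2.11.1):

```
# Assumed workflow: render the upstream chart with the tuned values
# and commit the result alongside values.yaml
helm repo add goharbor https://helm.goharbor.io
helm template harbor goharbor/harbor -f values.yaml > harbor.yml
```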

View File

@@ -1,382 +0,0 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: frontend
spec:
displayName: Kubernetes pod log aggregator
uri: 'https://log.k-space.ee'
allowedGroups:
- k-space:kubernetes:developers
- k-space:kubernetes:admins
headerMapping:
email: Remote-Email
groups: Remote-Groups
name: Remote-Name
user: Remote-Username
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: logmower-shipper
spec:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 50%
selector:
matchLabels:
app: logmower-shipper
template:
metadata:
labels:
app: logmower-shipper
spec:
serviceAccountName: logmower-shipper
containers:
- name: logmower-shipper
image: logmower/shipper:latest
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MONGO_URI
valueFrom:
secretKeyRef:
name: logmower-mongodb-application-readwrite
key: connectionString.standard
ports:
- containerPort: 8000
name: metrics
securityContext:
readOnlyRootFilesystem: true
command:
- /app/log_shipper.py
- --parse-json
- --normalize-log-level
- --stream-to-log-level
- --merge-top-level
- --max-collection-size
- "10000000000"
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
volumes:
- name: etcmachineid
hostPath:
path: /etc/machine-id
- name: varlog
hostPath:
path: /var/log
tolerations:
- operator: "Exists"
effect: "NoSchedule"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: logging-logmower-shipper
subjects:
- kind: ServiceAccount
name: logmower-shipper
namespace: logmower
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: logmower-shipper
labels:
app: logmower-shipper
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-shipper
spec:
podSelector:
matchLabels:
app: logmower-shipper
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ports:
- port: 27017
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-eventsource
spec:
podSelector:
matchLabels:
app: logmower-eventsource
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-frontend
spec:
podSelector:
matchLabels:
app: logmower-frontend
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: logmower-shipper
spec:
selector:
matchLabels:
app: logmower-shipper
podMetricsEndpoints:
- port: metrics
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: logmower-shipper
spec:
groups:
- name: logmower-shipper
rules:
- alert: LogmowerSingleInsertionErrors
annotations:
summary: Logmower shipper is having issues submitting log records
to database
expr: rate(logmower_insertion_error_count_total[30m]) > 0
for: 0m
labels:
severity: warning
- alert: LogmowerBulkInsertionErrors
annotations:
summary: Logmower shipper is having issues submitting log records
to database
expr: rate(logmower_bulk_insertion_error_count_total[30m]) > 0
for: 0m
labels:
severity: warning
- alert: LogmowerHighDatabaseLatency
annotations:
summary: Database operations are slow
expr: histogram_quantile(0.95, logmower_database_operation_latency_bucket) > 10
for: 1m
labels:
severity: warning
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: logmower
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: logmower-frontend@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: log.k-space.ee
http:
paths:
- pathType: Prefix
path: "/events"
backend:
service:
name: logmower-eventsource
port:
number: 3002
- pathType: Prefix
path: "/"
backend:
service:
name: logmower-frontend
port:
number: 8080
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: v1
kind: Service
metadata:
name: logmower-eventsource
spec:
type: ClusterIP
selector:
app: logmower-eventsource
ports:
- protocol: TCP
port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: logmower-frontend
spec:
type: ClusterIP
selector:
app: logmower-frontend
ports:
- protocol: TCP
port: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-frontend
spec:
selector:
matchLabels:
app: logmower-frontend
template:
metadata:
labels:
app: logmower-frontend
spec:
containers:
- name: logmower-frontend
image: logmower/frontend:latest
ports:
- containerPort: 8080
name: http
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
resources:
limits:
memory: 50Mi
requests:
cpu: 1m
memory: 20Mi
volumeMounts:
- name : nginx-cache
mountPath: /var/cache/nginx/
- name : nginx-config
mountPath: /var/config/nginx/
- name: var-run
mountPath: /var/run/
volumes:
- emptyDir: {}
name: nginx-cache
- emptyDir: {}
name: nginx-config
- emptyDir: {}
name: var-run
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-eventsource
spec:
selector:
matchLabels:
app: logmower-eventsource
template:
metadata:
labels:
app: logmower-eventsource
spec:
containers:
- name: logmower-eventsource
image: logmower/eventsource:latest
ports:
- containerPort: 3002
name: nodejs
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
resources:
limits:
cpu: 500m
memory: 200Mi
requests:
cpu: 10m
memory: 100Mi
env:
- name: MONGODB_HOST
valueFrom:
secretKeyRef:
name: logmower-mongodb-application-readonly
key: connectionString.standard
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-mongodb
spec:
podSelector:
matchLabels:
app: logmower-mongodb-svc
policyTypes:
- Ingress
- Egress
ingress:
- from:
- podSelector: {}
ports:
- port: 27017
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ports:
- port: 27017

View File

@@ -1 +0,0 @@
../shared/networkpolicy-base.yml

View File

@@ -32,6 +32,9 @@ Sample queries:
* [Disk space left](https://prom.k-space.ee/graph?g0.range_input=1h&g0.expr=node_filesystem_avail_bytes&g0.tab=1)
* Minio [s3 egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_s3_traffic_sent_bytes%5B3m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [internode egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_inter_node_traffic_sent_bytes%5B2m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [storage used](https://prom.k-space.ee/graph?g0.expr=minio_node_disk_used_bytes&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h)
+
+Another useful tool for exploring Prometheus operator custom resources is
+[doc.crds.dev/github.com/prometheus-operator/prometheus-operator](https://doc.crds.dev/github.com/prometheus-operator/prometheus-operator@v0.75.0)
# For administrators
To reconfigure SNMP targets etc:
@@ -52,7 +55,14 @@ To set Mikrotik secrets:
```
kubectl create -n monitoring secret generic mikrotik-exporter \
-  --from-literal=MIKROTIK_PASSWORD='f7W!H*Pu' \
-  --from-literal=PROMETHEUS_BEARER_TOKEN=$(cat /dev/urandom | base64 | head -c 30)
+  --from-literal=username=netpoller \
+  --from-literal=password=...
```
+
+To wipe timeseries:
+```
+for replica in $(seq 0 2); do
+  kubectl exec -n monitoring prometheus-prometheus-$replica -- wget --post-data='match[]={__name__=~"mikrotik_.*"}' http://127.0.0.1:9090/api/v1/admin/tsdb/delete_series -O -
+done
+```
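
Deleting series only writes tombstones; if the space should be reclaimed immediately, the same admin API also exposes `clean_tombstones` (a hedged example following the same pattern as above):

```
for replica in $(seq 0 2); do
  kubectl exec -n monitoring prometheus-prometheus-$replica -- wget --post-data='' http://127.0.0.1:9090/api/v1/admin/tsdb/clean_tombstones -O -
done
```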

View File

@@ -4,25 +4,29 @@ kind: Probe
metadata:
name: mikrotik
spec:
-bearerTokenSecret:
-name: mikrotik-exporter
-key: PROMETHEUS_BEARER_TOKEN
+basicAuth:
+username:
+name: mikrotik-exporter
+key: username
+password:
+name: mikrotik-exporter
+key: password
prober:
+path: /metrics
url: mikrotik-exporter
-module: full
targets:
staticConfig:
static:
-- router.mgmt.k-space.ee
-- sw_chaos.mgmt.k-space.ee
-- sw_poe.mgmt.k-space.ee
-- sw_mgmt.mgmt.k-space.ee
-- sw_core02.mgmt.k-space.ee
-- sw_cyber.mgmt.k-space.ee
-- sw_ha.mgmt.k-space.ee
-- sw_asocial.mgmt.k-space.ee
-- sw_kitchen.mgmt.k-space.ee
-- sw_core01.mgmt.k-space.ee
+- 172.23.0.1
+- 172.23.0.100
+#- 100.102.1.111
+#- 100.102.1.112
+- 100.102.1.114
+- 100.102.1.115
+- 100.102.1.121
+- 100.102.1.131
+- 100.102.1.141
+- 100.102.1.151
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
@@ -32,22 +36,30 @@ spec:
groups:
- name: mikrotik
rules:
-- alert: MikrotikUplinkRedundancyLost
-expr: mikrotik_interface_running{port=~"sfp-sfpplus[12]", instance!~"sw_core.*", instance!~"sw_mgmt.*"} == 0
-for: 0m
+- alert: MikrotikBondRedundancyLost
+expr: mikrotik_bond_port_active == 0
+for: 2m
labels:
severity: error
annotations:
summary: Switch uplink high availability lost
-description: One of the two 10Gb optical links is malfunctioning
+description: One of the two bonds has inactive member interface
- alert: MikrotikLinkRateDegraded
-expr: mikrotik_interface_rate{port=~"sfp-sfpplus.*"} < 10000000000
-for: 0m
+expr: mikrotik_interface_link_rate_bps{interface=~"sfp-sfpplus.*"} < 10000000000
+for: 2m
labels:
severity: error
annotations:
-summary: 10Gb link degraded
-description: One of the 10Gb links is running at lower speed
+summary: SFP+ link degraded
+description: One of the SFP+ (10G) links is running at lower speed
+- alert: MikrotikLinkRateDegraded
+expr: mikrotik_interface_link_rate_bps{interface=~"qsfpplus.*"} < 40000000000
+for: 2m
+labels:
+severity: error
+annotations:
+summary: QSFP+ link degraded
+description: One of the QSFP+ (40G) links is running at lower speed
---
apiVersion: apps/v1
kind: Deployment
@@ -63,20 +75,10 @@ spec:
metadata:
labels:
app: mikrotik-exporter
-annotations:
-co.elastic.logs/multiline.pattern: '^ '
-co.elastic.logs/multiline.negate: "false"
-co.elastic.logs/multiline.match: after
spec:
containers:
- name: mikrotik-exporter
-image: mirror.gcr.io/codemowers/mikrotik-exporter:latest
+image: mirror.gcr.io/codemowers/mikrotik-exporter:latest@sha256:895ed4a96364aa6f37aa049eb7882779529dce313360e78b01dee7d6f9b3e0bb
-env:
-- name: MIKROTIK_USER
-value: netpoller
-envFrom:
-- secretRef:
-name: mikrotik-exporter
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
@@ -94,13 +96,13 @@ spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- mikrotik-exporter
topologyKey: "kubernetes.io/hostname"
---
kind: Service
apiVersion: v1
@@ -112,6 +114,6 @@ spec:
- name: http
port: 80
protocol: TCP
-targetPort: 3001
+targetPort: 8728
selector:
app: mikrotik-exporter
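
To sanity-check the Probe wiring by hand, the exporter can be scraped directly; a sketch assuming the Probe's usual behaviour of passing each static target as a `target` URL parameter and the credentials from the `mikrotik-exporter` secret (the exact parameter name depends on the exporter implementation, and `$MIKROTIK_PASSWORD` is a placeholder):

```
kubectl -n monitoring port-forward svc/mikrotik-exporter 8080:80 &
# Mirrors what the generated scrape config would send
curl -s -u "netpoller:$MIKROTIK_PASSWORD" 'http://127.0.0.1:8080/metrics?target=172.23.0.1'
```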

View File

@@ -33,7 +33,7 @@ spec:
groups:
- name: node-exporter
rules:
-- alert: ZfsOfflinePool
+- alert: ZfsDegradedPool
expr: node_zfs_zpool_state{state!="online"} > 0
for: 1m
labels:
@@ -377,13 +377,19 @@ spec:
- name: node-exporter
args:
- --web.listen-address=0.0.0.0:9101
-- --path.sysfs=/host/sys
-- --path.rootfs=/host/root
+- --no-collector.bonding
+- --no-collector.fibrechannel
+- --no-collector.infiniband
+- --no-collector.nfs
+- --no-collector.nfsd
+- --no-collector.nvme
+- --no-collector.zfs
+- --no-collector.tapestats
- --no-collector.wifi
-- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)
+- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker|var/lib/kubelet/pods|run)(/.+)?$
- --collector.netclass.ignored-devices=^(veth|cali|vxlan|cni|vnet|tap|lo|wg)
- --collector.netdev.device-exclude=^(veth|cali|vxlan|cni|vnet|tap|lo|wg)
-- --collector.diskstats.ignored-devices=^(sr[0-9][0-9]*)$
+- --collector.diskstats.ignored-devices=^(sr|loop)[0-9][0-9]*$
image: mirror.gcr.io/prom/node-exporter:v1.8.2
resources:
limits:
@@ -393,13 +399,11 @@ spec:
cpu: 5m
memory: 20Mi
volumeMounts:
-- mountPath: /host/sys
-mountPropagation: HostToContainer
-name: sys
+- name: sys
+mountPath: /sys
readOnly: true
-- mountPath: /host/root
-mountPropagation: HostToContainer
-name: root
+- name: proc
+mountPath: /proc
readOnly: true
ports:
- containerPort: 9101
@@ -419,9 +423,9 @@ spec:
tolerations:
- operator: Exists
volumes:
-- hostPath:
-path: /sys
-name: sys
-- hostPath:
-path: /
-name: root
+- name: sys
+hostPath:
+path: /sys
+- name: proc
+hostPath:
+path: /proc
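
Since both the collector set and the filesystem exclude regex changed, it is worth spot-checking a node after rollout; a hedged example against the listen address configured above (`<node>` stands for any cluster node):

```
# ZFS and NFS collectors are disabled now, so these should return nothing
curl -s http://<node>:9101/metrics | grep -E '^node_(zfs|nfs)' | head
# /run and container mounts should no longer appear here
curl -s http://<node>:9101/metrics | grep '^node_filesystem_avail_bytes' | head
```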

View File

@@ -17,6 +17,7 @@ metadata:
name: prometheus
namespace: monitoring
spec:
+enableAdminAPI: true
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone

View File

@@ -86,8 +86,8 @@ spec:
staticConfig:
static:
- ups-4.mgmt.k-space.ee
-- ups-6.mgmt.k-space.ee
- ups-7.mgmt.k-space.ee
+- ups-8.mgmt.k-space.ee
- ups-9.mgmt.k-space.ee
---
apiVersion: monitoring.coreos.com/v1

View File

@@ -21,7 +21,7 @@ data:
ssh rosdump@$target '/export' | grep -v '^# serial number =' | grep -v '^#.* by RouterOS' > $target
git add $target
done
if [[ `git status --porcelain` ]]; then
echo "Attempting Git check in"
git commit -m "$(git diff --cached --shortstat)"
git push
@@ -52,18 +52,11 @@ spec:
activeDeadlineSeconds: 300
template:
spec:
-nodeSelector:
-dedicated: monitoring
-tolerations:
-- key: dedicated
-operator: Equal
-value: monitoring
-effect: NoSchedule
restartPolicy: OnFailure
containers:
- name: rosdump
-image: harbor.k-space.ee/k-space/microscript-base
-imagePullPolicy: Always
+image: codemowers/git
+imagePullPolicy: IfNotPresent
args:
- bash
- /config/script.sh

View File

@@ -11,9 +11,23 @@ spec:
ipPools:
- blockSize: 26
cidr: 10.244.0.0/16
-encapsulation: VXLANCrossSubnet
natOutgoing: Disabled
nodeSelector: all()
+---
+apiVersion: crd.projectcalico.org/v1
+kind: IPPool
+metadata:
+name: default-ipv4-ippool
+spec:
+allowedUses:
+- Workload
+- Tunnel
+blockSize: 26
+cidr: 10.244.0.0/16
+ipipMode: Always
+nodeSelector: all()
+vxlanMode: Never
+natOutgoing: True
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
@@ -52,6 +66,7 @@ spec:
- cidr: 193.40.103.39/32
- cidr: 62.65.250.36/32
- cidr: 62.65.250.37/32
+- cidr: 62.65.250.38/32
- cidr: 62.65.250.2/32
- cidr: 193.40.103.25/32
---
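
Whether the pool really switched to IP-in-IP can be verified with calicoctl once this is applied; a sketch assuming calicoctl is pointed at this cluster:

```
# IPIPMODE should read Always and VXLANMODE Never for the pool above
calicoctl get ippool default-ipv4-ippool -o wide
```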

View File

@@ -1,10 +1,11 @@
---
-apiVersion: codemowers.io/v1alpha1
-kind: OIDCGWClient
+apiVersion: codemowers.cloud/v1beta1
+kind: OIDCClient
metadata:
name: whoami-oidc
+namespace: whoami-oidc
spec:
-displayName: Whoami (oidc-tester-app)
+displayName: Whoami OIDC
uri: https://whoami-oidc.k-space.ee
redirectUris:
- https://whoami-oidc.k-space.ee/auth/callback
@@ -16,7 +17,6 @@ spec:
availableScopes:
- openid
- profile
-tokenEndpointAuthMethod: client_secret_post
pkce: false
---
apiVersion: apps/v1