17 Commits

Author SHA1 Message Date
fd58faeccb Switch to non-deprecated metallb annotations (Fixes issue #118) 2025-08-26 19:58:08 +03:00
Erki Aas 4b7073997c Add unifi controller 2025-08-14 22:22:50 +03:00
Erki Aas f27a92a545 Add unifi controller 2025-08-14 21:48:48 +03:00
Erki Aas f823300169 Add unifi controller 2025-08-14 21:42:32 +03:00
Erki Aas c0f79a229c Add unifi controller 2025-08-14 21:06:59 +03:00
Erki Aas d8120a3b0d Add unifi controller 2025-08-14 20:34:28 +03:00
Erki Aas 07c04bf216 Add unifi controller 2025-08-14 20:31:19 +03:00
Erki Aas 16fd71d6f0 Add unifi controller 2025-08-14 20:29:41 +03:00
Erki Aas da0f3ea05f Add unifi controller 2025-08-14 20:28:42 +03:00
Erki Aas 1204039ba3 Remove longhorn 2025-08-14 18:58:54 +03:00
Erki Aas a85367da34 Add cephfs storage class 2025-08-14 18:56:21 +03:00
Erki Aas 9a02fcdef0 Add cephfs storage class 2025-08-14 18:55:26 +03:00
Erki Aas b6d4ee2b05 Migrate all storage classes to ceph 2025-08-14 18:55:07 +03:00
Erki Aas 9c66882d83 Use cephfs for harbor job pvc 2025-08-14 18:40:34 +03:00
07d4039ffe there is no nvidia; old cameras will transcode on CPU; fixup kube-system to yaml 2025-08-14 02:02:26 +03:00
aff54f33e1 storage-classes to rook 2025-08-14 01:54:36 +03:00
c65a2330af frigate to VM and hdd-ceph 2025-08-14 01:53:15 +03:00
27 changed files with 527 additions and 471 deletions

View File

@@ -5,8 +5,8 @@ metadata:
   namespace: freeswitch
   annotations:
     external-dns.alpha.kubernetes.io/hostname: freeswitch.k-space.ee
-    metallb.universe.tf/address-pool: eenet
-    metallb.universe.tf/ip-allocated-from-pool: eenet
+    metallb.io/address-pool: eenet
+    metallb.io/ip-allocated-from-pool: eenet
 spec:
   ports:
     - name: sip-internal-udp
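
Since several services get the same annotation rename in this change, a quick audit for anything still carrying the deprecated `metallb.universe.tf/*` prefix can help; a sketch, assuming `kubectl` and `jq` are available:

```
# list namespace/name of Services that still use the old annotation prefix
kubectl get svc -A -o json \
  | jq -r '.items[]
      | select((.metadata.annotations // {}) | keys | any(startswith("metallb.universe.tf")))
      | "\(.metadata.namespace)/\(.metadata.name)"'
```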

View File

@@ -0,0 +1,20 @@
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: unifi
+  namespace: argocd
+spec:
+  project: k-space.ee
+  source:
+    repoURL: 'git@git.k-space.ee:k-space/kube.git'
+    path: unifi
+    targetRevision: HEAD
+  destination:
+    server: 'https://kubernetes.default.svc'
+    namespace: unifi
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true
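
Once this Application manifest is merged, reconciliation can be triggered without waiting for the automated sync; a sketch, assuming the `argocd` CLI is logged in to this Argo CD instance:

```
argocd app sync unifi            # trigger reconciliation immediately
argocd app wait unifi --health   # block until resources report healthy
```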

View File

@@ -146,7 +146,7 @@ metadata:
   name: filebeat-syslog-udp
   annotations:
     external-dns.alpha.kubernetes.io/hostname: syslog.k-space.ee
-    metallb.universe.tf/allow-shared-ip: syslog.k-space.ee
+    metallb.io/allow-shared-ip: syslog.k-space.ee
 spec:
   type: LoadBalancer
   externalTrafficPolicy: Local
@@ -165,7 +165,7 @@ metadata:
   name: filebeat-syslog-tcp
   annotations:
     external-dns.alpha.kubernetes.io/hostname: syslog.k-space.ee
-    metallb.universe.tf/allow-shared-ip: syslog.k-space.ee
+    metallb.io/allow-shared-ip: syslog.k-space.ee
 spec:
   type: LoadBalancer
   externalTrafficPolicy: Local

View File

@@ -14,3 +14,7 @@ kustomize build . --enable-helm
 - Amcrest 5MP Turret PoE Outdoor IP Camera with Mic/Audio, 98ft NightVision, 132° FOV, MicroSD (256GB) IP5M-T1179EW-AI-V3 white
 Cameras are enumerated (with credentials) in secretspace.
+## Coral setup
+1. Map USB to VM (#TODO: blog post coming up for exact steps)
+2. `k label no worker91.kube.k-space.ee coral.ai/tpu=Exists`
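
To confirm the label from step 2 took effect before Frigate is scheduled, something like this should do (`worker91.kube.k-space.ee` is the node named above):

```
kubectl get nodes -l coral.ai/tpu=Exists   # should list worker91.kube.k-space.ee
```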

View File

@@ -16,5 +16,4 @@ resources:
 - ssh://git@git.k-space.ee/secretspace/kube/frigate # secrets (.env): go2rtc-config, frigate-mqtt-secret, frigate-rtsp-secret
 - ./auth.yml
 - ./rabbitmq.yml
-- ./storage.yml
 - ./transcode.yml

View File

@@ -1,32 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: frigate-storage
-spec:
-  persistentVolumeReclaimPolicy: Retain
-  capacity:
-    storage: 1Ti
-  accessModes:
-    - ReadWriteMany
-  storageClassName: ""
-  nfs:
-    server: 172.21.0.7
-    path: /nas/k6/frigate
-  mountOptions:
-    - vers=4
-    - minorversion=1
-    - noac
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: frigate-storage
-spec:
-  volumeName: frigate-storage
-  storageClassName: ""
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      storage: 1Ti

View File

@@ -29,13 +29,6 @@ spec:
                 values:
                 - go2rtc
             topologyKey: "kubernetes.io/hostname"
-      nodeSelector:
-        dedicated: nvr
-      tolerations:
-      - key: dedicated
-        operator: Equal
-        value: nvr
-        effect: NoSchedule
       containers:
       - name: go2rtc
         image: alexxit/go2rtc
@@ -50,9 +43,9 @@ spec:
         - mountPath: /config/go2rtc.yaml
           subPath: config.yml
          name: config
-        resources:
-          limits:
-            nvidia.com/gpu: 1
+        # resources:
+        #   limits:
+        #     nvidia.com/gpu: 1
       volumes:
       - name: config
         secret:

View File

@@ -125,24 +125,16 @@ ingress:
       - "*.k-space.ee"
 persistence:
   config:
+    # WIP :)
     enabled: false
     storageClass: "null"
     accessMode: ReadWriteOnce
     size: 1000Mi
     skipuninstall: false
   media:
     enabled: true
-    existingClaim: "frigate-storage"
     skipuninstall: true
+    storageClass: "hdd-ceph"
+    size: 1Ti
-# Force application to run on nvr node
 nodeSelector:
-  dedicated: nvr
+  coral.ai/tpu: Exists
 tolerations:
-  - key: dedicated
+  - key: coral.ai/tpu
     operator: Equal
-    value: nvr
+    value: Exists
     effect: NoSchedule

View File

@@ -34,7 +34,7 @@ persistence:
   jobservice:
     jobLog:
       existingClaim: ""
-      storageClass: "longhorn"
+      storageClass: "cephfs"
       subPath: ""
       accessMode: ReadWriteMany
       size: 5Gi

View File

@@ -4,7 +4,7 @@ kind: Kustomization
 namespace: kube-system
 resources:
-  - ./descheduler.yml
-  - ./kube-state-metrics.yml
-  - ./metrics-server.yml
-  - ./nvidia-device-plugin.yml
+  - ./descheduler.yaml
+  - ./kube-state-metrics.yaml
+  - ./metrics-server.yaml
+  # - ./nvidia-device-plugin.yml

View File

@@ -1 +0,0 @@
-longhorn.yaml

View File

@@ -1,41 +0,0 @@
-# Longhorn distributed block storage system
-## For users
-You should really avoid using Longhorn as it has over time
-[proven to be unreliable system](https://www.reddit.com/r/kubernetes/comments/1cbggo8/longhorn_is_unreliable/).
-Prefer using remote databases in your application via
-the Kubernetes operator pattern.
-Use Longhorn for applications that need persistent storage, but are unable
-to provide replication in the application layer:
-* Applications that insist writing into filesystem
-* Applications that serve Git repositories (eg Gitea)
-* Applications that check out Git repositories (eg Woodpecker, Drone and CI systems)
-* Applications that need to use SQLite
-Instead of using built-in `longhorn` storage class, please add new storage class
-with suitable replication, data locality parameters and reclaim policy
-[here](https://git.k-space.ee/k-space/kube/src/branch/master/storage-class.yaml)
-Longhorn backups are made once per day and it's configured to be uploaded to
-the Minio S3 bucket hosted at nas.k-space.ee
-## For administrators
-Longhorn was last upgraded with following snippet:
-```
-wget https://raw.githubusercontent.com/longhorn/longhorn/v1.8.2/deploy/longhorn.yaml
-patch -p0 < changes.diff
-kubectl -n longhorn-system apply -f longhorn.yaml -f application-extras.yml -f backup.yaml
-```
-After initial deployment `dedicated=storage:NoSchedule` was specified
-for `Kubernetes Taint Toleration` under `Setting -> General` on
-[Longhorn Dashboard](https://longhorn.k-space.ee/).
-Suitable nodes were tagged with `storage` and Longhorn scheduling was disabled on others.
-This is to prevent scheduling Longhorn data on arbitrary Kubernetes nodes as
-`storage[1-4].kube.k-space.ee` nodes are the ones which have additional 200G volume mounted at `/mnt/persistent/`
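
For the record, the node-side half of that arrangement is a plain Kubernetes taint; a sketch of the equivalent command (node name per the list in the removed README):

```
# repeat for storage1 through storage4
kubectl taint nodes storage1.kube.k-space.ee dedicated=storage:NoSchedule
```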

View File

@@ -1,138 +0,0 @@
----
-apiVersion: codemowers.cloud/v1beta1
-kind: OIDCMiddlewareClient
-metadata:
-  name: ui
-spec:
-  displayName: Longhorn
-  uri: 'https://longhorn.k-space.ee'
-  allowedGroups:
-    - k-space:kubernetes:admins
-  headerMapping:
-    email: Remote-Email
-    groups: Remote-Groups
-    name: Remote-Name
-    user: Remote-Username
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: longhorn-dashboard
-  namespace: longhorn-system
-  annotations:
-    kubernetes.io/ingress.class: traefik
-    external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
-    traefik.ingress.kubernetes.io/router.entrypoints: websecure
-    traefik.ingress.kubernetes.io/router.middlewares: longhorn-system-ui@kubernetescrd
-spec:
-  rules:
-    - host: longhorn.k-space.ee
-      http:
-        paths:
-          - pathType: Prefix
-            path: "/"
-            backend:
-              service:
-                name: longhorn-frontend
-                port:
-                  number: 80
-  tls:
-    - hosts:
-        - "*.k-space.ee"
----
-apiVersion: monitoring.coreos.com/v1
-kind: PodMonitor
-metadata:
-  name: manager
-spec:
-  selector: {}
-  podMetricsEndpoints:
-    - port: manager
----
-apiVersion: monitoring.coreos.com/v1
-kind: PrometheusRule
-metadata:
-  name: longhorn
-spec:
-  # Copied from https://longhorn.io/docs/1.2.4/monitoring/alert-rules-example/
-  groups:
-    - name: longhorn
-      rules:
-        - alert: LonghornVolumeActualSpaceUsedWarning
-          annotations:
-            description: The accumulated snapshots for volume use up more space than the volume's capacity
-            summary: The actual used space of Longhorn volume is twice the size of the volume capacity.
-          expr: longhorn_volume_actual_size_bytes > longhorn_volume_capacity_bytes * 2
-          for: 5m
-          labels:
-            issue: The actual used space of Longhorn volume {{$labels.volume}} on {{$labels.node}} is high.
-            severity: warning
-        - alert: LonghornVolumeStatusCritical
-          annotations:
-            description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Fault for
-              more than 2 minutes.
-            summary: Longhorn volume {{$labels.volume}} is Fault
-          expr: longhorn_volume_robustness == 3
-          for: 5m
-          labels:
-            issue: Longhorn volume {{$labels.volume}} is Fault.
-            severity: critical
-        - alert: LonghornVolumeStatusWarning
-          annotations:
-            description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Degraded for
-              more than 5 minutes.
-            summary: Longhorn volume {{$labels.volume}} is Degraded
-          expr: longhorn_volume_robustness == 2
-          for: 5m
-          labels:
-            issue: Longhorn volume {{$labels.volume}} is Degraded.
-            severity: warning
-        - alert: LonghornNodeStorageWarning
-          annotations:
-            description: The used storage of node {{$labels.node}} is at {{$value}}% capacity for
-              more than 5 minutes.
-            summary: The used storage of node is over 70% of the capacity.
-          expr: (longhorn_node_storage_usage_bytes / longhorn_node_storage_capacity_bytes) * 100 > 70
-          for: 5m
-          labels:
-            issue: The used storage of node {{$labels.node}} is high.
-            severity: warning
-        - alert: LonghornDiskStorageWarning
-          annotations:
-            description: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is at {{$value}}% capacity for
-              more than 5 minutes.
-            summary: The used storage of disk is over 70% of the capacity.
-          expr: (longhorn_disk_usage_bytes / longhorn_disk_capacity_bytes) * 100 > 70
-          for: 5m
-          labels:
-            issue: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is high.
-            severity: warning
-        - alert: LonghornNodeDown
-          annotations:
-            description: There are {{$value}} Longhorn nodes which have been offline for more than 5 minutes.
-            summary: Longhorn nodes is offline
-          expr: (avg(longhorn_node_count_total) or on() vector(0)) - (count(longhorn_node_status{condition="ready"} == 1) or on() vector(0)) > 0
-          for: 5m
-          labels:
-            issue: There are {{$value}} Longhorn nodes are offline
-            severity: critical
-        - alert: LonghornIntanceManagerCPUUsageWarning
-          annotations:
-            description: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has CPU Usage / CPU request is {{$value}}% for
-              more than 5 minutes.
-            summary: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has CPU Usage / CPU request is over 300%.
-          expr: (longhorn_instance_manager_cpu_usage_millicpu/longhorn_instance_manager_cpu_requests_millicpu) * 100 > 300
-          for: 5m
-          labels:
-            issue: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} consumes 3 times the CPU request.
-            severity: warning
-        - alert: LonghornNodeCPUUsageWarning
-          annotations:
-            description: Longhorn node {{$labels.node}} has CPU Usage / CPU capacity is {{$value}}% for
-              more than 5 minutes.
-            summary: Longhorn node {{$labels.node}} experiences high CPU pressure for more than 5m.
-          expr: (longhorn_node_cpu_usage_millicpu / longhorn_node_cpu_capacity_millicpu) * 100 > 90
-          for: 5m
-          labels:
-            issue: Longhorn node {{$labels.node}} experiences high CPU pressure.
-            severity: warning

View File

@@ -1,46 +0,0 @@
----
-apiVersion: codemowers.cloud/v1beta1
-kind: MinioBucketClaim
-metadata:
-  name: backup
-spec:
-  capacity: 1Ti
-  class: external
----
-apiVersion: longhorn.io/v1beta2
-kind: Setting
-metadata:
-  name: backup-target
-  namespace: longhorn-system
-value: 's3://longhorn-system-a4b235c5-7919-4cb0-9949-259e60c579f1@us-east1/'
----
-apiVersion: longhorn.io/v1beta2
-kind: Setting
-metadata:
-  name: backup-target-credential-secret
-  namespace: longhorn-system
-value: 'miniobucket-backup-owner-secrets'
----
-apiVersion: longhorn.io/v1beta1
-kind: RecurringJob
-metadata:
-  name: backup
-  namespace: longhorn-system
-spec:
-  cron: "0 2 * * *"
-  task: backup
-  groups:
-    - default
-  retain: 1
-  concurrency: 4
----
-apiVersion: longhorn.io/v1beta1
-kind: RecurringJob
-metadata:
-  name: trim
-  namespace: longhorn-system
-spec:
-  cron: "0 * * * *"
-  task: trim
-  groups:
-    - default

View File

@@ -1,53 +0,0 @@
---- longhorn.yaml 2024-07-07 14:16:47.953593433 +0300
-+++ longhorn.modded 2024-07-07 14:18:51.103452617 +0300
-@@ -86,14 +86,14 @@
-     storageclass.kubernetes.io/is-default-class: "true"
- provisioner: driver.longhorn.io
- allowVolumeExpansion: true
--reclaimPolicy: "Delete"
-+reclaimPolicy: "Retain"
- volumeBindingMode: Immediate
- parameters:
--  numberOfReplicas: "3"
-+  numberOfReplicas: "2"
-   staleReplicaTimeout: "30"
-   fromBackup: ""
--  fsType: "ext4"
--  dataLocality: "disabled"
-+  fsType: "xfs"
-+  dataLocality: "best-effort"
-   unmapMarkSnapChainRemoved: "ignored"
- ---
- # Source: longhorn/templates/crds.yaml
-@@ -4379,6 +4379,15 @@
-         app.kubernetes.io/version: v1.6.2
-         app: longhorn-manager
-     spec:
-+      tolerations:
-+      - key: dedicated
-+        operator: Equal
-+        value: nvr
-+        effect: NoSchedule
-+      - key: arch
-+        operator: Equal
-+        value: arm64
-+        effect: NoSchedule
-       containers:
-       - name: longhorn-manager
-         image: longhornio/longhorn-manager:v1.6.2
-@@ -4484,6 +4493,15 @@
-         app.kubernetes.io/version: v1.6.2
-         app: longhorn-driver-deployer
-     spec:
-+      tolerations:
-+      - key: dedicated
-+        operator: Equal
-+        value: nvr
-+        effect: NoSchedule
-+      - key: arch
-+        operator: Equal
-+        value: arm64
-+        effect: NoSchedule
-       initContainers:
-       - name: wait-longhorn-manager
-         image: longhornio/longhorn-manager:v1.6.2

View File

@@ -40,6 +40,15 @@ spec:
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+  name: unifi
+  namespace: metallb-system
+spec:
+  addresses:
+    - 172.21.102.0/24
 ---
 apiVersion: metallb.io/v1beta1
 kind: IPAddressPool
 metadata:
   name: bind-secondary-external
   namespace: metallb-system

View File

@@ -36,7 +36,7 @@ metadata:
   name: nyancat
   namespace: nyancat
   annotations:
-    metallb.universe.tf/address-pool: zoo
+    metallb.io/address-pool: zoo
     external-dns.alpha.kubernetes.io/hostname: nyancat.k-space.ee
 spec:
   type: LoadBalancer

View File

@@ -0,0 +1,236 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: mongo
+  annotations:
+    kubernetes.io/description: |
+      Storage class for Mongo and similar applications that
+      implement high availability in application layer.
+provisioner: rook-ceph.rbd.csi.ceph.com
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: xfs
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  imageFeatures: layering
+  imageFormat: '2'
+  pool: ks-nvme
+reclaimPolicy: Retain
+allowVolumeExpansion: true
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: prometheus
+  annotations:
+    kubernetes.io/description: |
+      Storage class for Prometheus and similar applications that
+      implement high availability in application layer.
+provisioner: rook-ceph.rbd.csi.ceph.com
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: xfs
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  imageFeatures: layering
+  imageFormat: '2'
+  pool: ks-nvme
+reclaimPolicy: Retain
+allowVolumeExpansion: true
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: woodpecker
+  annotations:
+    kubernetes.io/description: |
+      Storage class for Drone, Woodpecker and similar application
+      pipeline runs where Git repos are checked out to.
+      This storage class uses XFS, has no block level redundancy and it's
+      deleted as soon as the pod exits.
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+provisioner: rook-ceph.rbd.csi.ceph.com
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: xfs
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  imageFeatures: layering
+  imageFormat: '2'
+  pool: ks-nvme
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: gitea
+  annotations:
+    kubernetes.io/description: |
+      Storage class for Gitea and similar applications needing
+      block device level replication with 3 replicas using XFS filesystem and
+      best effort data locality.
+provisioner: rook-ceph.rbd.csi.ceph.com
+reclaimPolicy: Retain
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: xfs
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  imageFeatures: layering
+  imageFormat: '2'
+  pool: ks-nvme
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: nextcloud
+  annotations:
+    kubernetes.io/description: |
+      Storage class for Nextcloud needing
+      block device level replication with 3 replicas using XFS filesystem and
+      best effort data locality.
+provisioner: rook-ceph.rbd.csi.ceph.com
+reclaimPolicy: Retain
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: xfs
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  imageFeatures: layering
+  imageFormat: '2'
+  pool: ks-nvme
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rabbitmq
+  annotations:
+    kubernetes.io/description: |
+      Storage class for RabbitMQ and similar applications
+      deployed in highly available fashion utilizing application level
+      replication needing persistent volume.
+provisioner: rook-ceph.rbd.csi.ceph.com
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: xfs
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  imageFeatures: layering
+  imageFormat: '2'
+  pool: ks-nvme
+reclaimPolicy: Retain
+allowVolumeExpansion: true
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: unifi
+  annotations:
+    kubernetes.io/description: |
+      Storage class for Unifi and similar applications
+      deployed in highly available fashion utilizing application level
+      replication needing persistent volume.
+provisioner: rook-ceph.rbd.csi.ceph.com
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: xfs
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  imageFeatures: layering
+  imageFormat: '2'
+  pool: ks-nvme
+reclaimPolicy: Retain
+allowVolumeExpansion: true
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: hdd-ceph
+  annotations:
+    kubernetes.io/description: |
+      Generic HDD storage on CEPH.
+provisioner: rook-ceph.rbd.csi.ceph.com
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: xfs
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  imageFeatures: layering
+  imageFormat: '2'
+  pool: ks-hdd
+reclaimPolicy: Retain
+allowVolumeExpansion: true
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: "null"
+  annotations:
+    kubernetes.io/description: |
+      Storage class for applications insisting on having a PV, but actually do
+      not and for data that can be discarded immediately
+provisioner: rancher.io/local-path
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: cephfs
+provisioner: rook-ceph.cephfs.csi.ceph.com
+parameters:
+  clusterID: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  fsName: ks-fs
+  pool: ks-fs_data
+reclaimPolicy: Retain
+allowVolumeExpansion: true
+volumeBindingMode: Immediate
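
For reference, a claim against the new `cephfs` class would look roughly like this (claim name and size are illustrative); CephFS is the only class here offering ReadWriteMany, which is what the Harbor job-log PVC above relies on:

```
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-shared-data
spec:
  accessModes:
    - ReadWriteMany        # CephFS supports RWX; the RBD-backed classes are RWO
  storageClassName: cephfs
  resources:
    requests:
      storage: 1Gi
```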

View File

@@ -1,125 +0,0 @@
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: mongo
-  annotations:
-    kubernetes.io/description: |
-      Storage class for Mongo and similar applications that
-      implement high availability in application layer.
-provisioner: rook-ceph.rbd.csi.ceph.com
-parameters:
-  clusterID: rook-ceph
-  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
-  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
-  csi.storage.k8s.io/fstype: xfs
-  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
-  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
-  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
-  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
-  imageFeatures: layering
-  imageFormat: '2'
-  pool: ks-nvme
-reclaimPolicy: Retain
-allowVolumeExpansion: true
-volumeBindingMode: WaitForFirstConsumer
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: prometheus
-  annotations:
-    kubernetes.io/description: |
-      Storage class for Prometheus and similar applications that
-      implement high availability in application layer.
-provisioner: rook-ceph.rbd.csi.ceph.com
-parameters:
-  clusterID: rook-ceph
-  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
-  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
-  csi.storage.k8s.io/fstype: xfs
-  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
-  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
-  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
-  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
-  imageFeatures: layering
-  imageFormat: '2'
-  pool: ks-nvme
-reclaimPolicy: Retain
-allowVolumeExpansion: true
-volumeBindingMode: WaitForFirstConsumer
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: woodpecker
-  annotations:
-    kubernetes.io/description: |
-      Storage class for Drone, Woodpecker and similar application
-      pipeline runs where Git repos are checked out to.
-      This storage class uses XFS, has no block level redundancy and it's
-      deleted as soon as the pod exits.
-provisioner: driver.longhorn.io
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-allowVolumeExpansion: true
-parameters:
-  dataLocality: best-effort
-  numberOfReplicas: "1"
-  fsType: "xfs"
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: gitea
-  annotations:
-    kubernetes.io/description: |
-      Storage class for Gitea and similar applications needing
-      block device level replication with 3 replicas using XFS filesystem and
-      best effort data locality.
-provisioner: driver.longhorn.io
-reclaimPolicy: Retain
-volumeBindingMode: Immediate
-allowVolumeExpansion: true
-parameters:
-  dataLocality: best-effort
-  numberOfReplicas: "3"
-  fsType: "xfs"
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: rabbitmq
-  annotations:
-    kubernetes.io/description: |
-      Storage class for RabbitMQ and similar applications
-      deployed in highly available fashion utilizing application level
-      replication needing persistent volume.
-provisioner: rook-ceph.rbd.csi.ceph.com
-parameters:
-  clusterID: rook-ceph
-  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
-  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
-  csi.storage.k8s.io/fstype: xfs
-  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
-  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
-  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
-  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
-  imageFeatures: layering
-  imageFormat: '2'
-  pool: ks-nvme
-reclaimPolicy: Retain
-allowVolumeExpansion: true
-volumeBindingMode: WaitForFirstConsumer
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: "null"
-  annotations:
-    kubernetes.io/description: |
-      Storage class for applications insisting on having a PV, but actually do
-      not and for data that can be discarded immediately
-provisioner: rancher.io/local-path
-reclaimPolicy: Delete
-volumeBindingMode: WaitForFirstConsumer

View File

@@ -199,6 +199,7 @@ spec:
     - cidr: 172.21.53.1/32
     - cidr: 172.21.53.2/32
     - cidr: 172.21.53.3/32
+    - cidr: 172.21.102.1/32
     - cidr: 193.40.103.36/32
     - cidr: 193.40.103.37/32
     - cidr: 193.40.103.38/32

View File: unifi/kustomization.yaml (new file, +9 lines)

@@ -0,0 +1,9 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: &ns unifi
+resources:
+  - ssh://git@git.k-space.ee/secretspace/kube/unifi # secrets: unifi, unifi-mongo
+  - unifi-mongo.yaml
+  - unifi.yaml

View File: unifi/unifi-mongo.yaml (new file, +53 lines)

@@ -0,0 +1,53 @@
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: unifi-db
+  namespace: unifi
+spec:
+  selector:
+    matchLabels:
+      app: unifi-db
+  replicas: 1
+  minReadySeconds: 10
+  template:
+    metadata:
+      labels:
+        app: unifi-db
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+        - name: mongodb
+          image: mongo:8
+          ports:
+            - containerPort: 27017
+              name: mongo
+          envFrom:
+            - secretRef:
+                name: unifi-mongo
+          volumeMounts:
+            - name: data
+              mountPath: /data/db
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        storageClassName: unifi
+        resources:
+          requests:
+            storage: 10Gi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: unifi-db
+  namespace: unifi
+spec:
+  ports:
+    - port: 27017
+      name: mongo
+      targetPort: 27017
+  selector:
+    app: unifi-db
+  type: ClusterIP

View File: unifi/unifi.yaml (new file, +176 lines)

@@ -0,0 +1,176 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: unifi-app
+  namespace: unifi
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+  storageClassName: unifi
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: unifi
+  namespace: unifi
+spec:
+  serviceName: "unifi"
+  replicas: 1
+  selector:
+    matchLabels:
+      name: unifi
+  template:
+    metadata:
+      name: unifi
+      labels:
+        name: unifi
+    spec:
+      containers:
+        - name: unifi
+          image: lscr.io/linuxserver/unifi-network-application:latest
+          env:
+            - name: PUID
+              value: '1000'
+            - name: GUID
+              value: '1000'
+            - name: TZ
+              value: Etc/UTC
+          envFrom:
+            - secretRef:
+                name: unifi
+          ports:
+            - containerPort: 3478
+              protocol: UDP
+            - containerPort: 10001
+              protocol: UDP
+            - containerPort: 8080
+              protocol: TCP
+            - containerPort: 8443
+              protocol: TCP
+            - containerPort: 1900
+              protocol: UDP
+            - containerPort: 8843
+              protocol: TCP
+            - containerPort: 8880
+              protocol: TCP
+            - containerPort: 6789
+              protocol: TCP
+            - containerPort: 5514
+              protocol: UDP
+          volumeMounts:
+            - name: unifi-persistent-storage
+              mountPath: /config
+      volumes:
+        - name: unifi-persistent-storage
+          persistentVolumeClaim:
+            claimName: unifi-app
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: lb-unifi
+  namespace: unifi
+  annotations:
+    metallb.io/allow-shared-ip: 'true'
+    traefik.ingress.kubernetes.io/service.serverstransport: unifi-unifi@kubernetescrd
+spec:
+  type: LoadBalancer
+  externalTrafficPolicy: Local
+  loadBalancerIP: 172.21.102.1
+  selector:
+    name: unifi
+  ports:
+    - name: '8080'
+      protocol: TCP
+      port: 8080
+      targetPort: 8080
+    - name: '8443'
+      protocol: TCP
+      port: 8443
+      targetPort: 8443
+    - name: '1900'
+      protocol: TCP
+      port: 1900
+      targetPort: 1900
+    - name: '8843'
+      protocol: TCP
+      port: 8843
+      targetPort: 8843
+    - name: '8880'
+      protocol: TCP
+      port: 8880
+      targetPort: 8880
+    - name: '6789'
+      protocol: TCP
+      port: 6789
+      targetPort: 6789
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: lb-unifi-udp
+  namespace: unifi
+  annotations:
+    metallb.io/allow-shared-ip: 'true'
+spec:
+  type: LoadBalancer
+  externalTrafficPolicy: Local
+  loadBalancerIP: 172.21.102.1
+  selector:
+    name: unifi
+  ports:
+    - name: '3478'
+      protocol: UDP
+      port: 3478
+      targetPort: 3478
+    - name: '10001'
+      protocol: UDP
+      port: 10001
+      targetPort: 10001
+    - name: '5514'
+      protocol: UDP
+      port: 5514
+      targetPort: 5514
+---
+apiVersion: traefik.io/v1alpha1
+kind: ServersTransport
+metadata:
+  name: unifi
+  namespace: unifi
+spec:
+  insecureSkipVerify: true
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: unifi
+  namespace: unifi
+  annotations:
+    traefik.ingress.kubernetes.io/router.tls: "true"
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
+spec:
+  rules:
+    - host: "unifi.k-space.ee"
+      http:
+        paths:
+          - pathType: Prefix
+            path: /
+            backend:
+              service:
+                name: lb-unifi
+                port:
+                  number: 8443
+---
+apiVersion: codemowers.cloud/v1beta1
+kind: OIDCMiddlewareClient
+metadata:
+  name: unifi
+  namespace: unifi
+spec:
+  uri: 'https://unifi.k-space.ee/'

View File

@@ -5,7 +5,7 @@ metadata:
   name: wildduck
   annotations:
     external-dns.alpha.kubernetes.io/hostname: mail.k-space.ee
-    metallb.universe.tf/address-pool: wildduck
+    metallb.io/address-pool: wildduck
 spec:
   loadBalancerIP: 193.40.103.25
   type: LoadBalancer