Compare commits: master...9e3183d696 (1 commit)

CLUSTER.md
@@ -61,24 +61,44 @@ Network → VPN → `IPv6` → Search domains (Otsingudomeenid): `kube.k-space.ee`
 Created Ubuntu 22.04 VM-s on Proxmox with local storage.
 Added some ARM64 workers by using Ubuntu 22.04 server on Raspberry Pi.

-First master:
+After machines have booted up and you can reach them via SSH:
+
+```
+# Disable Ubuntu caching DNS resolver
+systemctl disable systemd-resolved.service
+systemctl stop systemd-resolved
+rm -fv /etc/resolv.conf
+cat > /etc/resolv.conf << EOF
+nameserver 1.1.1.1
+nameserver 8.8.8.8
+EOF
+
+# Disable multipathd as Longhorn handles that itself
+systemctl mask multipathd snapd
+systemctl disable --now multipathd snapd bluetooth ModemManager hciuart wpa_supplicant packagekit
+
+# Permit root login
+sed -i -e 's/PermitRootLogin no/PermitRootLogin without-password/' /etc/ssh/sshd_config
+systemctl reload ssh
+cat ~ubuntu/.ssh/authorized_keys > /root/.ssh/authorized_keys
+userdel -f ubuntu
+apt-get install -yqq linux-image-generic
+apt-get remove -yq cloud-init linux-image-*-kvm
+```
+
+On master:

 ```
 kubeadm init --token-ttl=120m --pod-network-cidr=10.244.0.0/16 --control-plane-endpoint "master.kube.k-space.ee:6443" --upload-certs --apiserver-cert-extra-sans master.kube.k-space.ee --node-name master1.kube.k-space.ee
 ```

-Joining nodes:
-```
-# On a master:
-kubeadm token create --print-join-command
-
-# Joining node:
-<printed join command --node-name "$(hostname -f)"
-```
+For the `kubeadm join` command specify FQDN via `--node-name $(hostname -f)`.

 Set AZ labels:

 ```
 for j in $(seq 1 9); do
-for t in master mon worker; do
+for t in master mon worker storage; do
 kubectl label nodes ${t}${j}.kube.k-space.ee topology.kubernetes.io/zone=node${j}
 done
 done
@@ -95,6 +115,11 @@ for j in $(seq 1 4); do
 kubectl taint nodes mon${j}.kube.k-space.ee dedicated=monitoring:NoSchedule
 kubectl label nodes mon${j}.kube.k-space.ee dedicated=monitoring
 done
+
+for j in $(seq 1 4); do
+kubectl taint nodes storage${j}.kube.k-space.ee dedicated=storage:NoSchedule
+kubectl label nodes storage${j}.kube.k-space.ee dedicated=storage
+done
 ```

 For `arm64` nodes add suitable taint to prevent scheduling non-multiarch images on them:
@@ -112,6 +137,13 @@ for j in ground front back; do
 done
 ```

+To reduce wear on storage:
+```
+echo StandardOutput=null >> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+systemctl daemon-reload
+systemctl restart kubelet
+```

 ## Technology mapping
 Our self-hosted Kubernetes stack compared to AWS based deployments:
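A minimal sketch of the join flow described by the rewritten CLUSTER.md line above, assuming the control-plane endpoint from the `kubeadm init` command; `<token>` and `<hash>` are placeholders that come from the printed join command:

```
# On an existing master: print a fresh join command (valid for the token TTL)
kubeadm token create --print-join-command

# On the joining node: run the printed command, adding the FQDN as node name
kubeadm join master.kube.k-space.ee:6443 \
  --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --node-name "$(hostname -f)"
```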
@@ -5,8 +5,8 @@ metadata:
 namespace: freeswitch
 annotations:
 external-dns.alpha.kubernetes.io/hostname: freeswitch.k-space.ee
-metallb.io/address-pool: eenet
-metallb.io/ip-allocated-from-pool: eenet
+metallb.universe.tf/address-pool: eenet
+metallb.universe.tf/ip-allocated-from-pool: eenet
 spec:
 ports:
 - name: sip-internal-udp
@@ -1,20 +0,0 @@ (file deleted)
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: unifi
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: unifi
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: unifi
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true
@@ -146,7 +146,7 @@ metadata:
 name: filebeat-syslog-udp
 annotations:
 external-dns.alpha.kubernetes.io/hostname: syslog.k-space.ee
-metallb.io/allow-shared-ip: syslog.k-space.ee
+metallb.universe.tf/allow-shared-ip: syslog.k-space.ee
 spec:
 type: LoadBalancer
 externalTrafficPolicy: Local
@@ -165,7 +165,7 @@ metadata:
 name: filebeat-syslog-tcp
 annotations:
 external-dns.alpha.kubernetes.io/hostname: syslog.k-space.ee
-metallb.io/allow-shared-ip: syslog.k-space.ee
+metallb.universe.tf/allow-shared-ip: syslog.k-space.ee
 spec:
 type: LoadBalancer
 externalTrafficPolicy: Local
@@ -14,7 +14,3 @@ kustomize build . --enable-helm
 - Amcrest 5MP Turret PoE Outdoor IP Camera with Mic/Audio, 98ft NightVision, 132° FOV, MicroSD (256GB) IP5M-T1179EW-AI-V3 white

 Cameras are enumerated (with credentials) in secretspace.
-
-## Coral setup
-1. Map USB to VM (#TODO: blog post coming up for exact steps)
-2. `k label no worker91.kube.k-space.ee coral.ai/tpu=Exists`
@@ -16,4 +16,5 @@ resources:
 - ssh://git@git.k-space.ee/secretspace/kube/frigate # secrets (.env): go2rtc-config, frigate-mqtt-secret, frigate-rtsp-secret
 - ./auth.yml
 - ./rabbitmq.yml
+- ./storage.yml
 - ./transcode.yml
frigate/storage.yml (new file)
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: frigate-storage
spec:
persistentVolumeReclaimPolicy: Retain
capacity:
storage: 1Ti
accessModes:
- ReadWriteMany
storageClassName: ""
nfs:
server: 172.21.0.7
path: /nas/k6/frigate
mountOptions:
- vers=4
- minorversion=1
- noac
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: frigate-storage
spec:
volumeName: frigate-storage
storageClassName: ""
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Ti
@@ -29,6 +29,13 @@ spec:
 values:
 - go2rtc
 topologyKey: "kubernetes.io/hostname"
+nodeSelector:
+dedicated: nvr
+tolerations:
+- key: dedicated
+operator: Equal
+value: nvr
+effect: NoSchedule
 containers:
 - name: go2rtc
 image: alexxit/go2rtc
@@ -43,9 +50,9 @@ spec:
 - mountPath: /config/go2rtc.yaml
 subPath: config.yml
 name: config
-# resources:
-# limits:
-# nvidia.com/gpu: 1
+resources:
+limits:
+nvidia.com/gpu: 1
 volumes:
 - name: config
 secret:
@@ -125,16 +125,24 @@ ingress:
 - "*.k-space.ee"

 persistence:
+config:
+# WIP :)
+enabled: false
+storageClass: "null"
+accessMode: ReadWriteOnce
+size: 1000Mi
+skipuninstall: false
+
 media:
 enabled: true
-storageClass: "hdd-ceph"
-size: 1Ti
+existingClaim: "frigate-storage"
+skipuninstall: true

 # Force application to run on nvr node
 nodeSelector:
-coral.ai/tpu: Exists
+dedicated: nvr
 tolerations:
-- key: coral.ai/tpu
+- key: dedicated
 operator: Equal
-value: Exists
+value: nvr
 effect: NoSchedule
@@ -1,8 +1,20 @@
-## inventory.k-space.ee
-Reads-writes to mongo.
+## hackerspace / inventory
+
 <!-- Referenced/linked by https://wiki.k-space.ee/en/hosting/doors -->
-A component of inventory is 'doorboy' (https://wiki.k-space.ee/en/hosting/doors)

-## k6.ee
+## [doorboy-proxy](https://github.com/k-space/doorboy-proxy)
+- Dispatches open events (from mongodb) to door controllers.
+- Handles Slack open events (to mongodb).
+- Forwards logs from door controllers to mongodb.
+- Broadcasts mongodb logs to Slack.
+
+See also:
+- inventory-app door components
+- https://wiki.k-space.ee/en/hosting/doors
+
+## [inventory-app](https://github.com/k-space/inventory-app) (inventory.k-space.ee)
+- Inventory
+- Manages door keycards.
+- Forwards door opens from website to mongodb (what are picked up by doorboy-proxy).
+
+## [goredirect](https://github.com/k-space/goredirect) (k6.ee)
 Reads from mongo, HTTP redirect to //inventory.k-space.ee/m/inventory/{uuid}/view
@@ -26,6 +26,7 @@ spec:
 - doorboy-proxy
 topologyKey: topology.kubernetes.io/zone
 weight: 100
+serviceAccountName: inventory-svcacc
 containers:
 - name: doorboy-proxy
 image: harbor.k-space.ee/k-space/doorboy-proxy:latest
@@ -33,21 +34,14 @@ spec:
 - secretRef:
 name: inventory-mongodb
 - secretRef:
-name: doorboy-api
+name: doorboy-godoor
+- secretRef:
+name: doorboy-slack
 env:
-- name: FLOOR_ACCESS_GROUP
-value: 'k-space:floor'
-- name: WORKSHOP_ACCESS_GROUP
-value: 'k-space:workshop'
-- name: CARD_URI
-value: 'https://inventory.k-space.ee/cards'
-- name: SWIPE_URI
-value: 'https://inventory.k-space.ee/m/doorboy/swipe'
-- name: INVENTORY_API_KEY
-valueFrom:
-secretKeyRef:
-name: inventory-api-key
-key: INVENTORY_API_KEY
+- name: OIDC_USERS_NAMESPACE
+value: passmower
+- name: SLACK_CHANNEL_ID
+value: CDL9H8Q9W
 securityContext:
 readOnlyRootFilesystem: true
 runAsNonRoot: true
@@ -1,37 +1,24 @@
-apiVersion: traefik.io/v1alpha1
-kind: Middleware
-metadata:
-name: members-inventory-redirect
-spec:
-redirectRegex:
-regex: ^https://members.k-space.ee/(.*)
-replacement: https://inventory.k-space.ee/${1}
-permanent: false
 ---
-# Creates a dummy/stub in auth.k-space.ee user-facing service listing (otherwise only inventory.k-space.ee is listed).
 apiVersion: codemowers.cloud/v1beta1
-kind: OIDCMiddlewareClient
+kind: OIDCClient
 metadata:
-name: doorboy
+name: inventory-app
 spec:
-displayName: Doorboy
-uri: 'https://inventory.k-space.ee/m/doorboy'
----
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
-name: members-inventory
-spec:
-entryPoints:
-- websecure
-routes:
-- match: Host(`members.k-space.ee`)
-kind: Rule
-middlewares:
-- name: members-inventory-redirect
-services:
-- kind: TraefikService
-name: api@internal
+uri: 'https://inventory.k-space.ee'
+redirectUris:
+- 'https://inventory.k-space.ee/login-callback'
+grantTypes:
+- 'authorization_code'
+- 'refresh_token'
+responseTypes:
+- 'code'
+availableScopes:
+- 'openid'
+- 'profile'
+- 'groups'
+- 'offline_access'
+tokenEndpointAuthMethod: 'client_secret_basic'
+pkce: false
 ---
 apiVersion: codemowers.cloud/v1beta1
 kind: MinioBucketClaim
hackerspace/inventory-redirects.yaml (new file)
---
# Creates a dummy/stub in auth.k-space.ee user-facing service listing (otherwise only inventory.k-space.ee is listed).
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: doorboy
spec:
displayName: Doorboy
uri: 'https://inventory.k-space.ee/m/doorboy'
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: members-inventory-redirect
spec:
redirectRegex:
regex: ^https://members.k-space.ee/(.*)
replacement: https://inventory.k-space.ee/${1}
permanent: false
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: members-inventory
spec:
entryPoints:
- websecure
routes:
- match: Host(`members.k-space.ee`)
kind: Rule
middlewares:
- name: members-inventory-redirect
services:
- kind: TraefikService
name: api@internal
@@ -20,36 +20,12 @@ spec:
 - image: harbor.k-space.ee/k-space/inventory-app:latest
 imagePullPolicy: Always
 env:
-- name: ENVIRONMENT_TYPE
-value: PROD
-- name: PYTHONUNBUFFERED
-value: "1"
 - name: INVENTORY_ASSETS_BASE_URL
 value: https://external.minio-clusters.k-space.ee/hackerspace-701d9303-0f27-4829-a2be-b1084021ad91/
 - name: MACADDRESS_OUTLINK_BASEURL
 value: https://grafana.k-space.ee/d/ddwyidbtbc16oa/ip-usage?orgId=1&from=now-2y&to=now&timezone=browser&var-Filters=mac%7C%3D%7C
 - name: OIDC_USERS_NAMESPACE
 value: passmower
-- name: SECRET_KEY
-valueFrom:
-secretKeyRef:
-key: SECRET_KEY
-name: inventory-secrets
-- name: INVENTORY_API_KEY
-valueFrom:
-secretKeyRef:
-key: INVENTORY_API_KEY
-name: inventory-api-key
-- name: SLACK_DOORLOG_CALLBACK
-valueFrom:
-secretKeyRef:
-key: SLACK_DOORLOG_CALLBACK
-name: slack-secrets
-- name: SLACK_VERIFICATION_TOKEN
-valueFrom:
-secretKeyRef:
-key: SLACK_VERIFICATION_TOKEN
-name: slack-secrets
 envFrom:
 - secretRef:
 name: miniobucket-inventory-external-owner-secrets
@@ -122,59 +98,3 @@ spec:
 tls:
 - hosts:
 - "*.k-space.ee"
----
-apiVersion: codemowers.cloud/v1beta1
-kind: OIDCClient
-metadata:
-name: inventory-app
-spec:
-uri: 'https://inventory.k-space.ee'
-redirectUris:
-- 'https://inventory.k-space.ee/login-callback'
-grantTypes:
-- 'authorization_code'
-- 'refresh_token'
-responseTypes:
-- 'code'
-availableScopes:
-- 'openid'
-- 'profile'
-- 'groups'
-- 'offline_access'
-tokenEndpointAuthMethod: 'client_secret_basic'
-pkce: false
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-name: inventory-role
-namespace: hackerspace
-rules:
-- verbs:
-- get
-- list
-- watch
-apiGroups:
-- codemowers.cloud
-resources:
-- oidcusers
-- oidcusers/status
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-name: inventory-roles
-namespace: hackerspace
-roleRef:
-apiGroup: rbac.authorization.k8s.io
-kind: ClusterRole
-name: inventory-role
-subjects:
-- kind: ServiceAccount
-name: inventory-svcacc
-namespace: hackerspace
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-name: inventory-svcacc
hackerspace/kustomization.yaml (new file)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: hackerspace

resources:
- ssh://git@git.k-space.ee/secretspace/kube/hackerspace # secrets: inventory-mongodb, inventory-s3, doorboy-godoor, doorboy-slack
- ./doorboy.yaml
- ./svcacc.yaml
- ./inventory.yaml
- ./inventory-extras.yaml
- ./inventory-redirects.yaml
- ./goredirect.yaml
hackerspace/svcacc.yaml (new file)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: inventory-role
namespace: hackerspace
rules:
- verbs:
- get
- list
- watch
apiGroups:
- codemowers.cloud
resources:
- oidcusers
- oidcusers/status
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: inventory-roles
namespace: hackerspace
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: inventory-role
subjects:
- kind: ServiceAccount
name: inventory-svcacc
namespace: hackerspace
---
# used by inventory and doorboy
apiVersion: v1
kind: ServiceAccount
metadata:
name: inventory-svcacc
@@ -34,7 +34,7 @@ persistence:
 jobservice:
 jobLog:
 existingClaim: ""
-storageClass: "cephfs"
+storageClass: "longhorn"
 subPath: ""
 accessMode: ReadWriteMany
 size: 5Gi
@@ -4,7 +4,7 @@ kind: Kustomization
 namespace: kube-system

 resources:
-- ./descheduler.yaml
-- ./kube-state-metrics.yaml
-- ./metrics-server.yaml
-# - ./nvidia-device-plugin.yml
+- ./descheduler.yml
+- ./kube-state-metrics.yml
+- ./metrics-server.yml
+- ./nvidia-device-plugin.yml
longhorn-system/.gitignore (new file, vendored)
longhorn.yaml
longhorn-system/README.md (new file)
# Longhorn distributed block storage system

## For users

You should really avoid using Longhorn as it has over time
[proven to be unreliable system](https://www.reddit.com/r/kubernetes/comments/1cbggo8/longhorn_is_unreliable/).
Prefer using remote databases in your application via
the Kubernetes operator pattern.

Use Longhorn for applications that need persistent storage, but are unable
to provide replication in the application layer:

* Applications that insist writing into filesystem
* Applications that serve Git repositories (eg Gitea)
* Applications that check out Git repositories (eg Woodpecker, Drone and CI systems)
* Applications that need to use SQLite

Instead of using built-in `longhorn` storage class, please add new storage class
with suitable replication, data locality parameters and reclaim policy
[here](https://git.k-space.ee/k-space/kube/src/branch/master/storage-class.yaml)

Longhorn backups are made once per day and it's configured to be uploaded to
the Minio S3 bucket hosted at nas.k-space.ee

## For administrators

Longhorn was last upgraded with following snippet:

```
wget https://raw.githubusercontent.com/longhorn/longhorn/v1.8.2/deploy/longhorn.yaml
patch -p0 < changes.diff
kubectl -n longhorn-system apply -f longhorn.yaml -f application-extras.yml -f backup.yaml
```

After initial deployment `dedicated=storage:NoSchedule` was specified
for `Kubernetes Taint Toleration` under `Setting -> General` on
[Longhorn Dashboard](https://longhorn.k-space.ee/).
Suitable nodes were tagged with `storage` and Longhorn scheduling was disabled on others.
This is to prevent scheduling Longhorn data on arbitrary Kubernetes nodes as
`storage[1-4].kube.k-space.ee` nodes are the ones which have additional 200G volume mounted at `/mnt/persistent/`
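A hedged sketch of such a custom storage class, modelled on the Longhorn-provisioned classes added in storage-class.yaml later in this commit; the class name and replica count here are illustrative only:

```
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-longhorn-replicated   # illustrative name, not part of the commit
provisioner: driver.longhorn.io
reclaimPolicy: Retain
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
  numberOfReplicas: "2"               # choose replication per workload
  dataLocality: best-effort
  fsType: "xfs"
```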
longhorn-system/application-extras.yml (new file)
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: ui
spec:
displayName: Longhorn
uri: 'https://longhorn.k-space.ee'
allowedGroups:
- k-space:kubernetes:admins
headerMapping:
email: Remote-Email
groups: Remote-Groups
name: Remote-Name
user: Remote-Username
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-dashboard
namespace: longhorn-system
annotations:
kubernetes.io/ingress.class: traefik
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: longhorn-system-ui@kubernetescrd
spec:
rules:
- host: longhorn.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: longhorn-frontend
port:
number: 80
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: manager
spec:
selector: {}
podMetricsEndpoints:
- port: manager
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: longhorn
spec:
# Copied from https://longhorn.io/docs/1.2.4/monitoring/alert-rules-example/
groups:
- name: longhorn
rules:
- alert: LonghornVolumeActualSpaceUsedWarning
annotations:
description: The accumulated snapshots for volume use up more space than the volume's capacity
summary: The actual used space of Longhorn volume is twice the size of the volume capacity.
expr: longhorn_volume_actual_size_bytes > longhorn_volume_capacity_bytes * 2
for: 5m
labels:
issue: The actual used space of Longhorn volume {{$labels.volume}} on {{$labels.node}} is high.
severity: warning
- alert: LonghornVolumeStatusCritical
annotations:
description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Fault for
more than 2 minutes.
summary: Longhorn volume {{$labels.volume}} is Fault
expr: longhorn_volume_robustness == 3
for: 5m
labels:
issue: Longhorn volume {{$labels.volume}} is Fault.
severity: critical
- alert: LonghornVolumeStatusWarning
annotations:
description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Degraded for
more than 5 minutes.
summary: Longhorn volume {{$labels.volume}} is Degraded
expr: longhorn_volume_robustness == 2
for: 5m
labels:
issue: Longhorn volume {{$labels.volume}} is Degraded.
severity: warning
- alert: LonghornNodeStorageWarning
annotations:
description: The used storage of node {{$labels.node}} is at {{$value}}% capacity for
more than 5 minutes.
summary: The used storage of node is over 70% of the capacity.
expr: (longhorn_node_storage_usage_bytes / longhorn_node_storage_capacity_bytes) * 100 > 70
for: 5m
labels:
issue: The used storage of node {{$labels.node}} is high.
severity: warning
- alert: LonghornDiskStorageWarning
annotations:
description: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is at {{$value}}% capacity for
more than 5 minutes.
summary: The used storage of disk is over 70% of the capacity.
expr: (longhorn_disk_usage_bytes / longhorn_disk_capacity_bytes) * 100 > 70
for: 5m
labels:
issue: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is high.
severity: warning
- alert: LonghornNodeDown
annotations:
description: There are {{$value}} Longhorn nodes which have been offline for more than 5 minutes.
summary: Longhorn nodes is offline
expr: (avg(longhorn_node_count_total) or on() vector(0)) - (count(longhorn_node_status{condition="ready"} == 1) or on() vector(0)) > 0
for: 5m
labels:
issue: There are {{$value}} Longhorn nodes are offline
severity: critical
- alert: LonghornIntanceManagerCPUUsageWarning
annotations:
description: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has CPU Usage / CPU request is {{$value}}% for
more than 5 minutes.
summary: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has CPU Usage / CPU request is over 300%.
expr: (longhorn_instance_manager_cpu_usage_millicpu/longhorn_instance_manager_cpu_requests_millicpu) * 100 > 300
for: 5m
labels:
issue: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} consumes 3 times the CPU request.
severity: warning
- alert: LonghornNodeCPUUsageWarning
annotations:
description: Longhorn node {{$labels.node}} has CPU Usage / CPU capacity is {{$value}}% for
more than 5 minutes.
summary: Longhorn node {{$labels.node}} experiences high CPU pressure for more than 5m.
expr: (longhorn_node_cpu_usage_millicpu / longhorn_node_cpu_capacity_millicpu) * 100 > 90
for: 5m
labels:
issue: Longhorn node {{$labels.node}} experiences high CPU pressure.
severity: warning
longhorn-system/backup.yaml (new file)
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim
metadata:
name: backup
spec:
capacity: 1Ti
class: external
---
apiVersion: longhorn.io/v1beta2
kind: Setting
metadata:
name: backup-target
namespace: longhorn-system
value: 's3://longhorn-system-a4b235c5-7919-4cb0-9949-259e60c579f1@us-east1/'
---
apiVersion: longhorn.io/v1beta2
kind: Setting
metadata:
name: backup-target-credential-secret
namespace: longhorn-system
value: 'miniobucket-backup-owner-secrets'
---
apiVersion: longhorn.io/v1beta1
kind: RecurringJob
metadata:
name: backup
namespace: longhorn-system
spec:
cron: "0 2 * * *"
task: backup
groups:
- default
retain: 1
concurrency: 4
---
apiVersion: longhorn.io/v1beta1
kind: RecurringJob
metadata:
name: trim
namespace: longhorn-system
spec:
cron: "0 * * * *"
task: trim
groups:
- default
longhorn-system/changes.diff (new file)
--- longhorn.yaml 2024-07-07 14:16:47.953593433 +0300
+++ longhorn.modded 2024-07-07 14:18:51.103452617 +0300
@@ -86,14 +86,14 @@
storageclass.kubernetes.io/is-default-class: "true"
provisioner: driver.longhorn.io
allowVolumeExpansion: true
- reclaimPolicy: "Delete"
+ reclaimPolicy: "Retain"
volumeBindingMode: Immediate
parameters:
- numberOfReplicas: "3"
+ numberOfReplicas: "2"
staleReplicaTimeout: "30"
fromBackup: ""
- fsType: "ext4"
- dataLocality: "disabled"
+ fsType: "xfs"
+ dataLocality: "best-effort"
unmapMarkSnapChainRemoved: "ignored"
---
# Source: longhorn/templates/crds.yaml
@@ -4379,6 +4379,15 @@
app.kubernetes.io/version: v1.6.2
app: longhorn-manager
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: nvr
+ effect: NoSchedule
+ - key: arch
+ operator: Equal
+ value: arm64
+ effect: NoSchedule
containers:
- name: longhorn-manager
image: longhornio/longhorn-manager:v1.6.2
@@ -4484,6 +4493,15 @@
app.kubernetes.io/version: v1.6.2
app: longhorn-driver-deployer
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: nvr
+ effect: NoSchedule
+ - key: arch
+ operator: Equal
+ value: arm64
+ effect: NoSchedule
initContainers:
- name: wait-longhorn-manager
image: longhornio/longhorn-manager:v1.6.2
@@ -40,15 +40,6 @@ spec:
 ---
 apiVersion: metallb.io/v1beta1
 kind: IPAddressPool
-metadata:
-name: unifi
-namespace: metallb-system
-spec:
-addresses:
-- 172.21.102.0/24
----
-apiVersion: metallb.io/v1beta1
-kind: IPAddressPool
 metadata:
 name: bind-secondary-external
 namespace: metallb-system
@@ -36,7 +36,7 @@ metadata:
 name: nyancat
 namespace: nyancat
 annotations:
-metallb.io/address-pool: zoo
+metallb.universe.tf/address-pool: zoo
 external-dns.alpha.kubernetes.io/hostname: nyancat.k-space.ee
 spec:
 type: LoadBalancer
@@ -1,236 +0,0 @@ (file deleted)
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mongo
annotations:
kubernetes.io/description: |
Storage class for Mongo and similar applications that
implement high availability in application layer.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: prometheus
annotations:
kubernetes.io/description: |
Storage class for Prometheus and similar applications that
implement high availability in application layer.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: woodpecker
annotations:
kubernetes.io/description: |
Storage class for Drone, Woodpecker and similar application
pipeline runs where Git repos are checked out to.
This storage class uses XFS, has no block level redundancy and it's
deleted as soon as the pod exits.
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gitea
annotations:
kubernetes.io/description: |
Storage class for Gitea and similar applications needing
block device level replication with 3 replicas using XFS filesystem and
best effort data locality.
provisioner: rook-ceph.rbd.csi.ceph.com
reclaimPolicy: Retain
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nextcloud
annotations:
kubernetes.io/description: |
Storage class for Nextcloud needing
block device level replication with 3 replicas using XFS filesystem and
best effort data locality.
provisioner: rook-ceph.rbd.csi.ceph.com
reclaimPolicy: Retain
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rabbitmq
annotations:
kubernetes.io/description: |
Storage class for RabbitMQ and similar applications
deployed in highly available fashion utilizing application level
replication needing persistent volume.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: unifi
annotations:
kubernetes.io/description: |
Storage class for Unifi and similar applications
deployed in highly available fashion utilizing application level
replication needing persistent volume.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: hdd-ceph
annotations:
kubernetes.io/description: |
Generic HDD storage on CEPH.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-hdd
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: "null"
annotations:
kubernetes.io/description: |
Storage class for applications insisting on having a PV, but actually do
not and for data that can be discarded immediately
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
fsName: ks-fs
pool: ks-fs_data
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: Immediate
storage-class.yaml (new file)
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mongo
annotations:
kubernetes.io/description: |
Storage class for Mongo and similar applications that
implement high availability in application layer.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: prometheus
annotations:
kubernetes.io/description: |
Storage class for Prometheus and similar applications that
implement high availability in application layer.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: woodpecker
annotations:
kubernetes.io/description: |
Storage class for Drone, Woodpecker and similar application
pipeline runs where Git repos are checked out to.
This storage class uses XFS, has no block level redundancy and it's
deleted as soon as the pod exits.
provisioner: driver.longhorn.io
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
dataLocality: best-effort
numberOfReplicas: "1"
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gitea
annotations:
kubernetes.io/description: |
Storage class for Gitea and similar applications needing
block device level replication with 3 replicas using XFS filesystem and
best effort data locality.
provisioner: driver.longhorn.io
reclaimPolicy: Retain
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
dataLocality: best-effort
numberOfReplicas: "3"
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rabbitmq
annotations:
kubernetes.io/description: |
Storage class for RabbitMQ and similar applications
deployed in highly available fashion utilizing application level
replication needing persistent volume.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: "null"
annotations:
kubernetes.io/description: |
Storage class for applications insisting on having a PV, but actually do
not and for data that can be discarded immediately
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
@@ -199,7 +199,6 @@ spec:
 - cidr: 172.21.53.1/32
 - cidr: 172.21.53.2/32
 - cidr: 172.21.53.3/32
-- cidr: 172.21.102.1/32
 - cidr: 193.40.103.36/32
 - cidr: 193.40.103.37/32
 - cidr: 193.40.103.38/32
@@ -1,9 +0,0 @@ (file deleted)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: &ns unifi

resources:
- ssh://git@git.k-space.ee/secretspace/kube/unifi # secrets: unifi, unifi-mongo
- unifi-mongo.yaml
- unifi.yaml
@@ -1,53 +0,0 @@ (file deleted)
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: unifi-db
namespace: unifi
spec:
selector:
matchLabels:
app: unifi-db
replicas: 1
minReadySeconds: 10
template:
metadata:
labels:
app: unifi-db
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb
image: mongo:8
ports:
- containerPort: 27017
name: mongo
envFrom:
- secretRef:
name: unifi-mongo
volumeMounts:
- name: data
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: unifi
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: unifi-db
namespace: unifi
spec:
ports:
- port: 27017
name: mongo
targetPort: 27017
selector:
app: unifi-db
type: ClusterIP
unifi/unifi.yaml (deleted)
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: unifi-app
namespace: unifi
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: unifi
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: unifi
namespace: unifi
spec:
serviceName: "unifi"
replicas: 1
selector:
matchLabels:
name: unifi
template:
metadata:
name: unifi
labels:
name: unifi
spec:
containers:
- name: unifi
image: lscr.io/linuxserver/unifi-network-application:latest
env:
- name: PUID
value: '1000'
- name: GUID
value: '1000'
- name: TZ
value: Etc/UTC
envFrom:
- secretRef:
name: unifi
ports:
- containerPort: 3478
protocol: UDP
- containerPort: 10001
protocol: UDP
- containerPort: 8080
protocol: TCP
- containerPort: 8443
protocol: TCP
- containerPort: 1900
protocol: UDP
- containerPort: 8843
protocol: TCP
- containerPort: 8880
protocol: TCP
- containerPort: 6789
protocol: TCP
- containerPort: 5514
protocol: UDP
volumeMounts:
- name: unifi-persistent-storage
mountPath: /config
volumes:
- name: unifi-persistent-storage
persistentVolumeClaim:
claimName: unifi-app
---
kind: Service
apiVersion: v1
metadata:
name: lb-unifi
namespace: unifi
annotations:
metallb.io/allow-shared-ip: 'true'
traefik.ingress.kubernetes.io/service.serverstransport: unifi-unifi@kubernetescrd
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.102.1
selector:
name: unifi
ports:
- name: '8080'
protocol: TCP
port: 8080
targetPort: 8080
- name: '8443'
protocol: TCP
port: 8443
targetPort: 8443
- name: '1900'
protocol: TCP
port: 1900
targetPort: 1900
- name: '8843'
protocol: TCP
port: 8843
targetPort: 8843
- name: '8880'
protocol: TCP
port: 8880
targetPort: 8880
- name: '6789'
protocol: TCP
port: 6789
targetPort: 6789
---
kind: Service
apiVersion: v1
metadata:
name: lb-unifi-udp
namespace: unifi
annotations:
metallb.io/allow-shared-ip: 'true'
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.102.1
selector:
name: unifi
ports:
- name: '3478'
protocol: UDP
port: 3478
targetPort: 3478
- name: '10001'
protocol: UDP
port: 10001
targetPort: 10001
- name: '5514'
protocol: UDP
port: 5514
targetPort: 5514
---
apiVersion: traefik.io/v1alpha1
kind: ServersTransport
metadata:
name: unifi
namespace: unifi
spec:
insecureSkipVerify: true
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: unifi
namespace: unifi
annotations:
traefik.ingress.kubernetes.io/router.tls: "true"
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: "unifi.k-space.ee"
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: lb-unifi
port:
number: 8443
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: unifi
namespace: unifi
spec:
uri: 'https://unifi.k-space.ee/'
@@ -5,7 +5,7 @@ metadata:
 name: wildduck
 annotations:
 external-dns.alpha.kubernetes.io/hostname: mail.k-space.ee
-metallb.io/address-pool: wildduck
+metallb.universe.tf/address-pool: wildduck
 spec:
 loadBalancerIP: 193.40.103.25
 type: LoadBalancer