42 Commits

SHA1        Author      Date                        Message
e08abcf3fe  Erki Aas    2025-11-28 15:39:58 +02:00  Fix nextcloud installation, use dedicated DB
20889f093e  Erki Aas    2025-11-27 21:26:09 +02:00  Migrate wiki to dedicated mariadb
98f934b705  Erki Aas    2025-11-27 21:20:58 +02:00  Migrate gitea to dedicated mariadb
200086a3c7  Erki Aas    2025-11-27 21:11:09 +02:00  Migrate freescout to upstream image and local storage
f9074ba60d  Erki Aas    2025-11-27 19:52:14 +02:00  Add mariadb for freescout
ffbef05bce  Erki Aas    2025-11-27 19:44:21 +02:00  Add mariadb for freescout
46a4d3bd2b  Erki Aas    2025-11-27 19:40:35 +02:00  Add mariadb for freescout
e0e7a4ab21  Erki Aas    2025-11-27 19:37:25 +02:00  Add mariadb for freescout
75dacabc72  Erki Aas    2025-11-27 19:34:08 +02:00  Add mariadb for freescout
42383a8009  Erki Aas    2025-11-18 19:08:11 +02:00  Add rotmower
e75272c5ff  Erki Aas    2025-11-18 19:05:33 +02:00  Add rotmower
25531431f7  Erki Aas    2025-11-18 18:59:16 +02:00  Add rotmower
0c073d4ef2  Erki Aas    2025-11-18 18:57:40 +02:00  Add rotmower
ae0b3d3939  Erki Aas    2025-11-18 18:55:22 +02:00  Add rotmower
addf02c355              2025-11-15 19:55:09 +02:00  pve93 hw failure, don't attempt cluster via it
d123517e63  Erki Aas    2025-11-13 19:57:07 +02:00  Increase/remove prometheus resource limits
a491e5961b  Erki Aas    2025-11-13 19:50:17 +02:00  Add scrapeconfig support
f9601bf5a5  Erki Aas    2025-11-13 19:39:21 +02:00  Add ceph monitoring
ae16e4c83f  Erki Aas    2025-11-13 19:28:01 +02:00  Add ceph monitoring
5961739b07              2025-11-01 11:53:39 +02:00  gitea v1.25.0
cd633fed02              2025-11-01 11:31:31 +02:00  gitea v1.24.7 (pls auto-update where)
eb475b6cb0              2025-09-25 22:12:28 +00:00  Fix pve nodes
f0294a74fa              2025-09-25 22:07:41 +00:00  Add pbs.infra.k-space.ee
9b07f8885e              2025-09-25 22:06:05 +00:00  Add pbs.infra.k-space.ee
c1df0fd9be              2025-09-25 22:05:40 +00:00  Add pbs.proxmox.infra.k-space.ee
fd58faeccb              2025-08-26 19:58:08 +03:00  Switch to non-deprecated metallb annotations (fixes issue #118)
4b7073997c  Erki Aas    2025-08-14 22:22:50 +03:00  Add unifi controller
f27a92a545  Erki Aas    2025-08-14 21:48:48 +03:00  Add unifi controller
f823300169  Erki Aas    2025-08-14 21:42:32 +03:00  Add unifi controller
c0f79a229c  Erki Aas    2025-08-14 21:06:59 +03:00  Add unifi controller
d8120a3b0d  Erki Aas    2025-08-14 20:34:28 +03:00  Add unifi controller
07c04bf216  Erki Aas    2025-08-14 20:31:19 +03:00  Add unifi controller
16fd71d6f0  Erki Aas    2025-08-14 20:29:41 +03:00  Add unifi controller
da0f3ea05f  Erki Aas    2025-08-14 20:28:42 +03:00  Add unifi controller
1204039ba3  Erki Aas    2025-08-14 18:58:54 +03:00  Remove longhorn
a85367da34  Erki Aas    2025-08-14 18:56:21 +03:00  Add cephfs storage class
9a02fcdef0  Erki Aas    2025-08-14 18:55:26 +03:00  Add cephfs storage class
b6d4ee2b05  Erki Aas    2025-08-14 18:55:07 +03:00  Migrate all storage classes to ceph
9c66882d83  Erki Aas    2025-08-14 18:40:34 +03:00  Use cephfs for harbor job pvc
07d4039ffe              2025-08-14 02:02:26 +03:00  there is no nvidia (old cameras will transcode on CPU; fixup kube-system to yaml)
aff54f33e1              2025-08-14 01:54:36 +03:00  storage-classes to rook
c65a2330af              2025-08-14 01:53:15 +03:00  frigate to VM and hdd-ceph
49 changed files with 1337 additions and 748 deletions


@@ -5,8 +5,8 @@ metadata:
namespace: freeswitch
annotations:
external-dns.alpha.kubernetes.io/hostname: freeswitch.k-space.ee
metallb.universe.tf/address-pool: eenet
metallb.universe.tf/ip-allocated-from-pool: eenet
metallb.io/address-pool: eenet
metallb.io/ip-allocated-from-pool: eenet
spec:
ports:
- name: sip-internal-udp
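
This hunk is part of the repo-wide rename from the deprecated `metallb.universe.tf/*` annotations to `metallb.io/*` (commit fd58faeccb, issue #118). A hedged one-liner for spotting Services that still carry the old prefix; the jq filter is an illustration, not from the repo:

```
# List Services in all namespaces still annotated with the deprecated prefix.
kubectl get svc -A -o json | jq -r '
  .items[]
  | select((.metadata.annotations // {}) | keys | any(startswith("metallb.universe.tf")))
  | "\(.metadata.namespace)/\(.metadata.name)"'
```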


@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: rotmower
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: rotmower
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: rotmower
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true


@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: unifi
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: unifi
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: unifi
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true


@@ -146,7 +146,7 @@ metadata:
name: filebeat-syslog-udp
annotations:
external-dns.alpha.kubernetes.io/hostname: syslog.k-space.ee
metallb.universe.tf/allow-shared-ip: syslog.k-space.ee
metallb.io/allow-shared-ip: syslog.k-space.ee
spec:
type: LoadBalancer
externalTrafficPolicy: Local
@@ -165,7 +165,7 @@ metadata:
name: filebeat-syslog-tcp
annotations:
external-dns.alpha.kubernetes.io/hostname: syslog.k-space.ee
metallb.universe.tf/allow-shared-ip: syslog.k-space.ee
metallb.io/allow-shared-ip: syslog.k-space.ee
spec:
type: LoadBalancer
externalTrafficPolicy: Local


@@ -25,6 +25,7 @@ spec:
uri: https://freescout.k-space.ee
redirectUris:
- https://freescout.k-space.ee/oauth_callback
- https://freescout.k-space.ee/oauth-login/callback/mlz500opr
allowedGroups:
- k-space:floor
grantTypes:
@@ -85,14 +86,14 @@ spec:
- /bin/bash
- '-c'
- >-
mysql -u kspace_freescout kspace_freescout -h mariadb.infra.k-space.ee
mysql -u freescout freescout -h mariadb
-p${MYSQL_PWD} < /tmp/update.sql
env:
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: freescout-secrets
key: DB_PASS
name: mariadb-secrets
key: MYSQL_PASSWORD
resources: {}
volumeMounts:
- name: tmp
@@ -140,7 +141,7 @@ spec:
ports:
- protocol: TCP
port: 80
targetPort: 8080
targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
@@ -160,120 +161,59 @@ spec:
labels:
app: freescout
spec:
containers:
containers:
- name: freescout
image: harbor.k-space.ee/k-space/freescout
image: ghcr.io/tiredofit/docker-freescout:php8.3-1.17.135
ports:
- containerPort: 8080
- containerPort: 80
env:
- name: DISPLAY_ERRORS
value: 'true'
value: 'false'
- name: APP_DEBUG
value: 'false'
- name: SITE_URL
value: 'https://freescout.k-space.ee'
- name: APP_URL
value: 'https://freescout.k-space.ee'
- name: DB_HOST
value: mariadb.infra.k-space.ee
value: mariadb
- name: DB_PORT
value: "3306"
- name: DB_DATABASE
value: kspace_freescout
- name: DB_USERNAME
value: kspace_freescout
- name: ADMIN_EMAIL
value: lauri@k-space.ee
- name: ADMIN_PASS
value: Salakala1!
- name: DB_NAME
value: freescout
- name: DB_USER
value: freescout
- name: TIMEZONE
value: Europe/Tallinn
- name: FREESCOUT_ATTACHMENTS_DRIVER
value: s3
- name: DB_PASSWORD
- name: DB_PASS
valueFrom:
secretKeyRef:
name: freescout-secrets
key: DB_PASS
- name: AWS_USE_PATH_STYLE_ENDPOINT
value: "true"
- name: AWS_BUCKET
valueFrom:
secretKeyRef:
name: miniobucket-attachments-owner-secrets
key: BUCKET_NAME
- name: APP_KEY
valueFrom:
secretKeyRef:
name: freescout-app
key: APP_KEY
envFrom:
- secretRef:
name: miniobucket-attachments-owner-secrets
name: mariadb-secrets
key: MYSQL_PASSWORD
volumeMounts:
- name: freescout-attachments
mountPath: /www/html/storage/app/attachment
subPath: attachment
- name: freescout-attachments
mountPath: /www/html/Modules
subPath: Modules
volumes:
- name: freescout-attachments
persistentVolumeClaim:
claimName: freescout-attachments
---
apiVersion: batch/v1
kind: CronJob
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: freescout-cron
name: freescout-attachments
namespace: freescout
spec:
schedule: "0,30 * * * *" # Should be every minute in theory, keeps hanging
jobTemplate:
spec:
activeDeadlineSeconds: 1800 # this is unholy https://github.com/freescout-helpdesk/freescout/blob/dist/app/Console/Kernel.php
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: freescout-cron
image: harbor.k-space.ee/k-space/freescout
imagePullPolicy: Always
command:
- php
- artisan
- schedule:run
env:
- name: DISPLAY_ERRORS
value: 'true'
- name: SITE_URL
value: 'https://freescout.k-space.ee'
- name: APP_URL
value: 'https://freescout.k-space.ee'
- name: DB_HOST
value: mariadb.infra.k-space.ee
- name: DB_PORT
value: "3306"
- name: DB_DATABASE
value: kspace_freescout
- name: DB_USERNAME
value: kspace_freescout
- name: ADMIN_EMAIL
value: lauri@k-space.ee
- name: ADMIN_PASS
value: Salakala1!
- name: TIMEZONE
value: Europe/Tallinn
- name: FREESCOUT_ATTACHMENTS_DRIVER
value: s3
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: freescout-secrets
key: DB_PASS
- name: AWS_USE_PATH_STYLE_ENDPOINT
value: "true"
- name: AWS_BUCKET
valueFrom:
secretKeyRef:
name: miniobucket-attachments-owner-secrets
key: BUCKET_NAME
- name: APP_KEY
valueFrom:
secretKeyRef:
name: freescout-app
key: APP_KEY
envFrom:
- secretRef:
name: miniobucket-attachments-owner-secrets
restartPolicy: Never
storageClassName: ceph-rbd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim

freescout/mariadb.yml (new file, 99 lines)

@@ -0,0 +1,99 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mariadb
spec:
revisionHistoryLimit: 0
serviceName: mariadb
selector:
matchLabels:
app: mariadb
replicas: 1
template:
metadata:
labels:
app: mariadb
annotations:
prometheus.io/port: '9104'
prometheus.io/scrape: 'true'
spec:
containers:
- name: exporter
image: mirror.gcr.io/prom/mysqld-exporter:latest
args:
- --mysqld.username
- exporter
- name: mariadb
image: mirror.gcr.io/library/mariadb:12.1
imagePullPolicy: Always
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secrets
key: MYSQL_ROOT_PASSWORD
- name: MYSQL_USER
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MYSQL_DATABASE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secrets
key: MYSQL_PASSWORD
volumeMounts:
- name: mariadb-data
mountPath: /var/lib/mysql
- name: mariadb-init
mountPath: /docker-entrypoint-initdb.d
volumes:
- name: mariadb-init
configMap:
name: mariadb-init-config
volumeClaimTemplates:
- metadata:
name: mariadb-data
spec:
storageClassName: ceph-rbd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: mariadb-secrets
spec:
size: 32
mapping:
- key: MYSQL_ROOT_PASSWORD
value: "%(plaintext)s"
- key: MYSQL_PASSWORD
value: "%(plaintext)s"
---
apiVersion: v1
kind: Service
metadata:
name: mariadb
spec:
ports:
- protocol: TCP
port: 3306
selector:
app: mariadb
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mariadb-init-config
data:
initdb.sql: |
CREATE USER 'exporter'@'127.0.0.1' WITH MAX_USER_CONNECTIONS 3;
GRANT PROCESS, REPLICATION CLIENT, SLAVE MONITOR, SELECT ON *.* TO 'exporter'@'127.0.0.1';
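
A quick way to verify the exporter bootstrap above; a sketch, assuming the pod keeps the StatefulSet's default name `mariadb-0` (the same check applies to the identical gitea, nextcloud and wiki copies of this manifest):

```
# The exporter account is created without a password and only for 127.0.0.1,
# so run the check from inside the mariadb container itself.
kubectl -n freescout exec mariadb-0 -c mariadb -- \
  mysql -u exporter -h 127.0.0.1 -e "SHOW GRANTS FOR 'exporter'@'127.0.0.1'"
```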


@@ -14,3 +14,7 @@ kustomize build . --enable-helm
- Amcrest 5MP Turret PoE Outdoor IP Camera with Mic/Audio, 98ft NightVision, 132° FOV, MicroSD (256GB) IP5M-T1179EW-AI-V3 white
Cameras are enumerated (with credentials) in secretspace.
## Coral setup
1. Map USB to VM (#TODO: blog post coming up for exact steps)
2. `k label no worker91.kube.k-space.ee coral.ai/tpu=Exists`
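
The `k` in step 2 is presumably a kubectl alias; spelled out, with a verification query:

```
kubectl label node worker91.kube.k-space.ee coral.ai/tpu=Exists
# Confirm the label before expecting Frigate to schedule onto the node:
kubectl get nodes -l coral.ai/tpu=Exists
```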


@@ -16,5 +16,4 @@ resources:
- ssh://git@git.k-space.ee/secretspace/kube/frigate # secrets (.env): go2rtc-config, frigate-mqtt-secret, frigate-rtsp-secret
- ./auth.yml
- ./rabbitmq.yml
- ./storage.yml
- ./transcode.yml


@@ -1,32 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: frigate-storage
spec:
persistentVolumeReclaimPolicy: Retain
capacity:
storage: 1Ti
accessModes:
- ReadWriteMany
storageClassName: ""
nfs:
server: 172.21.0.7
path: /nas/k6/frigate
mountOptions:
- vers=4
- minorversion=1
- noac
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: frigate-storage
spec:
volumeName: frigate-storage
storageClassName: ""
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Ti


@@ -29,13 +29,6 @@ spec:
values:
- go2rtc
topologyKey: "kubernetes.io/hostname"
nodeSelector:
dedicated: nvr
tolerations:
- key: dedicated
operator: Equal
value: nvr
effect: NoSchedule
containers:
- name: go2rtc
image: alexxit/go2rtc
@@ -50,9 +43,9 @@ spec:
- mountPath: /config/go2rtc.yaml
subPath: config.yml
name: config
resources:
limits:
nvidia.com/gpu: 1
# resources:
# limits:
# nvidia.com/gpu: 1
volumes:
- name: config
secret:


@@ -125,24 +125,16 @@ ingress:
- "*.k-space.ee"
persistence:
config:
# WIP :)
enabled: false
storageClass: "null"
accessMode: ReadWriteOnce
size: 1000Mi
skipuninstall: false
media:
enabled: true
existingClaim: "frigate-storage"
skipuninstall: true
storageClass: "hdd-ceph"
size: 1Ti
# Force application to run on nvr node
nodeSelector:
dedicated: nvr
coral.ai/tpu: Exists
tolerations:
- key: dedicated
- key: coral.ai/tpu
operator: Equal
value: nvr
value: Exists
effect: NoSchedule


@@ -92,12 +92,12 @@ spec:
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: gitea-secrets
key: GITEA__DATABASE__PASSWD
name: mariadb-secrets
key: MYSQL_PASSWORD
command:
- /bin/bash
- -c
- mysql -u kspace_git kspace_git -h mariadb.infra.k-space.ee -p${MYSQL_PWD} < /tmp/update.sql
- mysql -u gitea gitea -h mariadb -p${MYSQL_PWD} < /tmp/update.sql
---
apiVersion: apps/v1
kind: StatefulSet
@@ -125,7 +125,8 @@ spec:
runAsNonRoot: true
containers:
- name: gitea
image: docker.gitea.com/gitea:1.24.3-rootless
# https://github.com/go-gitea/gitea/releases
image: docker.gitea.com/gitea:1.25.0-rootless
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true
@@ -163,11 +164,11 @@ spec:
- name: GITEA__DATABASE__DB_TYPE
value: mysql
- name: GITEA__DATABASE__HOST
value: mariadb.infra.k-space.ee:3306
value: mariadb:3306
- name: GITEA__DATABASE__NAME
value: kspace_git
value: gitea
- name: GITEA__DATABASE__USER
value: kspace_git
value: gitea
- name: GITEA__DATABASE__SSL_MODE
value: disable
- name: GITEA__DATABASE__LOG_SQL
@@ -208,8 +209,8 @@ spec:
- name: GITEA__DATABASE__PASSWD
valueFrom:
secretKeyRef:
name: gitea-secrets
key: GITEA__DATABASE__PASSWD
name: mariadb-secrets
key: MYSQL_PASSWORD
- name: GITEA__MAILER__PASSWD
valueFrom:
secretKeyRef:
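
The update jobs in this and the freescout/wiki diffs expect a dump staged at /tmp/update.sql; producing it is not shown in the repo. A hedged sketch using the old shared-server credentials from the removed lines:

```
# Dump the old database (old host/user/db names taken from the diff);
# feed the result to the in-namespace update job shown above.
mysqldump -h mariadb.infra.k-space.ee -u kspace_git -p kspace_git > /tmp/update.sql
```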

gitea/mariadb.yml (new file, 99 lines)

@@ -0,0 +1,99 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mariadb
spec:
revisionHistoryLimit: 0
serviceName: mariadb
selector:
matchLabels:
app: mariadb
replicas: 1
template:
metadata:
labels:
app: mariadb
annotations:
prometheus.io/port: '9104'
prometheus.io/scrape: 'true'
spec:
containers:
- name: exporter
image: mirror.gcr.io/prom/mysqld-exporter:latest
args:
- --mysqld.username
- exporter
- name: mariadb
image: mirror.gcr.io/library/mariadb:12.1
imagePullPolicy: Always
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secrets
key: MYSQL_ROOT_PASSWORD
- name: MYSQL_USER
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MYSQL_DATABASE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secrets
key: MYSQL_PASSWORD
volumeMounts:
- name: mariadb-data
mountPath: /var/lib/mysql
- name: mariadb-init
mountPath: /docker-entrypoint-initdb.d
volumes:
- name: mariadb-init
configMap:
name: mariadb-init-config
volumeClaimTemplates:
- metadata:
name: mariadb-data
spec:
storageClassName: ceph-rbd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: mariadb-secrets
spec:
size: 32
mapping:
- key: MYSQL_ROOT_PASSWORD
value: "%(plaintext)s"
- key: MYSQL_PASSWORD
value: "%(plaintext)s"
---
apiVersion: v1
kind: Service
metadata:
name: mariadb
spec:
ports:
- protocol: TCP
port: 3306
selector:
app: mariadb
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mariadb-init-config
data:
initdb.sql: |
CREATE USER 'exporter'@'127.0.0.1' WITH MAX_USER_CONNECTIONS 3;
GRANT PROCESS, REPLICATION CLIENT, SLAVE MONITOR, SELECT ON *.* TO 'exporter'@'127.0.0.1';


@@ -1,20 +1,8 @@
## hackerspace / inventory
## inventory.k-space.ee
Reads-writes to mongo.
<!-- Referenced/linked by https://wiki.k-space.ee/en/hosting/doors -->
A component of inventory is 'doorboy' (https://wiki.k-space.ee/en/hosting/doors)
## [doorboy-proxy](https://github.com/k-space/doorboy-proxy)
- Dispatches open events (from mongodb) to door controllers.
- Handles Slack open events (to mongodb).
- Forwards logs from door controllers to mongodb.
- Broadcasts mongodb logs to Slack.
See also:
- inventory-app door components
- https://wiki.k-space.ee/en/hosting/doors
## [inventory-app](https://github.com/k-space/inventory-app) (inventory.k-space.ee)
- Inventory
- Manages door keycards.
- Forwards door opens from website to mongodb (what are picked up by doorboy-proxy).
## [goredirect](https://github.com/k-space/goredirect) (k6.ee)
## k6.ee
Reads from mongo, HTTP redirect to //inventory.k-space.ee/m/inventory/{uuid}/view


@@ -26,7 +26,6 @@ spec:
- doorboy-proxy
topologyKey: topology.kubernetes.io/zone
weight: 100
serviceAccountName: inventory-svcacc
containers:
- name: doorboy-proxy
image: harbor.k-space.ee/k-space/doorboy-proxy:latest
@@ -34,14 +33,21 @@ spec:
- secretRef:
name: inventory-mongodb
- secretRef:
name: doorboy-godoor
- secretRef:
name: doorboy-slack
name: doorboy-api
env:
- name: OIDC_USERS_NAMESPACE
value: passmower
- name: SLACK_CHANNEL_ID
value: CDL9H8Q9W
- name: FLOOR_ACCESS_GROUP
value: 'k-space:floor'
- name: WORKSHOP_ACCESS_GROUP
value: 'k-space:workshop'
- name: CARD_URI
value: 'https://inventory.k-space.ee/cards'
- name: SWIPE_URI
value: 'https://inventory.k-space.ee/m/doorboy/swipe'
- name: INVENTORY_API_KEY
valueFrom:
secretKeyRef:
name: inventory-api-key
key: INVENTORY_API_KEY
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true


@@ -1,24 +1,37 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: inventory-app
name: members-inventory-redirect
spec:
uri: 'https://inventory.k-space.ee'
redirectUris:
- 'https://inventory.k-space.ee/login-callback'
grantTypes:
- 'authorization_code'
- 'refresh_token'
responseTypes:
- 'code'
availableScopes:
- 'openid'
- 'profile'
- 'groups'
- 'offline_access'
tokenEndpointAuthMethod: 'client_secret_basic'
pkce: false
redirectRegex:
regex: ^https://members.k-space.ee/(.*)
replacement: https://inventory.k-space.ee/${1}
permanent: false
---
# Creates a dummy/stub in auth.k-space.ee user-facing service listing (otherwise only inventory.k-space.ee is listed).
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: doorboy
spec:
displayName: Doorboy
uri: 'https://inventory.k-space.ee/m/doorboy'
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: members-inventory
spec:
entryPoints:
- websecure
routes:
- match: Host(`members.k-space.ee`)
kind: Rule
middlewares:
- name: members-inventory-redirect
services:
- kind: TraefikService
name: api@internal
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim


@@ -1,35 +0,0 @@
---
# Creates a dummy/stub in auth.k-space.ee user-facing service listing (otherwise only inventory.k-space.ee is listed).
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: doorboy
spec:
displayName: Doorboy
uri: 'https://inventory.k-space.ee/m/doorboy'
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: members-inventory-redirect
spec:
redirectRegex:
regex: ^https://members.k-space.ee/(.*)
replacement: https://inventory.k-space.ee/${1}
permanent: false
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: members-inventory
spec:
entryPoints:
- websecure
routes:
- match: Host(`members.k-space.ee`)
kind: Rule
middlewares:
- name: members-inventory-redirect
services:
- kind: TraefikService
name: api@internal
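
A hedged smoke test for the members.k-space.ee redirect defined above; `permanent: false` means a temporary (302) redirect is expected:

```
# Expect a 302 with a Location header pointing at inventory.k-space.ee.
curl -sI https://members.k-space.ee/m/inventory
```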


@@ -20,12 +20,36 @@ spec:
- image: harbor.k-space.ee/k-space/inventory-app:latest
imagePullPolicy: Always
env:
- name: ENVIRONMENT_TYPE
value: PROD
- name: PYTHONUNBUFFERED
value: "1"
- name: INVENTORY_ASSETS_BASE_URL
value: https://external.minio-clusters.k-space.ee/hackerspace-701d9303-0f27-4829-a2be-b1084021ad91/
- name: MACADDRESS_OUTLINK_BASEURL
value: https://grafana.k-space.ee/d/ddwyidbtbc16oa/ip-usage?orgId=1&from=now-2y&to=now&timezone=browser&var-Filters=mac%7C%3D%7C
- name: OIDC_USERS_NAMESPACE
value: passmower
- name: SECRET_KEY
valueFrom:
secretKeyRef:
key: SECRET_KEY
name: inventory-secrets
- name: INVENTORY_API_KEY
valueFrom:
secretKeyRef:
key: INVENTORY_API_KEY
name: inventory-api-key
- name: SLACK_DOORLOG_CALLBACK
valueFrom:
secretKeyRef:
key: SLACK_DOORLOG_CALLBACK
name: slack-secrets
- name: SLACK_VERIFICATION_TOKEN
valueFrom:
secretKeyRef:
key: SLACK_VERIFICATION_TOKEN
name: slack-secrets
envFrom:
- secretRef:
name: miniobucket-inventory-external-owner-secrets
@@ -98,3 +122,59 @@ spec:
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: inventory-app
spec:
uri: 'https://inventory.k-space.ee'
redirectUris:
- 'https://inventory.k-space.ee/login-callback'
grantTypes:
- 'authorization_code'
- 'refresh_token'
responseTypes:
- 'code'
availableScopes:
- 'openid'
- 'profile'
- 'groups'
- 'offline_access'
tokenEndpointAuthMethod: 'client_secret_basic'
pkce: false
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: inventory-role
namespace: hackerspace
rules:
- verbs:
- get
- list
- watch
apiGroups:
- codemowers.cloud
resources:
- oidcusers
- oidcusers/status
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: inventory-roles
namespace: hackerspace
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: inventory-role
subjects:
- kind: ServiceAccount
name: inventory-svcacc
namespace: hackerspace
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: inventory-svcacc


@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: hackerspace
resources:
- ssh://git@git.k-space.ee/secretspace/kube/hackerspace # secrets: inventory-mongodb, inventory-s3, doorboy-godoor, doorboy-slack
- ./doorboy.yaml
- ./svcacc.yaml
- ./inventory.yaml
- ./inventory-extras.yaml
- ./inventory-redirects.yaml
- ./goredirect.yaml


@@ -1,35 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: inventory-role
namespace: hackerspace
rules:
- verbs:
- get
- list
- watch
apiGroups:
- codemowers.cloud
resources:
- oidcusers
- oidcusers/status
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: inventory-roles
namespace: hackerspace
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: inventory-role
subjects:
- kind: ServiceAccount
name: inventory-svcacc
namespace: hackerspace
---
# used by inventory and doorboy
apiVersion: v1
kind: ServiceAccount
metadata:
name: inventory-svcacc


@@ -34,7 +34,7 @@ persistence:
jobservice:
jobLog:
existingClaim: ""
storageClass: "longhorn"
storageClass: "cephfs"
subPath: ""
accessMode: ReadWriteMany
size: 5Gi


@@ -4,7 +4,7 @@ kind: Kustomization
namespace: kube-system
resources:
- ./descheduler.yml
- ./kube-state-metrics.yml
- ./metrics-server.yml
- ./nvidia-device-plugin.yml
- ./descheduler.yaml
- ./kube-state-metrics.yaml
- ./metrics-server.yaml
# - ./nvidia-device-plugin.yml


@@ -1 +0,0 @@
longhorn.yaml


@@ -1,41 +0,0 @@
# Longhorn distributed block storage system
## For users
You should really avoid using Longhorn as it has over time
[proven to be unreliable system](https://www.reddit.com/r/kubernetes/comments/1cbggo8/longhorn_is_unreliable/).
Prefer using remote databases in your application via
the Kubernetes operator pattern.
Use Longhorn for applications that need persistent storage, but are unable
to provide replication in the application layer:
* Applications that insist writing into filesystem
* Applications that serve Git repositories (eg Gitea)
* Applications that check out Git repositories (eg Woodpecker, Drone and CI systems)
* Applications that need to use SQLite
Instead of using built-in `longhorn` storage class, please add new storage class
with suitable replication, data locality parameters and reclaim policy
[here](https://git.k-space.ee/k-space/kube/src/branch/master/storage-class.yaml)
Longhorn backups are made once per day and it's configured to be uploaded to
the Minio S3 bucket hosted at nas.k-space.ee
## For administrators
Longhorn was last upgraded with following snippet:
```
wget https://raw.githubusercontent.com/longhorn/longhorn/v1.8.2/deploy/longhorn.yaml
patch -p0 < changes.diff
kubectl -n longhorn-system apply -f longhorn.yaml -f application-extras.yml -f backup.yaml
```
After initial deployment `dedicated=storage:NoSchedule` was specified
for `Kubernetes Taint Toleration` under `Setting -> General` on
[Longhorn Dashboard](https://longhorn.k-space.ee/).
Suitable nodes were tagged with `storage` and Longhorn scheduling was disabled on others.
This is to prevent scheduling Longhorn data on arbitrary Kubernetes nodes as
`storage[1-4].kube.k-space.ee` nodes are the ones which have additional 200G volume mounted at `/mnt/persistent/`


@@ -1,138 +0,0 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: ui
spec:
displayName: Longhorn
uri: 'https://longhorn.k-space.ee'
allowedGroups:
- k-space:kubernetes:admins
headerMapping:
email: Remote-Email
groups: Remote-Groups
name: Remote-Name
user: Remote-Username
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-dashboard
namespace: longhorn-system
annotations:
kubernetes.io/ingress.class: traefik
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: longhorn-system-ui@kubernetescrd
spec:
rules:
- host: longhorn.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: longhorn-frontend
port:
number: 80
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: manager
spec:
selector: {}
podMetricsEndpoints:
- port: manager
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: longhorn
spec:
# Copied from https://longhorn.io/docs/1.2.4/monitoring/alert-rules-example/
groups:
- name: longhorn
rules:
- alert: LonghornVolumeActualSpaceUsedWarning
annotations:
description: The accumulated snapshots for volume use up more space than the volume's capacity
summary: The actual used space of Longhorn volume is twice the size of the volume capacity.
expr: longhorn_volume_actual_size_bytes > longhorn_volume_capacity_bytes * 2
for: 5m
labels:
issue: The actual used space of Longhorn volume {{$labels.volume}} on {{$labels.node}} is high.
severity: warning
- alert: LonghornVolumeStatusCritical
annotations:
description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Fault for
more than 2 minutes.
summary: Longhorn volume {{$labels.volume}} is Fault
expr: longhorn_volume_robustness == 3
for: 5m
labels:
issue: Longhorn volume {{$labels.volume}} is Fault.
severity: critical
- alert: LonghornVolumeStatusWarning
annotations:
description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Degraded for
more than 5 minutes.
summary: Longhorn volume {{$labels.volume}} is Degraded
expr: longhorn_volume_robustness == 2
for: 5m
labels:
issue: Longhorn volume {{$labels.volume}} is Degraded.
severity: warning
- alert: LonghornNodeStorageWarning
annotations:
description: The used storage of node {{$labels.node}} is at {{$value}}% capacity for
more than 5 minutes.
summary: The used storage of node is over 70% of the capacity.
expr: (longhorn_node_storage_usage_bytes / longhorn_node_storage_capacity_bytes) * 100 > 70
for: 5m
labels:
issue: The used storage of node {{$labels.node}} is high.
severity: warning
- alert: LonghornDiskStorageWarning
annotations:
description: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is at {{$value}}% capacity for
more than 5 minutes.
summary: The used storage of disk is over 70% of the capacity.
expr: (longhorn_disk_usage_bytes / longhorn_disk_capacity_bytes) * 100 > 70
for: 5m
labels:
issue: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is high.
severity: warning
- alert: LonghornNodeDown
annotations:
description: There are {{$value}} Longhorn nodes which have been offline for more than 5 minutes.
summary: Longhorn nodes is offline
expr: (avg(longhorn_node_count_total) or on() vector(0)) - (count(longhorn_node_status{condition="ready"} == 1) or on() vector(0)) > 0
for: 5m
labels:
issue: There are {{$value}} Longhorn nodes are offline
severity: critical
- alert: LonghornIntanceManagerCPUUsageWarning
annotations:
description: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has CPU Usage / CPU request is {{$value}}% for
more than 5 minutes.
summary: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has CPU Usage / CPU request is over 300%.
expr: (longhorn_instance_manager_cpu_usage_millicpu/longhorn_instance_manager_cpu_requests_millicpu) * 100 > 300
for: 5m
labels:
issue: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} consumes 3 times the CPU request.
severity: warning
- alert: LonghornNodeCPUUsageWarning
annotations:
description: Longhorn node {{$labels.node}} has CPU Usage / CPU capacity is {{$value}}% for
more than 5 minutes.
summary: Longhorn node {{$labels.node}} experiences high CPU pressure for more than 5m.
expr: (longhorn_node_cpu_usage_millicpu / longhorn_node_cpu_capacity_millicpu) * 100 > 90
for: 5m
labels:
issue: Longhorn node {{$labels.node}} experiences high CPU pressure.
severity: warning


@@ -1,46 +0,0 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim
metadata:
name: backup
spec:
capacity: 1Ti
class: external
---
apiVersion: longhorn.io/v1beta2
kind: Setting
metadata:
name: backup-target
namespace: longhorn-system
value: 's3://longhorn-system-a4b235c5-7919-4cb0-9949-259e60c579f1@us-east1/'
---
apiVersion: longhorn.io/v1beta2
kind: Setting
metadata:
name: backup-target-credential-secret
namespace: longhorn-system
value: 'miniobucket-backup-owner-secrets'
---
apiVersion: longhorn.io/v1beta1
kind: RecurringJob
metadata:
name: backup
namespace: longhorn-system
spec:
cron: "0 2 * * *"
task: backup
groups:
- default
retain: 1
concurrency: 4
---
apiVersion: longhorn.io/v1beta1
kind: RecurringJob
metadata:
name: trim
namespace: longhorn-system
spec:
cron: "0 * * * *"
task: trim
groups:
- default


@@ -1,53 +0,0 @@
--- longhorn.yaml 2024-07-07 14:16:47.953593433 +0300
+++ longhorn.modded 2024-07-07 14:18:51.103452617 +0300
@@ -86,14 +86,14 @@
storageclass.kubernetes.io/is-default-class: "true"
provisioner: driver.longhorn.io
allowVolumeExpansion: true
- reclaimPolicy: "Delete"
+ reclaimPolicy: "Retain"
volumeBindingMode: Immediate
parameters:
- numberOfReplicas: "3"
+ numberOfReplicas: "2"
staleReplicaTimeout: "30"
fromBackup: ""
- fsType: "ext4"
- dataLocality: "disabled"
+ fsType: "xfs"
+ dataLocality: "best-effort"
unmapMarkSnapChainRemoved: "ignored"
---
# Source: longhorn/templates/crds.yaml
@@ -4379,6 +4379,15 @@
app.kubernetes.io/version: v1.6.2
app: longhorn-manager
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: nvr
+ effect: NoSchedule
+ - key: arch
+ operator: Equal
+ value: arm64
+ effect: NoSchedule
containers:
- name: longhorn-manager
image: longhornio/longhorn-manager:v1.6.2
@@ -4484,6 +4493,15 @@
app.kubernetes.io/version: v1.6.2
app: longhorn-driver-deployer
spec:
+ tolerations:
+ - key: dedicated
+ operator: Equal
+ value: nvr
+ effect: NoSchedule
+ - key: arch
+ operator: Equal
+ value: arm64
+ effect: NoSchedule
initContainers:
- name: wait-longhorn-manager
image: longhornio/longhorn-manager:v1.6.2


@@ -40,6 +40,15 @@ spec:
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: unifi
namespace: metallb-system
spec:
addresses:
- 172.21.102.0/24
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: bind-secondary-external
namespace: metallb-system

monitoring/ceph.yaml (new file, 18 lines)

@@ -0,0 +1,18 @@
---
apiVersion: monitoring.coreos.com/v1alpha1
kind: ScrapeConfig
metadata:
name: nodes-ceph
namespace: monitoring
spec:
metricsPath: /metrics
scheme: HTTP
scrapeInterval: 3s
staticConfigs:
- labels:
job: ceph-exporter
targets:
- pve90.proxmox.infra.k-space.ee:9283
- pve91.proxmox.infra.k-space.ee:9283
- pve92.proxmox.infra.k-space.ee:9283
- pve93.proxmox.infra.k-space.ee:9283
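
A hedged reachability check for the targets above (per commit addf02c355, pve93 has a hardware failure and may not answer):

```
for n in 90 91 92 93; do
  curl -s --max-time 2 "http://pve${n}.proxmox.infra.k-space.ee:9283/metrics" | head -n 1
done
```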


@@ -8,11 +8,11 @@ spec:
targets:
staticConfig:
static:
- nas.mgmt.k-space.ee:9100
- pve1.proxmox.infra.k-space.ee:9100
- pve2.proxmox.infra.k-space.ee:9100
- pve8.proxmox.infra.k-space.ee:9100
- pve9.proxmox.infra.k-space.ee:9100
- pve90.proxmox.infra.k-space.ee:9100
- pve91.proxmox.infra.k-space.ee:9100
- pve92.proxmox.infra.k-space.ee:9100
- pve93.proxmox.infra.k-space.ee:9100
- pbs.infra.k-space.ee:9100
relabelingConfigs:
- sourceLabels: [__param_target]
targetLabel: instance

monitoring/pbs.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
---
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
name: pbs-proxmox
spec:
scrapeTimeout: 30s
targets:
staticConfig:
static:
- pbs.infra.k-space.ee:10019
relabelingConfigs:
- sourceLabels: [__param_target]
targetLabel: instance
- sourceLabels: [__param_target]
targetLabel: __address__
prober:
url: localhost
path: /metrics
metricRelabelings:
- sourceLabels: [__address__]
targetLabel: target
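
The Probe uses the common static-target pattern: `prober.url` is a required field filled with a placeholder, while relabeling copies each target into `__address__` so Prometheus scrapes it directly. A hedged manual equivalent of one scrape:

```
curl -s http://pbs.infra.k-space.ee:10019/metrics | head -n 3
```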


@@ -58,10 +58,9 @@ spec:
retentionSize: 8GB
resources:
limits:
cpu: 500m
memory: 2Gi
memory: 4Gi
requests:
cpu: 100m
cpu: 1000m
memory: 700Mi
storage:
volumeClaimTemplate:
@@ -72,6 +71,8 @@ spec:
requests:
storage: 10Gi
storageClassName: prometheus
scrapeConfigSelector: {}
scrapeConfigNamespaceSelector: {}
---
apiVersion: v1
kind: ServiceAccount
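
Setting both selectors to `{}` opts Prometheus into ScrapeConfig objects from every namespace, which is what makes the new monitoring/ceph.yaml take effect. A hedged check:

```
kubectl get scrapeconfigs.monitoring.coreos.com -A
```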


@@ -45,7 +45,8 @@ spec:
displayName: Nextcloud
uri: https://nextcloud.k-space.ee
redirectUris:
- https://nextcloud.k-space.ee/apps/oidc_login/oidc
- https://nextcloud.k-space.ee/apps/user_oidc/code
- https://nextcloud.k-space.ee/apps/user_oidc
allowedGroups:
- k-space:floor
grantTypes:
@@ -78,16 +79,33 @@ spec:
spec:
enableServiceLinks: false
initContainers:
- name: fix-permissions
image: busybox
command: ["sh", "-c", "chown -R 1000:1000 /var/www/html/"]
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /var/www/html/
name: data
- name: setup-php-config
image: mirror.gcr.io/library/nextcloud:31.0.2@sha256:c418eb78924b446c744eee30587564a8d5ef625ad64369c81936bd1491f589e3
image: mirror.gcr.io/library/nextcloud:32.0.2@sha256:8cb1dc8c26944115469dd22f4965d2ed35bab9cf8c48d2bb052c8e9f83821ded
command: [ "/bin/sh","-c" ]
args: ["cp -r /usr/local/etc/php/conf.d/. /config/"]
volumeMounts:
- mountPath: /config
name: php-config
- name: setup-nextcloud-config
image: busybox
command: [ "/bin/sh","-c" ]
args: ["cp -r /config/. /var/www/html/config/"]
volumeMounts:
- mountPath: /config
name: config
- name: data
mountPath: /var/www/html
containers:
- name: nextcloud
image: mirror.gcr.io/library/nextcloud:31.0.2@sha256:c418eb78924b446c744eee30587564a8d5ef625ad64369c81936bd1491f589e3
image: mirror.gcr.io/library/nextcloud:32.0.2@sha256:8cb1dc8c26944115469dd22f4965d2ed35bab9cf8c48d2bb052c8e9f83821ded
resources:
limits:
cpu: 4000m
@@ -95,14 +113,14 @@ spec:
requests:
cpu: 400m
memory: 500Mi
readinessProbe:
livenessProbe:
exec:
command:
- /usr/local/bin/php
- /var/www/html/cron.php
initialDelaySeconds: 1
initialDelaySeconds: 300
periodSeconds: 300
timeoutSeconds: 30
timeoutSeconds: 300
env:
- name: OIDC_CLIENT_ID
valueFrom:
@@ -127,13 +145,13 @@ spec:
- name: UPLOAD_LIMIT
value: 10G
- name: MYSQL_USER
value: kspace_nextcloud
value: nextcloud
- name: MYSQL_DATABASE
value: kspace_nextcloud
value: nextcloud
- name: MYSQL_HOST
value: mariadb.infra.k-space.ee
value: mariadb
- name: NEXTCLOUD_ADMIN_USER
value: admin
value: k6_admin
- name: NEXTCLOUD_TRUSTED_DOMAINS
value: nextcloud.k-space.ee nextcloud # This is for reference - these values are not actually changed by env after installation.
- name: OBJECTSTORE_S3_HOST
@@ -173,7 +191,7 @@ spec:
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: nextcloud-imported-secrets
name: mariadb-secrets
key: MYSQL_PASSWORD
- name: OBJECTSTORE_S3_SECRET
valueFrom:
@@ -186,12 +204,6 @@ spec:
volumeMounts:
- mountPath: /var/www/html
name: data
- mountPath: /var/www/html/config/oidc.config.php
name: config
subPath: oidc.config.php
- mountPath: /var/www/html/config/override.config.php
name: config
subPath: override.config.php
- name: php-config
mountPath: /usr/local/etc/php/conf.d/
securityContext:
@@ -279,12 +291,6 @@ kind: ConfigMap
metadata:
name: nextcloud-config
data:
override.config.php: |-
<?php
$CONFIG = array (
'skeletondirectory' => '',
'default_phone_region' => 'EE',
);
oidc.config.php: |-
<?php
$CONFIG = array (

nextcloud/mariadb.yml (new file, 99 lines)

@@ -0,0 +1,99 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mariadb
spec:
revisionHistoryLimit: 0
serviceName: mariadb
selector:
matchLabels:
app: mariadb
replicas: 1
template:
metadata:
labels:
app: mariadb
annotations:
prometheus.io/port: '9104'
prometheus.io/scrape: 'true'
spec:
containers:
- name: exporter
image: mirror.gcr.io/prom/mysqld-exporter:latest
args:
- --mysqld.username
- exporter
- name: mariadb
image: mirror.gcr.io/library/mariadb:12.1
imagePullPolicy: Always
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secrets
key: MYSQL_ROOT_PASSWORD
- name: MYSQL_USER
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MYSQL_DATABASE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secrets
key: MYSQL_PASSWORD
volumeMounts:
- name: mariadb-data
mountPath: /var/lib/mysql
- name: mariadb-init
mountPath: /docker-entrypoint-initdb.d
volumes:
- name: mariadb-init
configMap:
name: mariadb-init-config
volumeClaimTemplates:
- metadata:
name: mariadb-data
spec:
storageClassName: ceph-rbd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: mariadb-secrets
spec:
size: 32
mapping:
- key: MYSQL_ROOT_PASSWORD
value: "%(plaintext)s"
- key: MYSQL_PASSWORD
value: "%(plaintext)s"
---
apiVersion: v1
kind: Service
metadata:
name: mariadb
spec:
ports:
- protocol: TCP
port: 3306
selector:
app: mariadb
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mariadb-init-config
data:
initdb.sql: |
CREATE USER 'exporter'@'127.0.0.1' WITH MAX_USER_CONNECTIONS 3;
GRANT PROCESS, REPLICATION CLIENT, SLAVE MONITOR, SELECT ON *.* TO 'exporter'@'127.0.0.1';


@@ -36,7 +36,7 @@ metadata:
name: nyancat
namespace: nyancat
annotations:
metallb.universe.tf/address-pool: zoo
metallb.io/address-pool: zoo
external-dns.alpha.kubernetes.io/hostname: nyancat.k-space.ee
spec:
type: LoadBalancer


@@ -188,12 +188,13 @@ spec:
service:
name: pve92
port: {number: 8006}
- pathType: Prefix
path: "/"
backend:
service:
name: pve93
port: {number: 8006}
#TODO: hw failure, disabled for now
# - pathType: Prefix
# path: "/"
# backend:
# service:
# name: pve93
# port: {number: 8006}
tls:
- hosts:
- "*.k-space.ee"


@@ -0,0 +1,236 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mongo
annotations:
kubernetes.io/description: |
Storage class for Mongo and similar applications that
implement high availability in application layer.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: prometheus
annotations:
kubernetes.io/description: |
Storage class for Prometheus and similar applications that
implement high availability in application layer.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: woodpecker
annotations:
kubernetes.io/description: |
Storage class for Drone, Woodpecker and similar application
pipeline runs where Git repos are checked out to.
This storage class uses XFS, has no block level redundancy and it's
deleted as soon as the pod exits.
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gitea
annotations:
kubernetes.io/description: |
Storage class for Gitea and similar applications needing
block device level replication with 3 replicas using XFS filesystem and
best effort data locality.
provisioner: rook-ceph.rbd.csi.ceph.com
reclaimPolicy: Retain
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nextcloud
annotations:
kubernetes.io/description: |
Storage class for Nextcloud needing
block device level replication with 3 replicas using XFS filesystem and
best effort data locality.
provisioner: rook-ceph.rbd.csi.ceph.com
reclaimPolicy: Retain
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rabbitmq
annotations:
kubernetes.io/description: |
Storage class for RabbitMQ and similar applications
deployed in highly available fashion utilizing application level
replication needing persistent volume.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: unifi
annotations:
kubernetes.io/description: |
Storage class for Unifi and similar applications
deployed in highly available fashion utilizing application level
replication needing persistent volume.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: hdd-ceph
annotations:
kubernetes.io/description: |
Generic HDD storage on CEPH.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-hdd
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: "null"
annotations:
kubernetes.io/description: |
Storage class for applications insisting on having a PV, but actually do
not and for data that can be discarded immediately
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
fsName: ks-fs
pool: ks-fs_data
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: Immediate
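
A hedged consumer of the new classes; the claim and namespace names are illustrative, not from the repo:

```
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: scratch            # hypothetical claim name
  namespace: default       # hypothetical namespace
spec:
  storageClassName: hdd-ceph
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
```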

rotmower/application.yaml (new file, 124 lines)

@@ -0,0 +1,124 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: rotmower
spec:
displayName: rotmower
uri: 'https://rotmower.k-space.ee'
headerMapping:
email: Remote-Email
groups: Remote-Groups
name: Remote-Name
user: Remote-Username
allowedGroups:
- k-space:kubernetes:admins
- github.com:codemowers:admins
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rotmower
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: rotmower-rotmower@kubernetescrd
spec:
rules:
- host: rotmower.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: rotmower
port:
number: 8080
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: v1
kind: Service
metadata:
name: rotmower
spec:
type: ClusterIP
selector:
app: rotmower
ports:
- protocol: TCP
port: 8080
targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: rotmower
namespace: rotmower
labels:
app: rotmower
spec:
replicas: 1
selector:
matchLabels:
app: rotmower
template:
metadata:
labels:
app: rotmower
spec:
serviceAccountName: rotmower
containers:
- name: rotmower
image: docker.io/codemowers/rotmower:latest
args:
- web
- --host
- 0.0.0.0
ports:
- containerPort: 8080
name: http
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rotmower
namespace: rotmower
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rotmower
rules:
- apiGroups: [""]
resources: ["pods", "secrets"]
verbs: ["get", "list"]
- apiGroups: ["apps"]
resources: ["deployments", "replicasets", "daemonsets", "statefulsets"]
verbs: ["get", "list"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rotmower
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rotmower
subjects:
- kind: ServiceAccount
name: rotmower
namespace: rotmower


@@ -1,125 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mongo
annotations:
kubernetes.io/description: |
Storage class for Mongo and similar applications that
implement high availability in application layer.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: prometheus
annotations:
kubernetes.io/description: |
Storage class for Prometheus and similar applications that
implement high availability in application layer.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: woodpecker
annotations:
kubernetes.io/description: |
Storage class for Drone, Woodpecker and similar application
pipeline runs where Git repos are checked out to.
This storage class uses XFS, has no block level redundancy and it's
deleted as soon as the pod exits.
provisioner: driver.longhorn.io
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
dataLocality: best-effort
numberOfReplicas: "1"
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gitea
annotations:
kubernetes.io/description: |
Storage class for Gitea and similar applications needing
block device level replication with 3 replicas using XFS filesystem and
best effort data locality.
provisioner: driver.longhorn.io
reclaimPolicy: Retain
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
dataLocality: best-effort
numberOfReplicas: "3"
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rabbitmq
annotations:
kubernetes.io/description: |
Storage class for RabbitMQ and similar applications
deployed in highly available fashion utilizing application level
replication needing persistent volume.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: xfs
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
imageFeatures: layering
imageFormat: '2'
pool: ks-nvme
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: "null"
annotations:
kubernetes.io/description: |
Storage class for applications insisting on having a PV, but actually do
not and for data that can be discarded immediately
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer


@@ -199,6 +199,7 @@ spec:
- cidr: 172.21.53.1/32
- cidr: 172.21.53.2/32
- cidr: 172.21.53.3/32
- cidr: 172.21.102.1/32
- cidr: 193.40.103.36/32
- cidr: 193.40.103.37/32
- cidr: 193.40.103.38/32

unifi/kustomization.yaml (new file, 9 lines)

@@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: &ns unifi
resources:
- ssh://git@git.k-space.ee/secretspace/kube/unifi # secrets: unifi, unifi-mongo
- unifi-mongo.yaml
- unifi.yaml

unifi/unifi-mongo.yaml (new file, 53 lines)

@@ -0,0 +1,53 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: unifi-db
namespace: unifi
spec:
selector:
matchLabels:
app: unifi-db
replicas: 1
minReadySeconds: 10
template:
metadata:
labels:
app: unifi-db
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb
image: mongo:8
ports:
- containerPort: 27017
name: mongo
envFrom:
- secretRef:
name: unifi-mongo
volumeMounts:
- name: data
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: unifi
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: unifi-db
namespace: unifi
spec:
ports:
- port: 27017
name: mongo
targetPort: 27017
selector:
app: unifi-db
type: ClusterIP

unifi/unifi.yaml (new file, 176 lines)

@@ -0,0 +1,176 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: unifi-app
namespace: unifi
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: unifi
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: unifi
namespace: unifi
spec:
serviceName: "unifi"
replicas: 1
selector:
matchLabels:
name: unifi
template:
metadata:
name: unifi
labels:
name: unifi
spec:
containers:
- name: unifi
image: lscr.io/linuxserver/unifi-network-application:latest
env:
- name: PUID
value: '1000'
- name: GUID
value: '1000'
- name: TZ
value: Etc/UTC
envFrom:
- secretRef:
name: unifi
ports:
- containerPort: 3478
protocol: UDP
- containerPort: 10001
protocol: UDP
- containerPort: 8080
protocol: TCP
- containerPort: 8443
protocol: TCP
- containerPort: 1900
protocol: UDP
- containerPort: 8843
protocol: TCP
- containerPort: 8880
protocol: TCP
- containerPort: 6789
protocol: TCP
- containerPort: 5514
protocol: UDP
volumeMounts:
- name: unifi-persistent-storage
mountPath: /config
volumes:
- name: unifi-persistent-storage
persistentVolumeClaim:
claimName: unifi-app
---
kind: Service
apiVersion: v1
metadata:
name: lb-unifi
namespace: unifi
annotations:
metallb.io/allow-shared-ip: 'true'
traefik.ingress.kubernetes.io/service.serverstransport: unifi-unifi@kubernetescrd
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.102.1
selector:
name: unifi
ports:
- name: '8080'
protocol: TCP
port: 8080
targetPort: 8080
- name: '8443'
protocol: TCP
port: 8443
targetPort: 8443
- name: '1900'
protocol: TCP
port: 1900
targetPort: 1900
- name: '8843'
protocol: TCP
port: 8843
targetPort: 8843
- name: '8880'
protocol: TCP
port: 8880
targetPort: 8880
- name: '6789'
protocol: TCP
port: 6789
targetPort: 6789
---
kind: Service
apiVersion: v1
metadata:
name: lb-unifi-udp
namespace: unifi
annotations:
metallb.io/allow-shared-ip: 'true'
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.102.1
selector:
name: unifi
ports:
- name: '3478'
protocol: UDP
port: 3478
targetPort: 3478
- name: '10001'
protocol: UDP
port: 10001
targetPort: 10001
- name: '5514'
protocol: UDP
port: 5514
targetPort: 5514
---
apiVersion: traefik.io/v1alpha1
kind: ServersTransport
metadata:
name: unifi
namespace: unifi
spec:
insecureSkipVerify: true
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: unifi
namespace: unifi
annotations:
traefik.ingress.kubernetes.io/router.tls: "true"
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: "unifi.k-space.ee"
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: lb-unifi
port:
number: 8443
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: unifi
namespace: unifi
spec:
uri: 'https://unifi.k-space.ee/'
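
Both LoadBalancer Services above pin `loadBalancerIP: 172.21.102.1` and set `metallb.io/allow-shared-ip`, so the TCP and UDP port sets share one address from the new `unifi` pool. A hedged check:

```
kubectl -n unifi get svc lb-unifi lb-unifi-udp \
  -o custom-columns=NAME:.metadata.name,IP:.status.loadBalancer.ingress[0].ip
```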


@@ -85,18 +85,18 @@ spec:
- name: DB_TYPE
value: mysql
- name: DB_HOST
value: mariadb.infra.k-space.ee
value: mariadb
- name: DB_PORT
value: "3306"
- name: DB_USER
value: kspace_wiki
value: wiki
- name: DB_NAME
value: kspace_wiki
value: wiki
- name: DB_PASS
valueFrom:
secretKeyRef:
name: wikijs-secrets
key: DB_PASS
name: mariadb-secrets
key: MYSQL_PASSWORD
ports:
- containerPort: 3000
name: http

wiki/mariadb.yml (new file, 99 lines)

@@ -0,0 +1,99 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mariadb
spec:
revisionHistoryLimit: 0
serviceName: mariadb
selector:
matchLabels:
app: mariadb
replicas: 1
template:
metadata:
labels:
app: mariadb
annotations:
prometheus.io/port: '9104'
prometheus.io/scrape: 'true'
spec:
containers:
- name: exporter
image: mirror.gcr.io/prom/mysqld-exporter:latest
args:
- --mysqld.username
- exporter
- name: mariadb
image: mirror.gcr.io/library/mariadb:12.1
imagePullPolicy: Always
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secrets
key: MYSQL_ROOT_PASSWORD
- name: MYSQL_USER
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MYSQL_DATABASE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secrets
key: MYSQL_PASSWORD
volumeMounts:
- name: mariadb-data
mountPath: /var/lib/mysql
- name: mariadb-init
mountPath: /docker-entrypoint-initdb.d
volumes:
- name: mariadb-init
configMap:
name: mariadb-init-config
volumeClaimTemplates:
- metadata:
name: mariadb-data
spec:
storageClassName: ceph-rbd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: mariadb-secrets
spec:
size: 32
mapping:
- key: MYSQL_ROOT_PASSWORD
value: "%(plaintext)s"
- key: MYSQL_PASSWORD
value: "%(plaintext)s"
---
apiVersion: v1
kind: Service
metadata:
name: mariadb
spec:
ports:
- protocol: TCP
port: 3306
selector:
app: mariadb
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mariadb-init-config
data:
initdb.sql: |
CREATE USER 'exporter'@'127.0.0.1' WITH MAX_USER_CONNECTIONS 3;
GRANT PROCESS, REPLICATION CLIENT, SLAVE MONITOR, SELECT ON *.* TO 'exporter'@'127.0.0.1';


@@ -5,7 +5,7 @@ metadata:
name: wildduck
annotations:
external-dns.alpha.kubernetes.io/hostname: mail.k-space.ee
metallb.universe.tf/address-pool: wildduck
metallb.io/address-pool: wildduck
spec:
loadBalancerIP: 193.40.103.25
type: LoadBalancer