1
0
forked from k-space/kube

Compare commits

..

175 Commits

Author SHA1 Message Date
Arti Zirk
af3bd7bb41 Re-enable sw_core mikrotik-exporter
Hopefully this time, after RouterOS upgrades, they will not leak memory
anymore. (:
2025-07-22 18:11:50 +03:00
31800f8ffb proxmox-csi to argocd
The images were not pinned to a version before.
proxmox-csi was at :edge, and
its dependencies pinned to outdated/incompatible.
2025-07-22 02:02:44 +03:00
24b57de126 trim spaces in codebase 2025-07-22 01:44:26 +03:00
6317daefa1 wildduck: disable zonemta dns caching 2025-07-20 13:58:09 +03:00
31558db1d4 wildduck: improve config and add healthchecks 2025-07-20 13:30:13 +03:00
efb467e425 dragonfly: scale to 1 instance (hotfix)
eaas: dragonfly has 1 rw instance and applications
don't realize and change over from ro to rw when
the leader changes.
2025-07-17 10:52:21 +03:00
130839ff7f rm reloader: very outdated and no consumers 2025-07-17 00:51:21 +03:00
6e6b3743a0 disable freeswitch
reported not working and not tracked with anything
2025-07-16 23:29:44 +03:00
6f2220445d disable _asterisk and destroy namespace
decision with eaas, currently broken, nobody has shown interest
and trying to maintain kube as a first priority
2025-07-16 23:03:54 +03:00
bc731d98ec rm camtiler ecosystem in favour for frigate 2025-07-13 13:35:49 +03:00
3a0747d9b8 frigate: add remaining old cameras 2025-07-13 01:16:27 +03:00
792a0864a4 frigate: Fix TPU configuration 2025-07-12 22:04:34 +03:00
17f95e14cc frigate: WIP stuff until CEPH arrives 2025-07-12 21:39:39 +03:00
d3b85e4f24 frigate: Schedule frigate to dedicated nvr node 2025-07-12 20:48:02 +03:00
8525cef4fc frigate: argo autosync 2025-07-12 20:39:37 +03:00
c519fd3d6c frigate: rename rstp to rtsp 2025-07-12 20:38:44 +03:00
4408c22c5b argocd: Add Frigate 2025-07-12 20:28:58 +03:00
2041f5f80a frigate: Migrate to Kustomize 2025-07-12 20:27:50 +03:00
84b259ace4 argocd: metallb: Enable prune 2025-07-12 19:01:39 +03:00
f9fe0379da inventory: add refresh tokens 2025-07-12 18:55:06 +03:00
0359eedcb5 argocd: Add metallb 2025-07-12 18:39:44 +03:00
a03ea7d208 metallb: Migrate to kustomize + helm 2025-07-12 18:38:50 +03:00
c7cb495451 argo: Add Harbor 2025-07-12 16:39:15 +03:00
a6439a3bd1 harbor: Migrate to kustomize + ArgoCD
Still some stuff missing like proper DragonFly resources
2025-07-12 16:27:17 +03:00
754b2180fd goredirect: target k6.ee directly traefik.k-space.ee
will be interesting how the cname works out
for ingress, it must be the same IP space as traefik is on, otherwise dns points to ip with nothing
2025-07-09 20:34:09 +03:00
4f35c87a6c tidy f0b78f7b172719fc4ad20e508f7585d955fb77b0 migration leftovers 2025-07-09 13:39:14 +03:00
266b8ee6aa redeploy (and update) frigate
longhorn is stuck in a loop attaching/detaching its storage
2025-06-30 03:08:42 +03:00
f726f8886a longhorn v1.8.2 2025-06-29 23:50:11 +03:00
fe128cf65e longhorn v1.7.3 2025-06-29 23:44:07 +03:00
7232957a04 traefik: move to kustomize
Closes 
2025-06-29 18:51:56 +03:00
43ad7586ce traefik: fix dashboard root redirect
Closes 
2025-06-29 16:48:39 +03:00
1b34a48e81 rosdump: move (secretful) result to secretspace 2025-06-29 16:26:05 +03:00
0d18bfd7cc rosdump: ignore ssh host keys 2025-06-29 16:26:05 +03:00
94751c44f9 rosdump: update device targets
- devices moved to sec, update DNS
- fix kube net policy CIDR
2025-06-29 16:25:32 +03:00
de36d70e68 rosdump: fixup 7d71f1b29c0ba98dfac46c23f99b56e5891116cb 2025-06-29 16:24:44 +03:00
efc2598160 k6: add-slug changed to directly add-by-slug
See also: inventory-app: 9c6902c5a2a90a6bd6a8fa93554f4dc353d9f777^
2025-06-24 18:45:42 +03:00
db935de1a5 gitea does not go through traefik 2025-06-18 19:48:53 +03:00
885f4b505e revert 28daa56badd0a57d3a2b9dd9c5a373d9445d6f5d 2025-06-18 18:46:06 +03:00
aab40b012d cert-manager to argo kustomize helm 2025-06-18 18:21:35 +03:00
28daa56bad cert-manager: rename to default-cluster-cert-issuer
much easier, vs ctrl-f for 'default'
2025-06-18 17:53:28 +03:00
a1e1dcf827 traefik: drop harbor already default issuer 2025-06-18 16:55:50 +03:00
bb1c313a37 inventory: add MACADDRESS_OUTLINK_BASEURL env 2025-05-25 17:25:19 +03:00
d7d83b37f4 freescout: not quite OIDC 2025-05-21 21:29:58 +03:00
0ac4364157 passmower: disable NORMALIZE_EMAIL_ADDRESSES
see comment in file
2025-05-21 20:48:53 +03:00
b8e525c3e0 passmower: texts: K-SPACE in all capital 2025-05-21 19:53:11 +03:00
92db22fd09 docs: there is no keydb 2025-05-03 16:26:32 +03:00
4466878b54 docs: drone is replaced 2025-05-03 15:11:11 +03:00
9b93075543 move members repo to secretspace 2025-05-03 15:05:59 +03:00
ce2e6568b1 wildduck: add mailservice group
2025-04-22 12:33:45 +03:00
f82caf1751 rm unused kdoorpi
- doors are outside of this cluster
- kdoorpi is superseded by godoor
- 0 pods running
2025-04-21 03:16:51 +03:00
d9877a9fc5 tigera-operator: v3.29.3 2025-04-20 22:03:54 +03:00
13cfeeff2b tigera-operator: v3.28.4 2025-04-20 22:03:54 +03:00
21e70685f3 tigera-operator: sync configuration drift 2025-04-20 22:03:50 +03:00
6d7cdbd9c6 tigera-operator to argo (v3.28.1) 2025-04-20 21:32:02 +03:00
10585c7aff dragonfly: v1.1.11 2025-04-20 19:27:28 +03:00
bc301104fe dragonfly: to argo (v1.1.6) 2025-04-20 19:27:24 +03:00
853c9717a9 rm unused opensearch
formerly about to be used by graylog,
which itself has been replaced twice over
2025-04-20 19:18:59 +03:00
ec81c34086 ripe87 to argo 2025-04-20 19:18:59 +03:00
0b713ab321 shared/minio is already dead 2025-04-20 19:18:59 +03:00
541607a7bd cpng: v1.25.1 2025-04-20 19:18:59 +03:00
d9dce6cadf cnpg to argo (v1.24.1) 2025-04-20 19:18:59 +03:00
0447abecdc rm postgres-operator (4th competing postgres?) 2025-04-20 19:18:59 +03:00
61f7d724b5 argo: secret-claim-operator to git 2025-04-20 19:18:59 +03:00
f899283fdb argo: tidy 2025-04-20 19:18:59 +03:00
fb3123966e keydb (and redis) is dead 2025-04-20 19:18:54 +03:00
5b29fbe7cd prometheus-operator: v0.82.0 2025-04-20 19:06:37 +03:00
9fb356b5a6 prometheus-operator: v0.81.0 2025-04-20 19:06:37 +03:00
908f482396 prometheus-operator: v0.80.1 2025-04-20 19:06:37 +03:00
715cb5ce4b prometheus-operator: v0.79.2 2025-04-20 19:06:37 +03:00
48915ec26c prometheus-operator: v0.78.2 2025-04-20 19:06:37 +03:00
06324bb583 prometheus-operator: v0.77.2 2025-04-20 19:06:37 +03:00
877662445a prometheus-operator: v0.76.2 2025-04-20 19:06:37 +03:00
22b67fa4fc prometheus-operator: migrate to argo+kustomize
v0.75.1 - same as in cluster currently
2025-04-20 19:06:37 +03:00
006240ee1a sync cluster deviation: pve-csi storageclass provisioners
minio-clusters: kustomization; disable unused and outdated shared and dedicated
2025-04-20 19:06:37 +03:00
2a26b4e94c traefik: drop already-enforced router.tls=true annotation 2025-04-20 19:06:37 +03:00
4e59984fe4 woodpecker: fixup assumptions 2025-04-20 19:06:32 +03:00
7eadbee7a2 argo: enable helm in kustomize + update 2025-04-20 19:01:39 +03:00
a94fddff1e woodpecker: recreate to v3 on kustomize 2025-04-20 19:01:39 +03:00
bf44e4fa9b partial revert 3243ed1066786288956ecd7afbedf05104018721 2025-04-20 19:01:39 +03:00
f7f7d52e70 Revert "convert reloader to helm"
Failed sync attempt to 2.1.0: one or more objects failed to apply,
reason: Deployment.apps "reloader-reloader" is invalid:
spec.template.metadata.labels: Invalid value:
map[string]string{"app.kubernetes.io/instance":"reloader",
"app.kubernetes.io/managed-by":"Helm",
"app.kubernetes.io/name":"reloader",
"app.kubernetes.io/version":"v1.4.0", "group":"com.stakater.platform",
"helm.sh/chart":"reloader-2.1.0", "provider":"stakater",
"version":"v1.4.0"}: `selector` does not match template `labels`
(retried 5 times).

This reverts commit db1f33df6d28da34a973678ff576032a445dd39f.
2025-04-20 19:01:39 +03:00
cf9d686882 mirror.gcr.io
and explicit latest tag
2025-04-20 19:01:39 +03:00
5bd0a57417 explicitly use docker library 2025-04-20 19:01:39 +03:00
e22713b282 pin and update 2025-04-20 19:01:39 +03:00
37a8031bc4 minor version updates 2025-04-20 19:01:39 +03:00
095e00b516 nextcloud: 31.0.2 2025-04-20 19:01:39 +03:00
4d84a0a5ca nextcloud: 30.0.8 2025-04-20 19:01:39 +03:00
73f03dbb2a nextcloud: 29.0.14 2025-04-20 19:01:39 +03:00
0c5d2bc792 nextcloud: 28.0.14 2025-04-20 19:01:38 +03:00
6cf53505ad nextcloud: 27.1.13 2025-04-20 19:01:38 +03:00
a694463fad nextcloud 26.0.13 2025-04-20 19:01:38 +03:00
d1eeba377d nextcloud: current version 2025-04-20 19:01:38 +03:00
0628cb94e4 convert reloader to helm 2025-04-20 19:01:38 +03:00
376e74a985 harbor update 2025-04-20 19:01:38 +03:00
6eb0c20175 disable discourse
- posts and user list manually exported
- not in argo
- outdated version
- e-mail is broken
- nobody has accessed in 6mo
- no posts, apart from the initial admin
2025-04-20 19:01:38 +03:00
4bf08fdc7f disable camtiler 2025-04-20 19:01:30 +03:00
f05b1f1324 openebs already disabled 2025-04-18 23:10:38 +03:00
5fa3144e23 logging namespace already disabled 2025-04-18 23:10:38 +03:00
48054078e2 local-path-storage already unused, for 2y 2025-04-18 23:10:38 +03:00
4cf4aecea9 playground is already disabled 2025-04-18 23:10:38 +03:00
8d1c24b80f disable whoami-oidc (broken) 2025-04-18 23:10:38 +03:00
0dcd26fe4f traefik: combined tls 2025-04-18 19:21:24 +03:00
e33053bf79 goredirect: bind workaround 2025-04-18 19:18:56 +03:00
e632b90d2b bind: enable k6.ee 2025-04-18 18:47:22 +03:00
3b5df4cd43 bind: cleanup mail.k-space.ee present in wildduck/dns.yaml 2025-04-18 18:41:18 +03:00
a280a19772 inventory: k6 tls 2025-04-18 18:41:18 +03:00
19e6f53d96 inventory: rm namespace
provided by argo / kubectl command anyway
except for role-bindings, they don't get it
2025-04-18 18:41:16 +03:00
e9efee4853 inventory: fix orphaned selectors 2025-04-18 16:56:19 +03:00
a33d0d12b0 gitea: also disable passkeys to enforce OIDC 2025-04-18 14:46:58 +03:00
dc42a9612a gitea: update and disable passwd login
Closes 
2025-04-18 14:38:49 +03:00
6f48e3a53a Inventory Minio Quota 1 → 10 Gi
Closes 
2025-04-11 16:28:58 +03:00
09423ace42 rm unneeded deprecated flag 2025-03-27 09:06:07 +02:00
bb802882ae add Aktiva to non-SSO listing 2025-02-25 23:10:51 +02:00
4a7dfd6435 fix passmower email login link 2025-01-09 13:02:54 +02:00
fb7504cfee force traefik to all worker nodes 2025-01-02 20:35:22 +02:00
a4b9bdf89d frigate: make config storage larger 2025-01-02 20:24:17 +02:00
602b4a03f6 frigate: use coral for detect, nvidia gpu for transcode and longhorn for config storage 2025-01-02 20:19:48 +02:00
f9ad582136 allow scheduling longhorn on nvr 2025-01-02 20:19:48 +02:00
305b8ec038 add nvidia-device-plugin to use nvr gpu 2025-01-02 20:19:48 +02:00
7d71f1b29c fix rosdump 2025-01-02 20:19:48 +02:00
0e79aa8f4e passmower: 4/4 replicas (for pve localhost) 2025-01-02 01:25:04 +02:00
a784f00c71 argo: autosync passmower 2025-01-02 01:19:22 +02:00
b71a872c09 argo: passmower helm + extras didn't work out
Kustomize should be able to auto-generate Helm as well.
2025-01-02 01:02:23 +02:00
21beb2332c argo: add passmower 2025-01-02 00:53:04 +02:00
8eed4f66c1 pve: add pve2 2025-01-02 00:24:56 +02:00
75b9948997 pve: fmt port.number on same line 2025-01-02 00:24:47 +02:00
e4dfde9562 argo docs 2 2024-12-15 06:34:47 +02:00
a82193f059 add argocd-image-updater 2024-12-15 06:28:42 +02:00
68a75b8389 migrate OIDC codemowers.io/v1alpha1 to v1beta1 2024-12-15 05:39:41 +02:00
5368fe90eb argo: add localhost callback for CLI login 2024-12-15 05:39:41 +02:00
cded6fde3f fixup argo docs 2024-12-15 05:39:41 +02:00
402ff86fde grafana: disable non-oauth login 2024-12-15 01:46:22 +02:00
272f60ab73 monitoring: mikrotik-exporter fix 2024-11-22 08:16:12 +02:00
9bcad2481b monitoring: Update node-exporter 2024-11-22 05:59:34 +02:00
c04a7b7f67 monitoring: Update mikrotik-exporter 2024-11-22 05:59:08 +02:00
c23fa07c5e monitoring: Update mikrotik-exporter 2024-11-19 15:48:31 +02:00
c1822888ec dont compile discourse assets 2024-10-25 14:44:27 +03:00
e26cac6d86 add discourse 2024-10-25 14:35:20 +03:00
d7ba4bc90e upgrade cnpg 2024-10-25 14:03:50 +03:00
da4df6c21d frigate: move storage to dedicated nfs share and offload transcoding to separate go2rtc deployment 2024-10-19 13:51:13 +03:00
2964034cd3 fix rosdump scheduling 2024-10-18 18:45:42 +03:00
ae525380b1 fix gitea oidc reg 2024-10-18 18:44:27 +03:00
4b9c3ad394 monitoring: Temporarily disable monitoring of core switches 2024-10-15 10:07:28 +03:00
dbebb39749 gitea: Bump version 2024-10-02 08:15:20 +03:00
6f15e45402 freeswitch: fix network policy 2024-10-01 22:32:16 +03:00
36bf431259 freeswitch: fix network policy 2024-10-01 20:27:08 +03:00
c14a313c57 frigate: enable recording and use openvino 2024-09-29 23:06:41 +03:00
15a2fd9375 add frigate 2024-09-29 21:34:31 +03:00
5bd6cf2317 freeswitch: add gitignore 2024-09-29 19:05:42 +03:00
407f691152 add freeswitch 2024-09-29 19:05:42 +03:00
e931f490c2 asterisk: update network policy 2024-09-29 19:05:42 +03:00
b96e8d16a6 expose harbor via traefik 2024-09-29 19:05:42 +03:00
15d4d44be7 expose traefik via ingress 2024-09-29 19:05:42 +03:00
52ce6eab0a expose harbor via traefik 2024-09-29 19:04:51 +03:00
e89d045f38 goredirect: add nopath env var 2024-09-13 21:54:49 +03:00
7e70315514 monitoring: Fix snmp-exporter 2024-09-12 22:15:10 +03:00
af5a048bcd replace ups 2024-09-12 21:54:46 +03:00
0005219f81 monitoring: Fix mikrotik-exporter formatting 2024-09-12 21:48:43 +03:00
813bb32e48 monitoring: Update UPS-es 2024-09-12 21:47:20 +03:00
0efae7baf9 unschedule harbor from storage nodes 2024-09-12 19:48:51 +03:00
be90b4e266 monitoring: Update mikrotik-exporter 2024-09-09 22:19:46 +03:00
999d17c384 rosdump: Use codemowers/git image 2024-09-09 08:45:21 +03:00
bacef8d438 remove logmower 2024-09-08 23:54:32 +03:00
60d1ba9b18 monitoring: Bump mikrotik-exporter again 2024-09-06 12:10:45 +03:00
dcb80e6638 monitoring: Bump mikrotik-exporter 2024-09-06 11:55:49 +03:00
95e0f97db2 grafana: Specify OIDC scopes on Grafana side 2024-09-05 09:32:34 +03:00
f5a7b44ae6 grafana: Add groups OIDC scope 2024-09-05 09:29:16 +03:00
be7e1d9459 grafana: Assign editor role for hackerspace members 2024-09-05 09:23:41 +03:00
cd807ebcde grafana: Allow OIDC assignment to admin role 2024-09-05 09:04:02 +03:00
eaac7f61a7 monitoring: Pin specific mikrotik-exporter image 2024-09-04 23:29:37 +03:00
a0d5a585e4 add and configure calico ippool 2024-09-04 23:12:35 +03:00
1f8f288f95 monitoring: Update Mikrotik exporter 2024-09-04 22:33:15 +03:00
9de1881647 monitoring: Enable Prometheus admin API 2024-09-04 22:28:01 +03:00
28904cdd63 make calico use ipip encapsulation 2024-09-04 22:27:36 +03:00
0df188db36 monitoring: README syntax fix 2024-09-04 07:12:56 +03:00
a42b79b5ac monitoring: Add doc.crds.dev ref 2024-09-04 07:12:21 +03:00
200 changed files with 3206 additions and 5039 deletions
.gitignoreCLUSTER.mdREADME.md
_disabled
argocd
bind
camtiler
cert-manager
cnpg-system
default
dragonfly-operator-system
elastic-system
etherpad
freescout
frigate
gitea
grafana
hackerspace
harbor

4
.gitignore vendored

@ -5,6 +5,10 @@
*.save
*.1
# Kustomize with Helm and secrets:
charts/
*.env
### IntelliJ IDEA ###
.idea
*.iml

@ -35,7 +35,6 @@ users:
- get-token
- --oidc-issuer-url=https://auth.k-space.ee/
- --oidc-client-id=passmower.kubelogin
- --oidc-use-pkce
- --oidc-extra-scope=profile,email,groups
- --listen-address=127.0.0.1:27890
command: kubectl

@ -6,15 +6,17 @@ Kubernetes manifests, Ansible [playbooks](ansible/README.md), and documentation
- Debugging Kubernetes [on Wiki](https://wiki.k-space.ee/en/hosting/debugging-kubernetes)
- Need help? → [`#kube`](https://k-space-ee.slack.com/archives/C02EYV1NTM2)
Jump to docs: [inventory-app](hackerspace/README.md) / [cameras](camtiler/README.md) / [doors](https://wiki.k-space.ee/en/hosting/doors) / [list of apps](https://auth.k-space.ee) // [all infra](ansible/inventory.yml) / [network](https://wiki.k-space.ee/en/hosting/network/sensitive) / [retro](https://wiki.k-space.ee/en/hosting/retro) / [non-infra](https://wiki.k-space.ee)
Jump to docs: [inventory-app](hackerspace/README.md) / [cameras](_disabled/camtiler/README.md) / [doors](https://wiki.k-space.ee/en/hosting/doors) / [list of apps](https://auth.k-space.ee) // [all infra](ansible/inventory.yml) / [network](https://wiki.k-space.ee/en/hosting/network/sensitive) / [retro](https://wiki.k-space.ee/en/hosting/retro) / [non-infra](https://wiki.k-space.ee)
Tip: Search the repo for `kind: xyz` for examples.
## Supporting services
- Build [Git](https://git.k-space.ee) repositories with [Woodpecker](https://woodpecker.k-space.ee).
- Build [Git](https://git.k-space.ee) repositories with [Woodpecker](https://woodpecker.k-space.ee)[^nodrone].
- Passmower: Authz with `kind: OIDCClient` (or `kind: OIDCMiddlewareClient`[^authz]).
- Traefik[^nonginx]: Expose services with `kind: Service` + `kind: Ingress` (TLS and DNS **included**).
[^nodrone]: Replaces Drone CI.
### Additional
- bind: Manage _additional_ DNS records with `kind: DNSEndpoint`.
- [Prometheus](https://wiki.k-space.ee/en/hosting/monitoring): Collect metrics with `kind: PodMonitor` (alerts with `kind: PrometheusRule`).
@ -32,19 +34,20 @@ Static routes for 193.40.103.36/30 have been added in pve nodes to make them com
<!-- Linked to by https://wiki.k-space.ee/e/en/hosting/storage -->
### Databases / -stores:
- KeyDB: `kind: KeydbClaim` (replaces Redis[^redisdead])
- Dragonfly: `kind: Dragonfly` (replaces Redis[^redisdead])
- Longhorn: `storageClassName: longhorn` (filesystem storage)
- Mongo[^mongoproblems]: `kind: MongoDBCommunity` (NAS* `inventory-mongodb`)
- Minio S3: `kind: MinioBucketClaim` with `class: dedicated` (NAS*: `class: external`)
- MariaDB*: search for `mysql`, `mariadb`[^mariadb] (replaces MySQL)
- Postgres*: hardcoded to [harbor/application.yml](harbor/application.yml)
- Seeded secrets: `kind: SecretClaim` (generates random secret in templated format)
- Secrets in git: https://git.k-space.ee/secretspace (members personal info, API credentials, see argocd/deploy_key.pub comment)
\* External, hosted directly on [nas.k-space.ee](https://wiki.k-space.ee/en/hosting/storage)
[^mariadb]: As of 2024-07-30 used by auth, authelia, bitwarden, etherpad, freescout, git, grafana, nextcloud, wiki, woodpecker
[^redisdead]: Redis has been replaced as redis-operatori couldn't handle itself: didn't reconcile after reboots, master URI was empty, and clients complained about missing masters. ArgoCD still hosts its own Redis.
[^redisdead]: Redis has been replaced as redis-operator couldn't handle itself: didn't reconcile after reboots, master URI was empty, and clients complained about missing masters. Dragonfly replaces KeyDB.
[^mongoproblems]: Mongo problems: Incompatible with rawfile csi (wiredtiger.wt corrupts), complicated resizing (PVCs from statefulset PVC template).

@ -9,3 +9,5 @@ Should ArgoCD be down manifests here can be applied with:
```
kubectl apply -n asterisk -f application.yaml
```
asterisk-secrets was dumped to git.k-space.ee/secretspace/kube:_disabled/asterisk

@ -0,0 +1,39 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: asterisk
spec:
podSelector:
matchLabels:
app: asterisk
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
- from:
- ipBlock:
cidr: 100.101.0.0/16
- from:
- ipBlock:
cidr: 100.102.0.0/16
- from:
- ipBlock:
cidr: 81.90.125.224/32 # Lauri home
- from:
- ipBlock:
cidr: 172.20.8.241/32 # Erki A
- from:
- ipBlock:
cidr: 212.47.211.10/32 # Elisa SIP
egress:
- to:
- ipBlock:
cidr: 212.47.211.10/32 # Elisa SIP

@ -0,0 +1,382 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: discourse
annotations:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- "*.k-space.ee"
secretName:
rules:
- host: "discourse.k-space.ee"
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: discourse
port:
name: http
---
apiVersion: v1
kind: Service
metadata:
name: discourse
spec:
type: ClusterIP
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
selector:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: discourse
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: discourse
annotations:
reloader.stakater.com/auto: "true"
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
spec:
serviceAccountName: discourse
securityContext:
fsGroup: 0
fsGroupChangePolicy: Always
initContainers:
containers:
- name: discourse
image: docker.io/bitnami/discourse:3.3.2-debian-12-r0
imagePullPolicy: "IfNotPresent"
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
env:
- name: BITNAMI_DEBUG
value: "true"
- name: DISCOURSE_USERNAME
valueFrom:
secretKeyRef:
name: discourse-password
key: username
- name: DISCOURSE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-password
key: password
- name: DISCOURSE_PORT_NUMBER
value: "8080"
- name: DISCOURSE_EXTERNAL_HTTP_PORT_NUMBER
value: "80"
- name: DISCOURSE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgresql
key: password
- name: POSTGRESQL_CLIENT_CREATE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: POSTGRESQL_CLIENT_POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-redis
key: redis-password
envFrom:
- configMapRef:
name: discourse
- secretRef:
name: discourse-email
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: 500
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
httpGet:
path: /srv/status
port: http
initialDelaySeconds: 100
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
resources:
limits:
cpu: "6.0"
ephemeral-storage: 2Gi
memory: 12288Mi
requests:
cpu: "1.0"
ephemeral-storage: 50Mi
memory: 3072Mi
volumeMounts:
- name: discourse-data
mountPath: /bitnami/discourse
subPath: discourse
- name: sidekiq
image: docker.io/bitnami/discourse:3.3.2-debian-12-r0
imagePullPolicy: "IfNotPresent"
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
command:
- /opt/bitnami/scripts/discourse/entrypoint.sh
args:
- /opt/bitnami/scripts/discourse-sidekiq/run.sh
env:
- name: BITNAMI_DEBUG
value: "true"
- name: DISCOURSE_USERNAME
valueFrom:
secretKeyRef:
name: discourse-password
key: username
- name: DISCOURSE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-password
key: password
- name: DISCOURSE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgresql
key: password
- name: DISCOURSE_POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-redis
key: redis-password
envFrom:
- configMapRef:
name: discourse
- secretRef:
name: discourse-email
livenessProbe:
exec:
command: ["/bin/sh", "-c", "pgrep -f ^sidekiq"]
initialDelaySeconds: 500
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command: ["/bin/sh", "-c", "pgrep -f ^sidekiq"]
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
resources:
limits:
cpu: 750m
ephemeral-storage: 2Gi
memory: 768Mi
requests:
cpu: 500m
ephemeral-storage: 50Mi
memory: 512Mi
volumeMounts:
- name: discourse-data
mountPath: /bitnami/discourse
subPath: discourse
volumes:
- name: discourse-data
persistentVolumeClaim:
claimName: discourse-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: discourse-data
namespace: discourse
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "3Gi"
storageClassName: "proxmox-nas"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: discourse
namespace: discourse
data:
DISCOURSE_HOST: "discourse.k-space.ee"
DISCOURSE_SKIP_INSTALL: "yes"
DISCOURSE_PRECOMPILE_ASSETS: "no"
DISCOURSE_SITE_NAME: "K-Space Discourse"
DISCOURSE_USERNAME: "k-space"
DISCOURSE_EMAIL: "dos4dev@k-space.ee"
DISCOURSE_REDIS_HOST: "discourse-redis"
DISCOURSE_REDIS_PORT_NUMBER: "6379"
DISCOURSE_DATABASE_HOST: "discourse-postgres-rw"
DISCOURSE_DATABASE_PORT_NUMBER: "5432"
DISCOURSE_DATABASE_NAME: "discourse"
DISCOURSE_DATABASE_USER: "discourse"
POSTGRESQL_CLIENT_DATABASE_HOST: "discourse-postgres-rw"
POSTGRESQL_CLIENT_DATABASE_PORT_NUMBER: "5432"
POSTGRESQL_CLIENT_POSTGRES_USER: "postgres"
POSTGRESQL_CLIENT_CREATE_DATABASE_NAME: "discourse"
POSTGRESQL_CLIENT_CREATE_DATABASE_EXTENSIONS: "hstore,pg_trgm"
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: discourse
namespace: discourse
spec:
displayName: Discourse
uri: https://discourse.k-space.ee
redirectUris:
- https://discourse.k-space.ee/auth/oidc/callback
allowedGroups:
- k-space:floor
- k-space:friends
grantTypes:
- authorization_code
- refresh_token
responseTypes:
- code
availableScopes:
- openid
- profile
pkce: false
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: discourse-redis
namespace: discourse
spec:
size: 32
mapping:
- key: redis-password
value: "%(plaintext)s"
- key: REDIS_URI
value: "redis://:%(plaintext)s@discourse-redis"
---
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
name: discourse-redis
namespace: discourse
spec:
authentication:
passwordFromSecret:
key: redis-password
name: discourse-redis
replicas: 3
resources:
limits:
cpu: 1000m
memory: 1Gi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: discourse-redis
app.kubernetes.io/part-of: dragonfly
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: discourse-postgres
namespace: discourse
spec:
instances: 1
enableSuperuserAccess: true
bootstrap:
initdb:
database: discourse
owner: discourse
secret:
name: discourse-postgresql
dataChecksums: true
encoding: 'UTF8'
storage:
size: 10Gi
storageClass: postgres

1
_disabled/freeswitch/.gitignore vendored Normal file

@ -0,0 +1 @@
PASSWORDS.xml

@ -0,0 +1,14 @@
<include>
<X-PRE-PROCESS cmd="set" data="default_password=">
<X-PRE-PROCESS cmd="set" data="ipcall_password="/>
<X-PRE-PROCESS cmd="set" data="1000_password="/>
<X-PRE-PROCESS cmd="set" data="1001_password="/>
<X-PRE-PROCESS cmd="set" data="1002_password="/>
<X-PRE-PROCESS cmd="set" data="1003_password="/>
<X-PRE-PROCESS cmd="set" data="1004_password="/>
<X-PRE-PROCESS cmd="set" data="1005_password="/>
<X-PRE-PROCESS cmd="set" data="1006_password="/>
<X-PRE-PROCESS cmd="set" data="1007_password="/>
<X-PRE-PROCESS cmd="set" data="1008_password="/>
<X-PRE-PROCESS cmd="set" data="1009_password="/>
</include>

@ -0,0 +1,7 @@
```
kubectl -n freeswitch create secret generic freeswitch-passwords --from-file freeswitch/PASSWORDS.xml
```
PASSWORDS.xml is in git.k-space.ee/secretspace/kube:_disabled/freeswitch
freeswitch-sounds was extracted from http://files.freeswitch.org/releases/sounds/freeswitch-sounds-en-us-callie-32000-1.0.53.tar.gz (with /us/ at root of the volume)

@ -0,0 +1,567 @@
apiVersion: v1
kind: Service
metadata:
name: freeswitch
namespace: freeswitch
annotations:
external-dns.alpha.kubernetes.io/hostname: freeswitch.k-space.ee
metallb.universe.tf/address-pool: eenet
metallb.universe.tf/ip-allocated-from-pool: eenet
spec:
ports:
- name: sip-internal-udp
protocol: UDP
port: 5060
targetPort: 5060
nodePort: 31787
- name: sip-nat-udp
protocol: UDP
port: 5070
targetPort: 5070
nodePort: 32241
- name: sip-external-udp
protocol: UDP
port: 5080
targetPort: 5080
nodePort: 31354
- name: sip-data-10000
protocol: UDP
port: 10000
targetPort: 10000
nodePort: 30786
- name: sip-data-10001
protocol: UDP
port: 10001
targetPort: 10001
nodePort: 31788
- name: sip-data-10002
protocol: UDP
port: 10002
targetPort: 10002
nodePort: 30247
- name: sip-data-10003
protocol: UDP
port: 10003
targetPort: 10003
nodePort: 32389
- name: sip-data-10004
protocol: UDP
port: 10004
targetPort: 10004
nodePort: 30723
- name: sip-data-10005
protocol: UDP
port: 10005
targetPort: 10005
nodePort: 30295
- name: sip-data-10006
protocol: UDP
port: 10006
targetPort: 10006
nodePort: 30782
- name: sip-data-10007
protocol: UDP
port: 10007
targetPort: 10007
nodePort: 32165
- name: sip-data-10008
protocol: UDP
port: 10008
targetPort: 10008
nodePort: 30282
- name: sip-data-10009
protocol: UDP
port: 10009
targetPort: 10009
nodePort: 31325
- name: sip-data-10010
protocol: UDP
port: 10010
targetPort: 10010
nodePort: 31234
selector:
app: freeswitch
type: LoadBalancer
externalTrafficPolicy: Local
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
internalTrafficPolicy: Cluster
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: freeswitch-sounds
namespace: freeswitch
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
storageClassName: longhorn
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: freeswitch
  namespace: freeswitch
  labels:
    app: freeswitch
  annotations:
    reloader.stakater.com/auto: "true" # reloader is disabled in cluster, (re)deploy it to use
spec:
  # Single replica: SIP registration state and the RTP nodePort range are not
  # shareable across pods; Recreate avoids two registrars racing on the gateway.
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: freeswitch
  template:
    metadata:
      labels:
        app: freeswitch
    spec:
      volumes:
        # Individual XML files projected via subPath mounts below.
        - name: config
          configMap:
            name: freeswitch-config
            defaultMode: 420  # 0644
        - name: directory
          configMap:
            name: freeswitch-directory
            defaultMode: 420  # 0644
        - name: sounds
          persistentVolumeClaim:
            claimName: freeswitch-sounds
        - name: passwords
          secret:
            secretName: freeswitch-passwords
      containers:
        - name: freeswitch
          # NOTE(review): ':latest' is unpinned — rollouts are not reproducible;
          # pin a tag/digest when a known-good version is identified. TODO confirm.
          image: mirror.gcr.io/dheaps/freeswitch:latest
          env:
            # Presumably consumed by the image's entrypoint to download sound
            # packs into /usr/share/freeswitch/sounds — verify against the image docs.
            - name: SOUND_TYPES
              value: en-us-callie
            - name: SOUND_RATES
              value: "32000"
          # NOTE(review): no requests/limits set — pod is BestEffort and first
          # to be evicted under node pressure.
          resources: {}
          volumeMounts:
            - name: config
              mountPath: /etc/freeswitch/sip_profiles/external/ipcall.xml
              subPath: ipcall.xml
            - name: config
              mountPath: /etc/freeswitch/dialplan/default/00_outbound_ipcall.xml
              subPath: 00_outbound_ipcall.xml
            - name: config
              mountPath: /etc/freeswitch/dialplan/public.xml
              subPath: dialplan.xml
            - name: config
              mountPath: /etc/freeswitch/autoload_configs/switch.conf.xml
              subPath: switch.xml
            - name: config
              mountPath: /etc/freeswitch/vars.xml
              subPath: vars.xml
            - name: passwords
              mountPath: /etc/freeswitch/PASSWORDS.xml
              subPath: PASSWORDS.xml
            - name: directory
              mountPath: /etc/freeswitch/directory/default
            - name: sounds
              mountPath: /usr/share/freeswitch/sounds
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: freeswitch-config
  namespace: freeswitch
# Each key is projected as a single file into the container via subPath mounts
# in the freeswitch Deployment. The XML bodies are runtime data — do not edit
# them for style; FreeSWITCH reads them verbatim.
data:
  # Inbound dialplan for the "public" context (mounted as dialplan/public.xml;
  # per the notice, reached via the external profile on port 5080).
  dialplan.xml: |
    <!--
        NOTICE:
        This context is usually accessed via the external sip profile listening on port 5080.
        It is recommended to have separate inbound and outbound contexts. Not only for security
        but clearing up why you would need to do such a thing. You don't want outside un-authenticated
        callers hitting your default context which allows dialing calls thru your providers and results
        in Toll Fraud.
    -->
    <!-- http://wiki.freeswitch.org/wiki/Dialplan_XML -->
    <include>
      <context name="public">
        <extension name="unloop">
          <condition field="${unroll_loops}" expression="^true$"/>
          <condition field="${sip_looped_call}" expression="^true$">
            <action application="deflect" data="${destination_number}"/>
          </condition>
        </extension>
        <!--
            Tag anything pass thru here as an outside_call so you can make sure not
            to create any routing loops based on the conditions that it came from
            the outside of the switch.
        -->
        <extension name="outside_call" continue="true">
          <condition>
            <action application="set" data="outside_call=true"/>
            <action application="export" data="RFC2822_DATE=${strftime(%a, %d %b %Y %T %z)}"/>
          </condition>
        </extension>
        <extension name="call_debug" continue="true">
          <condition field="${call_debug}" expression="^true$" break="never">
            <action application="info"/>
          </condition>
        </extension>
        <extension name="public_extensions">
          <condition field="destination_number" expression="^(10[01][0-9])$">
            <action application="transfer" data="$1 XML default"/>
          </condition>
        </extension>
        <extension name="public_conference_extensions">
          <condition field="destination_number" expression="^(3[5-8][01][0-9])$">
            <action application="transfer" data="$1 XML default"/>
          </condition>
        </extension>
        <!--
            You can place files in the public directory to get included.
        -->
        <X-PRE-PROCESS cmd="include" data="public/*.xml"/>
        <!--
            If you have made it this far lets challenge the caller and if they authenticate
            lets try what they dialed in the default context. (commented out by default)
        -->
        <!-- TODO:
        <extension name="check_auth" continue="true">
          <condition field="${sip_authorized}" expression="^true$" break="never">
            <anti-action application="respond" data="407"/>
          </condition>
        </extension>
        -->
        <extension name="transfer_to_default">
          <condition>
            <!-- TODO: proper ring grouping -->
            <action application="bridge" data="user/1004@freeswitch.k-space.ee,user/1003@freeswitch.k-space.ee,sofia/gateway/ipcall/53543824"/>
          </condition>
        </extension>
      </context>
    </include>
  # Upstream SIP trunk gateway (mounted under sip_profiles/external/); the
  # password comes from PASSWORDS.xml included by vars.xml below.
  ipcall.xml: |
    <include>
      <gateway name="ipcall">
        <param name="proxy" value="sip.ipcall.ee"/>
        <param name="register" value="true"/>
        <param name="realm" value="sip.ipcall.ee"/>
        <param name="username" value="6659652"/>
        <param name="password" value="$${ipcall_password}"/>
        <param name="from-user" value="6659652"/>
        <param name="from-domain" value="sip.ipcall.ee"/>
        <param name="extension" value="ring_group/default"/>
      </gateway>
    </include>
  # Outbound routing: any all-digit destination is bridged via the ipcall
  # gateway (mounted into dialplan/default/, so ordered first by the 00_ prefix).
  00_outbound_ipcall.xml: |
    <extension name="outbound">
      <!-- TODO: check toll_allow ? -->
      <condition field="destination_number" expression="^(\d+)$">
        <action application="set" data="sip_invite_domain=sip.ipcall.ee"/>
        <action application="bridge" data="sofia/gateway/ipcall/${destination_number}"/>
      </condition>
    </extension>
  # Core switch.conf: note the RTP port range 10000-10010 must stay in sync
  # with the per-port sip-data-* entries on the freeswitch Service.
  switch.xml: |
    <configuration name="switch.conf" description="Core Configuration">
      <cli-keybindings>
        <key name="1" value="help"/>
        <key name="2" value="status"/>
        <key name="3" value="show channels"/>
        <key name="4" value="show calls"/>
        <key name="5" value="sofia status"/>
        <key name="6" value="reloadxml"/>
        <key name="7" value="console loglevel 0"/>
        <key name="8" value="console loglevel 7"/>
        <key name="9" value="sofia status profile internal"/>
        <key name="10" value="sofia profile internal siptrace on"/>
        <key name="11" value="sofia profile internal siptrace off"/>
        <key name="12" value="version"/>
      </cli-keybindings>
      <default-ptimes>
      </default-ptimes>
      <settings>
        <param name="colorize-console" value="true"/>
        <param name="dialplan-timestamps" value="false"/>
        <param name="max-db-handles" value="50"/>
        <param name="db-handle-timeout" value="10"/>
        <param name="max-sessions" value="1000"/>
        <param name="sessions-per-second" value="30"/>
        <param name="loglevel" value="debug"/>
        <param name="mailer-app" value="sendmail"/>
        <param name="mailer-app-args" value="-t"/>
        <param name="dump-cores" value="yes"/>
        <param name="rtp-start-port" value="10000"/>
        <param name="rtp-end-port" value="10010"/>
      </settings>
    </configuration>
  # Global preprocessor variables; pulls secrets in via the PASSWORDS.xml
  # include at the bottom (mounted from the freeswitch-passwords Secret).
  # NOTE(review): the XX-PRE-PROCESS spelling on digits_dialed_filter is the
  # FreeSWITCH convention for disabling a directive (X-PRE-PROCESS executes
  # even inside XML comments) — presumably intentional; do not "fix" to X-.
  vars.xml: |
    <include>
      <X-PRE-PROCESS cmd="set" data="disable_system_api_commands=true"/>
      <X-PRE-PROCESS cmd="set" data="sound_prefix=$${sounds_dir}/en/us/callie"/>
      <X-PRE-PROCESS cmd="set" data="domain=freeswitch.k-space.ee"/>
      <X-PRE-PROCESS cmd="set" data="domain_name=$${domain}"/>
      <X-PRE-PROCESS cmd="set" data="hold_music=local_stream://moh"/>
      <X-PRE-PROCESS cmd="set" data="use_profile=external"/>
      <X-PRE-PROCESS cmd="set" data="rtp_sdes_suites=AEAD_AES_256_GCM_8|AEAD_AES_128_GCM_8|AES_CM_256_HMAC_SHA1_80|AES_CM_192_HMAC_SHA1_80|AES_CM_128_HMAC_SHA1_80|AES_CM_256_HMAC_SHA1_32|AES_CM_192_HMAC_SHA1_32|AES_CM_128_HMAC_SHA1_32|AES_CM_128_NULL_AUTH"/>
      <X-PRE-PROCESS cmd="set" data="global_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8"/>
      <X-PRE-PROCESS cmd="set" data="outbound_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8"/>
      <X-PRE-PROCESS cmd="set" data="xmpp_client_profile=xmppc"/>
      <X-PRE-PROCESS cmd="set" data="xmpp_server_profile=xmpps"/>
      <X-PRE-PROCESS cmd="set" data="bind_server_ip=auto"/>
      <X-PRE-PROCESS cmd="stun-set" data="external_rtp_ip=host:freeswitch.k-space.ee"/>
      <X-PRE-PROCESS cmd="stun-set" data="external_sip_ip=host:freeswitch.k-space.ee"/>
      <X-PRE-PROCESS cmd="set" data="unroll_loops=true"/>
      <X-PRE-PROCESS cmd="set" data="outbound_caller_name=FreeSWITCH"/>
      <X-PRE-PROCESS cmd="set" data="outbound_caller_id=0000000000"/>
      <X-PRE-PROCESS cmd="set" data="call_debug=false"/>
      <X-PRE-PROCESS cmd="set" data="console_loglevel=info"/>
      <X-PRE-PROCESS cmd="set" data="default_areacode=372"/>
      <X-PRE-PROCESS cmd="set" data="default_country=EE"/>
      <X-PRE-PROCESS cmd="set" data="presence_privacy=false"/>
      <X-PRE-PROCESS cmd="set" data="au-ring=%(400,200,383,417);%(400,2000,383,417)"/>
      <X-PRE-PROCESS cmd="set" data="be-ring=%(1000,3000,425)"/>
      <X-PRE-PROCESS cmd="set" data="ca-ring=%(2000,4000,440,480)"/>
      <X-PRE-PROCESS cmd="set" data="cn-ring=%(1000,4000,450)"/>
      <X-PRE-PROCESS cmd="set" data="cy-ring=%(1500,3000,425)"/>
      <X-PRE-PROCESS cmd="set" data="cz-ring=%(1000,4000,425)"/>
      <X-PRE-PROCESS cmd="set" data="de-ring=%(1000,4000,425)"/>
      <X-PRE-PROCESS cmd="set" data="dk-ring=%(1000,4000,425)"/>
      <X-PRE-PROCESS cmd="set" data="dz-ring=%(1500,3500,425)"/>
      <X-PRE-PROCESS cmd="set" data="eg-ring=%(2000,1000,475,375)"/>
      <X-PRE-PROCESS cmd="set" data="es-ring=%(1500,3000,425)"/>
      <X-PRE-PROCESS cmd="set" data="fi-ring=%(1000,4000,425)"/>
      <X-PRE-PROCESS cmd="set" data="fr-ring=%(1500,3500,440)"/>
      <X-PRE-PROCESS cmd="set" data="hk-ring=%(400,200,440,480);%(400,3000,440,480)"/>
      <X-PRE-PROCESS cmd="set" data="hu-ring=%(1250,3750,425)"/>
      <X-PRE-PROCESS cmd="set" data="il-ring=%(1000,3000,400)"/>
      <X-PRE-PROCESS cmd="set" data="in-ring=%(400,200,425,375);%(400,2000,425,375)"/>
      <X-PRE-PROCESS cmd="set" data="jp-ring=%(1000,2000,420,380)"/>
      <X-PRE-PROCESS cmd="set" data="ko-ring=%(1000,2000,440,480)"/>
      <X-PRE-PROCESS cmd="set" data="pk-ring=%(1000,2000,400)"/>
      <X-PRE-PROCESS cmd="set" data="pl-ring=%(1000,4000,425)"/>
      <X-PRE-PROCESS cmd="set" data="ro-ring=%(1850,4150,475,425)"/>
      <X-PRE-PROCESS cmd="set" data="rs-ring=%(1000,4000,425)"/>
      <X-PRE-PROCESS cmd="set" data="ru-ring=%(800,3200,425)"/>
      <X-PRE-PROCESS cmd="set" data="sa-ring=%(1200,4600,425)"/>
      <X-PRE-PROCESS cmd="set" data="tr-ring=%(2000,4000,450)"/>
      <X-PRE-PROCESS cmd="set" data="uk-ring=%(400,200,400,450);%(400,2000,400,450)"/>
      <X-PRE-PROCESS cmd="set" data="us-ring=%(2000,4000,440,480)"/>
      <X-PRE-PROCESS cmd="set" data="bong-ring=v=-7;%(100,0,941.0,1477.0);v=-7;>=2;+=.1;%(1400,0,350,440)"/>
      <X-PRE-PROCESS cmd="set" data="beep=%(1000,0,640)"/>
      <X-PRE-PROCESS cmd="set" data="sit=%(274,0,913.8);%(274,0,1370.6);%(380,0,1776.7)"/>
      <X-PRE-PROCESS cmd="set" data="df_us_ssn=(?!219099999|078051120)(?!666|000|9\d{2})\d{3}(?!00)\d{2}(?!0{4})\d{4}"/>
      <X-PRE-PROCESS cmd="set" data="df_luhn=?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|6(?:011|5[0-9]{2})[0-9]{12}|(?:2131|1800|35\d{3})\d{11}"/>
      <XX-PRE-PROCESS cmd="set" data="digits_dialed_filter=(($${df_luhn})|($${df_us_ssn}))"/>
      <X-PRE-PROCESS cmd="set" data="default_provider=sip.ipcall.ee"/>
      <X-PRE-PROCESS cmd="set" data="default_provider_username="/>
      <X-PRE-PROCESS cmd="set" data="default_provider_password="/>
      <X-PRE-PROCESS cmd="set" data="default_provider_from_domain=sip.ipcall.ee"/>
      <X-PRE-PROCESS cmd="set" data="default_provider_register=true"/>
      <X-PRE-PROCESS cmd="set" data="default_provider_contact=1004"/>
      <X-PRE-PROCESS cmd="set" data="sip_tls_version=tlsv1,tlsv1.1,tlsv1.2"/>
      <X-PRE-PROCESS cmd="set" data="sip_tls_ciphers=ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"/>
      <X-PRE-PROCESS cmd="set" data="internal_auth_calls=true"/>
      <X-PRE-PROCESS cmd="set" data="internal_sip_port=5060"/>
      <X-PRE-PROCESS cmd="set" data="internal_tls_port=5061"/>
      <X-PRE-PROCESS cmd="set" data="internal_ssl_enable=false"/>
      <X-PRE-PROCESS cmd="set" data="external_auth_calls=false"/>
      <X-PRE-PROCESS cmd="set" data="external_sip_port=5080"/>
      <X-PRE-PROCESS cmd="set" data="external_tls_port=5081"/>
      <X-PRE-PROCESS cmd="set" data="external_ssl_enable=false"/>
      <X-PRE-PROCESS cmd="set" data="rtp_video_max_bandwidth_in=3mb"/>
      <X-PRE-PROCESS cmd="set" data="rtp_video_max_bandwidth_out=3mb"/>
      <X-PRE-PROCESS cmd="set" data="suppress_cng=true"/>
      <X-PRE-PROCESS cmd="set" data="rtp_liberal_dtmf=true"/>
      <X-PRE-PROCESS cmd="set" data="video_mute_png=$${images_dir}/default-mute.png"/>
      <X-PRE-PROCESS cmd="set" data="video_no_avatar_png=$${images_dir}/default-avatar.png"/>
      <X-PRE-PROCESS cmd="include" data="PASSWORDS.xml"/>
    </include>
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: freeswitch-directory
  namespace: freeswitch
# User directory for the "default" domain, mounted whole (no subPath) at
# /etc/freeswitch/directory/default. One file per extension 1000-1009; each
# password is a $${NNNN_password} preprocessor variable supplied via
# PASSWORDS.xml from the freeswitch-passwords Secret. Only 1003/1004 carry a
# caller-ID name ("Erki A"); the rest are otherwise identical records.
data:
  1000.xml: |
    <include>
      <user id="1000">
        <params>
          <param name="password" value="$${1000_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1000"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value=""/>
          <variable name="effective_caller_id_number" value="1000"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  1001.xml: |
    <include>
      <user id="1001">
        <params>
          <param name="password" value="$${1001_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1001"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value=""/>
          <variable name="effective_caller_id_number" value="1001"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  1002.xml: |
    <include>
      <user id="1002">
        <params>
          <param name="password" value="$${1002_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1002"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value=""/>
          <variable name="effective_caller_id_number" value="1002"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  # 1003/1004 are the extensions bridged by the public dialplan's
  # transfer_to_default extension.
  1003.xml: |
    <include>
      <user id="1003">
        <params>
          <param name="password" value="$${1003_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1003"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value="Erki A"/>
          <variable name="effective_caller_id_number" value="1003"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  1004.xml: |
    <include>
      <user id="1004">
        <params>
          <param name="password" value="$${1004_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1004"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value="Erki A"/>
          <variable name="effective_caller_id_number" value="1004"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  1005.xml: |
    <include>
      <user id="1005">
        <params>
          <param name="password" value="$${1005_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1005"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value=""/>
          <variable name="effective_caller_id_number" value="1005"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  1006.xml: |
    <include>
      <user id="1006">
        <params>
          <param name="password" value="$${1006_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1006"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value=""/>
          <variable name="effective_caller_id_number" value="1006"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  1007.xml: |
    <include>
      <user id="1007">
        <params>
          <param name="password" value="$${1007_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1007"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value=""/>
          <variable name="effective_caller_id_number" value="1007"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  1008.xml: |
    <include>
      <user id="1008">
        <params>
          <param name="password" value="$${1008_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1008"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value=""/>
          <variable name="effective_caller_id_number" value="1008"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>
  1009.xml: |
    <include>
      <user id="1009">
        <params>
          <param name="password" value="$${1009_password}"/>
        </params>
        <variables>
          <variable name="toll_allow" value="domestic,local"/>
          <variable name="accountcode" value="1009"/>
          <variable name="user_context" value="default"/>
          <variable name="effective_caller_id_name" value=""/>
          <variable name="effective_caller_id_number" value="1009"/>
          <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
          <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
        </variables>
      </user>
    </include>

@ -2,11 +2,11 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: asterisk
name: freeswitch
spec:
podSelector:
matchLabels:
app: asterisk
app: freeswitch
policyTypes:
- Ingress
- Egress
@ -32,14 +32,18 @@ spec:
cidr: 172.20.8.241/32 # Erki A
- from:
- ipBlock:
cidr: 195.222.16.36/32 # Elisa SIP
cidr: 212.47.211.10/32 # Elisa SIP
- from:
- ipBlock:
cidr: 195.222.16.38/32 # Elisa SIP
cidr: 212.47.211.10/32 # Elisa SIP
egress:
- to:
- ipBlock:
cidr: 195.222.16.36/32 # Elisa SIP
cidr: 212.47.211.10/32 # Elisa SIP
- to:
- ipBlock:
cidr: 195.222.16.38/32 # Elisa SIP
- to:
ports:
- port: 53
protocol: UDP

@ -62,7 +62,7 @@ spec:
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.22
image: mirror.gcr.io/rancher/local-path-provisioner:v0.0.22
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
@ -151,7 +151,7 @@ data:
spec:
containers:
- name: helper-pod
image: busybox
image: mirror.gcr.io/library/busybox
imagePullPolicy: IfNotPresent

@ -1,10 +1,11 @@
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWClient
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: whoami-oidc
namespace: whoami-oidc
spec:
displayName: Whoami (oidc-tester-app)
displayName: Whoami OIDC
uri: https://whoami-oidc.k-space.ee
redirectUris:
- https://whoami-oidc.k-space.ee/auth/callback
@ -16,7 +17,6 @@ spec:
availableScopes:
- openid
- profile
tokenEndpointAuthMethod: client_secret_post
pkce: false
---
apiVersion: apps/v1

@ -1,68 +1,11 @@
# Workflow
Most applications in our Kubernetes cluster are managed by ArgoCD.
Most notably operators are NOT managed by ArgoCD.
Adding to `applications/`: `kubectl apply -f newapp.yaml`
# Deployment
To deploy ArgoCD:
```bash
helm repo add argo-cd https://argoproj.github.io/argo-helm
kubectl create secret -n argocd generic argocd-secret # Initialize empty secret for sessions
helm template -n argocd --release-name k6 argo-cd/argo-cd --include-crds -f values.yaml > argocd.yml
kubectl apply -f argocd.yml -f application-extras.yml -n argocd
kubectl -n argocd rollout restart deployment/k6-argocd-redis
kubectl -n argocd rollout restart deployment/k6-argocd-repo-server
kubectl -n argocd rollout restart deployment/k6-argocd-server
kubectl -n argocd rollout restart deployment/k6-argocd-notifications-controller
kubectl -n argocd rollout restart statefulset/k6-argocd-application-controller
kubectl label -n argocd secret oidc-client-argocd-owner-secrets app.kubernetes.io/part-of=argocd
```
# Setting up Git secrets
Generate SSH key to access Gitea:
## Managing applications
Update apps (see TODO below):
```
ssh-keygen -t ecdsa -f id_ecdsa -C argocd.k-space.ee -P ''
kubectl -n argocd create secret generic gitea-kube \
--from-literal=type=git \
--from-literal=url=git@git.k-space.ee:k-space/kube \
--from-file=sshPrivateKey=id_ecdsa
kubectl -n argocd create secret generic gitea-kube-staging \
--from-literal=type=git \
--from-literal=url=git@git.k-space.ee:k-space/kube-staging \
--from-file=sshPrivateKey=id_ecdsa
kubectl -n argocd create secret generic gitea-kube-members \
--from-literal=type=git \
--from-literal=url=git@git.k-space.ee:k-space/kube-members \
--from-file=sshPrivateKey=id_ecdsa
kubectl -n argocd create secret generic gitea-members \
--from-literal=type=git \
--from-literal=url=git@git.k-space.ee:k-space/kube-members \
--from-file=sshPrivateKey=id_ecdsa
kubectl label -n argocd secret gitea-kube argocd.argoproj.io/secret-type=repository
kubectl label -n argocd secret gitea-kube-staging argocd.argoproj.io/secret-type=repository
kubectl label -n argocd secret gitea-kube-members argocd.argoproj.io/secret-type=repository
kubectl label -n argocd secret gitea-members argocd.argoproj.io/secret-type=repository
rm -fv id_ecdsa
```
Have Gitea admin reset password for user `argocd` and log in with that account.
Add the SSH key for user `argocd` from file `id_ecdsa.pub`.
Delete any other SSH keys associated with Gitea user `argocd`.
# Managing applications
To update apps:
```
for j in asterisk bind camtiler etherpad freescout gitea grafana hackerspace nextcloud nyancat rosdump traefik wiki wildduck woodpecker; do
for j in asterisk bind camtiler etherpad freescout gitea grafana hackerspace nextcloud nyancat rosdump traefik wiki wildduck; do
cat << EOF >> applications/$j.yaml
---
apiVersion: argoproj.io/v1alpha1
@ -70,6 +13,10 @@ kind: Application
metadata:
name: $j
namespace: argocd
annotations:
# Works with only Kustomize and Helm. Kustomize is easy, see https://github.com/argoproj-labs/argocd-image-updater/tree/master/manifests/base for an example.
argocd-image-updater.argoproj.io/image-list: TODO:^2 # semver 2.*.*
argocd-image-updater.argoproj.io/write-back-method: git
spec:
project: k-space.ee
source:
@ -88,3 +35,24 @@ EOF
done
find applications -name "*.yaml" -exec kubectl apply -n argocd -f {} \;
```
### Repository secrets
1. Generate keys locally with `ssh-keygen -f argo`
2. Add `argo.pub` in `git.k-space.ee/<your>/<repo>` → Settings → Deploy keys
3. Add `argo` (private key) at https://argocd.k-space.ee/settings/repos along with referenced repo.
## Argo Deployment
To deploy ArgoCD itself:
```bash
helm repo add argo-cd https://argoproj.github.io/argo-helm
kubectl create secret -n argocd generic argocd-secret # Empty secret for sessions
kubectl label -n argocd secret oidc-client-argocd-owner-secrets app.kubernetes.io/part-of=argocd
helm template -n argocd --release-name k6 argo-cd/argo-cd --include-crds -f values.yaml > argocd.yml
kubectl apply -f argocd.yml -f application-extras.yml -f redis.yaml -f monitoring.yml -n argocd
kubectl -n argocd rollout restart deployment/k6-argocd-redis deployment/k6-argocd-repo-server deployment/k6-argocd-server deployment/k6-argocd-notifications-controller statefulset/k6-argocd-application-controller
```
WARN: ArgoCD doesn't host its own redis, Dragonfly must be able to independently cold-start.

@ -9,6 +9,7 @@ spec:
uri: https://argocd.k-space.ee
redirectUris:
- https://argocd.k-space.ee/auth/callback
- http://localhost:8085/auth/callback
allowedGroups:
- k-space:kubernetes:admins
grantTypes:

@ -2,17 +2,17 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: postgres-clusters
name: argocd-image-updater
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: postgres-clusters
targetRevision: HEAD
repoURL: 'https://github.com/argoproj-labs/argocd-image-updater.git'
path: manifests/base
targetRevision: stable
destination:
server: 'https://kubernetes.default.svc'
namespace: postgres-clusters
namespace: argocd
syncPolicy:
automated:
prune: true

@ -1,15 +0,0 @@
# ---
# apiVersion: argoproj.io/v1alpha1
# kind: Application
# metadata:
# name: camtiler
# namespace: argocd
# spec:
# project: k-space.ee
# source:
# repoURL: 'git@git.k-space.ee:k-space/kube.git'
# path: camtiler
# targetRevision: HEAD
# destination:
# server: 'https://kubernetes.default.svc'
# namespace: camtiler

@ -0,0 +1,21 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
namespace: argocd
spec:
project: k-space.ee
source:
# also depends on git@git.k-space.ee:secretspace/kube.git
repoURL: git@git.k-space.ee:k-space/kube.git
targetRevision: HEAD
path: cert-manager
destination:
server: 'https://kubernetes.default.svc'
namespace: cert-manager
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -0,0 +1,23 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cnpg # aka in-cluster postgres
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: https://github.com/cloudnative-pg/cloudnative-pg
targetRevision: v1.25.1
path: releases
directory:
include: 'cnpg-1.25.1.yaml'
destination:
server: 'https://kubernetes.default.svc'
namespace: cnpg-system
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true # Resource is too big to fit in 262144 bytes allowed annotation size.

@ -0,0 +1,23 @@
# See [/dragonfly/README.md](/dragonfly-operator-system/README.md)
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: dragonfly # replaces redis and keydb
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: https://github.com/dragonflydb/dragonfly-operator
targetRevision: v1.1.11 # https://github.com/dragonflydb/dragonfly-operator/releases
path: manifests
directory:
include: 'dragonfly-operator.yaml'
destination:
server: 'https://kubernetes.default.svc'
namespace: dragonfly-operator-system
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -0,0 +1,21 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: frigate
namespace: argocd
spec:
project: k-space.ee
source:
# also depends on git@git.k-space.ee:secretspace/kube.git
repoURL: git@git.k-space.ee:k-space/kube.git
targetRevision: HEAD
path: frigate
destination:
server: 'https://kubernetes.default.svc'
namespace: frigate
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -0,0 +1,21 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: harbor
namespace: argocd
spec:
project: k-space.ee
source:
# also depends on git@git.k-space.ee:secretspace/kube.git
repoURL: git@git.k-space.ee:k-space/kube.git
targetRevision: HEAD
path: harbor
destination:
server: 'https://kubernetes.default.svc'
namespace: harbor-operator
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -5,7 +5,7 @@ metadata:
name: kubernetes-dashboard
namespace: argocd
spec:
project: default
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: kubernetes-dashboard

@ -0,0 +1,21 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: metallb
namespace: argocd
spec:
project: k-space.ee
source:
# also depends on git@git.k-space.ee:secretspace/kube.git
repoURL: git@git.k-space.ee:k-space/kube.git
targetRevision: HEAD
path: metallb
destination:
server: 'https://kubernetes.default.svc'
namespace: metallb-system
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -7,7 +7,7 @@ metadata:
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/members.git'
repoURL: 'git@git.k-space.ee:secretspace/members.git'
path: members
targetRevision: HEAD
destination:

@ -2,19 +2,17 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: logmower
name: passmower
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: logmower
path: passmower
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: logmower
namespace: passmower
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -2,17 +2,17 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: asterisk
name: pgweb
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: asterisk
path: pgweb
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: asterisk
namespace: pgweb
syncPolicy:
automated:
prune: true

@ -0,0 +1,24 @@
# Note: Do not put any Prometheus instances or exporters in this namespace, instead have them in `monitoring` namespace
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: prometheus-operator
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: https://github.com/prometheus-operator/prometheus-operator.git
targetRevision: v0.82.0
path: .
kustomize:
namespace: prometheus-operator
destination:
server: 'https://kubernetes.default.svc'
namespace: prometheus-operator
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true # Resource is too big to fit in 262144 bytes allowed annotation size.

@ -0,0 +1,21 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: proxmox-csi
namespace: argocd
spec:
project: k-space.ee
source:
# also depends on git@git.k-space.ee:secretspace/kube.git
repoURL: git@git.k-space.ee:k-space/kube.git
targetRevision: HEAD
path: proxmox-csi
destination:
server: 'https://kubernetes.default.svc'
namespace: csi-proxmox
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -1,20 +0,0 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: redis-clusters
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: redis-clusters
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: redis-clusters
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -2,17 +2,17 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: reloader
name: ripe87
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: reloader
path: ripe87
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: reloader
namespace: ripe87
syncPolicy:
automated:
prune: true

@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: secret-claim-operator
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: https://github.com/codemowers/operatorlib
path: samples/secret-claim-operator
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: secret-claim-operator
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@ -0,0 +1,24 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: tigera-operator
namespace: argocd
spec:
project: k-space.ee
source:
# also depends on git@git.k-space.ee:secretspace/kube.git
repoURL: git@git.k-space.ee:k-space/kube.git
targetRevision: HEAD
path: tigera-operator
destination:
server: 'https://kubernetes.default.svc'
namespace: tigera-operator
# also houses calico-system and calico-apiserver
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true # Resource is too big to fit in 262144 bytes allowed annotation size.
- Force=true # `--force-conflicts`, according to https://docs.tigera.io/calico/latest/operations/upgrading/kubernetes-upgrade

@ -5,7 +5,7 @@ metadata:
name: whoami
namespace: argocd
spec:
project: default
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: whoami

@ -7,9 +7,10 @@ metadata:
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: woodpecker
# also depends on git@git.k-space.ee:secretspace/kube.git
repoURL: git@git.k-space.ee:k-space/kube.git
targetRevision: HEAD
path: woodpecker
destination:
server: 'https://kubernetes.default.svc'
namespace: woodpecker

2
argocd/deploy_key.pub Normal file

@ -0,0 +1,2 @@
# used for git.k-space: k-space/kube, secretspace/kube, secretspace/members
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOxYpFf85Vnxw7WNb/V5dtZT0PJ4VbBhdBNscDd8TVv/ argocd.k-space.ee

@ -14,13 +14,11 @@ externalRedis:
existingSecret: argocd-redis
server:
# HTTPS is implemented by Traefik
ingress:
enabled: true
annotations:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
hosts:
- argocd.k-space.ee
tls:
@ -69,7 +67,12 @@ configs:
p, role:developers, applications, action/apps/Deployment/restart, default/camtiler, allow
p, role:developers, applications, sync, default/camtiler, allow
p, role:developers, applications, update, default/camtiler, allow
# argocd-image-updater
p, role:image-updater, applications, get, */*, allow
p, role:image-updater, applications, update, */*, allow
g, image-updater, role:image-updater
cm:
kustomize.buildOptions: --enable-helm
admin.enabled: "false"
resource.customizations: |
# https://github.com/argoproj/argo-cd/issues/1704

@ -36,7 +36,7 @@ which are internally exposed IP-s of the secondaries.
To configure TSIG secrets:
```
```sh
kubectl create secret generic -n bind bind-readonly-secret \
--from-file=readonly.key
kubectl create secret generic -n bind bind-readwrite-secret \
@ -45,9 +45,8 @@ kubectl create secret generic -n bind external-dns
kubectl -n bind delete secret tsig-secret
kubectl -n bind create secret generic tsig-secret \
--from-literal=TSIG_SECRET=$(cat readwrite.key | grep secret | cut -d '"' -f 2)
kubectl -n cert-manager delete secret tsig-secret
kubectl -n cert-manager create secret generic tsig-secret \
--from-literal=TSIG_SECRET=$(cat readwrite.key | grep secret | cut -d '"' -f 2)
# ^ same tsig-secret is in git.k-space.ee/secretspace/kube cert-manager
```
# Serving additional zones

@ -50,7 +50,7 @@ spec:
spec:
containers:
- name: bind-secondary
image: internetsystemsconsortium/bind9:9.20
image: mirror.gcr.io/internetsystemsconsortium/bind9:9.20
resources:
limits:
cpu: 100m

@ -17,7 +17,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
image: registry.k8s.io/external-dns/external-dns:v0.16.1
resources:
limits:
cpu: 100m

@ -17,7 +17,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
image: registry.k8s.io/external-dns/external-dns:v0.16.1
resources:
limits:
cpu: 100m
@ -29,10 +29,10 @@ spec:
- secretRef:
name: tsig-secret
args:
- --log-level=debug
- --events
- --registry=noop
- --provider=rfc2136
- --source=ingress
- --source=service
- --source=crd
- --domain-filter=k6.ee
@ -73,8 +73,3 @@ spec:
recordType: A
targets:
- 62.65.250.2
- dnsName: k-space.ee
recordTTL: 300
recordType: MX
targets:
- 10 mail.k-space.ee

@ -17,7 +17,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
image: registry.k8s.io/external-dns/external-dns:v0.16.1
resources:
limits:
cpu: 100m

1
camtiler/.gitignore vendored

@ -1 +0,0 @@
deployments/

@ -1,87 +0,0 @@
# Cameras
Camtiler is the umbrella name for our homegrown camera surveillance system.
Everything besides [Camera](#camera)s is deployed with Kubernetes.
## Components
![cameras.graphviz.svg](cameras.graphviz.svg)
<!-- Manually rendered with https://dreampuf.github.io/GraphvizOnline
digraph G {
"camera-operator" -> "camera-motion-detect" [label="deploys"]
"camera-tiler" -> "cam.k-space.ee/tiled"
camera -> "camera-tiler"
camera -> "camera-motion-detect" -> mongo
"camera-motion-detect" -> "Minio S3"
"cam.k-space.ee" -> mongo [label="queries events", decorate=true]
mongo -> "camtiler-event-broker" [label="transforms object to add (signed) URL to S3", ]
"camtiler-event-broker" -> "cam.k-space.ee"
"Minio S3" -> "cam.k-space.ee" [label="using signed URL from camtiler-event-broker", decorate=true]
camera [label="📸 camera"]
}
-->
### 📸 Camera
Cameras are listed in [application.yml](application.yml) as `kind: Camera`.
Two types of camera hosts:
- GL-AR150 with [openwrt-camera-images](https://git.k-space.ee/k-space/openwrt-camera-image).
- [Doors](https://wiki.k-space.ee/e/en/hosting/doors) (Raspberry Pi) with mjpg-streamer.
### camera-tiler (cam.k-space.ee/tiled)
Out-of-bound, connects to cameras and streams to web browser.
One instance per every camera
#### camera-operator
Functionally the same as a kubernetes deployment for camera-tiler.
Operator/deployer for camera-tiler.
### camera-motion-detect
Connects to cameras, on motion writes events to Mongo and frames to S3.
### cam.k-space.ee (logmower)
Fetches motion-detect events from mongo. Fetches referenced images from S3 (minio).
#### camtiler-event-broker
MitM between motion-detect -> mongo. Appends S3 URLs to the response.
## Kubernetes commands
Apply changes:
```
kubectl apply -n camtiler \
-f application.yml \
-f minio.yml \
-f mongoexpress.yml \
-f mongodb-support.yml \
-f camera-tiler.yml \
-f logmower.yml \
-f ingress.yml \
-f network-policies.yml \
-f networkpolicy-base.yml
```
Deploy changes:
```
kubectl -n camtiler rollout restart deployment.apps/camtiler
```
Initialize secrets:
```
kubectl create secret generic -n camtiler mongodb-application-readwrite-password --from-literal="password=$(cat /dev/urandom | base64 | head -c 30)"
kubectl create secret generic -n camtiler mongodb-application-readonly-password --from-literal="password=$(cat /dev/urandom | base64 | head -c 30)"
kubectl create secret generic -n camtiler minio-secrets \
--from-literal="MINIO_ROOT_USER=root" \
--from-literal="MINIO_ROOT_PASSWORD=$(cat /dev/urandom | base64 | head -c 30)"
kubectl -n camtiler create secret generic camera-secrets \
--from-literal=username=... \
--from-literal=password=...
```
Restart all deployments:
```
for j in $(kubectl get deployments -n camtiler -o name); do kubectl rollout restart -n camtiler $j; done
```

@ -1,356 +0,0 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim
metadata:
name: camtiler
spec:
capacity: 150Gi
class: dedicated
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: cams.k-space.ee
spec:
group: k-space.ee
names:
plural: cams
singular: cam
kind: Camera
shortNames:
- cam
scope: Namespaced
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
roi:
type: object
description: Region of interest for this camera
properties:
threshold:
type: integer
description: Percentage of pixels changed within ROI to
consider whole frame to have motion detected.
Defaults to 5.
enabled:
type: boolean
description: Whether motion detection is enabled for this
camera. Defaults to false.
left:
type: integer
description: Left boundary of ROI as
percentage of the width of a frame.
By default 0.
right:
type: integer
description: Right boundary of ROI as
percentage of the width of a frame.
By default 100.
top:
type: integer
description: Top boundary of ROI as
percentage of the height of a frame.
By default 0.
bottom:
type: integer
description: Bottom boundary of ROI as
percentage of the height of a frame.
By default 100.
secretRef:
type: string
description: Secret that contains authentication credentials
target:
type: string
description: URL of the video feed stream
replicas:
type: integer
minimum: 1
maximum: 2
description: For highly available deployment set this to 2 or
higher. Make sure you also run Mongo and Minio in HA
configurations
required: ["target"]
required: ["spec"]
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterOperator
metadata:
name: camera
spec:
resource:
group: k-space.ee
version: v1alpha1
plural: cams
secret:
enabled: false
services:
- apiVersion: v1
kind: Service
metadata:
name: foobar
labels:
app.kubernetes.io/name: foobar
component: camera-motion-detect
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: foobar
component: camera-motion-detect
ports:
- protocol: TCP
port: 80
targetPort: 5000
deployments:
- apiVersion: apps/v1
kind: Deployment
metadata:
name: camera-foobar
spec:
revisionHistoryLimit: 0
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
# Swap following two with replicas: 2
maxSurge: 1
maxUnavailable: 0
selector:
matchLabels:
app.kubernetes.io/name: foobar
template:
metadata:
labels:
app.kubernetes.io/name: foobar
component: camera-motion-detect
spec:
containers:
- name: camera-motion-detect
image: harbor.k-space.ee/k-space/camera-motion-detect:latest
startupProbe:
httpGet:
path: /healthz
port: 5000
initialDelaySeconds: 2
periodSeconds: 180
timeoutSeconds: 60
readinessProbe:
httpGet:
path: /readyz
port: 5000
initialDelaySeconds: 60
periodSeconds: 60
timeoutSeconds: 5
ports:
- containerPort: 5000
name: "http"
resources:
requests:
memory: "64Mi"
cpu: "200m"
limits:
memory: "256Mi"
cpu: "4000m"
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
command:
- /app/camdetect.py
- http://user@foobar.cam.k-space.ee:8080/?action=stream
env:
- name: SOURCE_NAME
value: foobar
- name: S3_BUCKET_NAME
valueFrom:
secretKeyRef:
name: miniobucket-camtiler-owner-secrets
key: BUCKET_NAME
- name: S3_ENDPOINT_URL
valueFrom:
secretKeyRef:
name: miniobucket-camtiler-owner-secrets
key: AWS_S3_ENDPOINT_URL
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: miniobucket-camtiler-owner-secrets
key: AWS_SECRET_ACCESS_KEY
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: miniobucket-camtiler-owner-secrets
key: AWS_ACCESS_KEY_ID
- name: BASIC_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: camera-secrets
key: password
- name: MONGO_URI
valueFrom:
secretKeyRef:
name: mongodb-application-readwrite
key: connectionString.standard
# Make sure 2+ pods of same camera are scheduled on different hosts
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- foobar
topologyKey: topology.kubernetes.io/zone
# Make sure camera deployments are spread over workers
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: foobar
component: camera-motion-detect
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: cameras
spec:
groups:
- name: cameras
rules:
- alert: CameraLost
expr: rate(camtiler_frames_total{stage="downloaded"}[1m]) < 1
for: 2m
labels:
severity: warning
annotations:
summary: Camera feed stopped
- alert: CameraServerRoomMotion
expr: rate(camtiler_events_total{app_kubernetes_io_name="server-room"}[30m]) > 0
for: 1m
labels:
severity: warning
annotations:
summary: Motion was detected in server room
- alert: CameraSlowUploads
expr: camtiler_queue_frames{stage="upload"} > 10
for: 5m
labels:
severity: warning
annotations:
summary: Motion detect snapshots are piling up and
not getting uploaded to S3
- alert: CameraSlowProcessing
expr: camtiler_queue_frames{stage="download"} > 10
for: 5m
labels:
severity: warning
annotations:
summary: Motion detection processing pipeline is not keeping up
with incoming frames
- alert: CameraResourcesThrottled
expr: sum by (pod) (rate(container_cpu_cfs_throttled_periods_total{namespace="camtiler"}[1m])) > 0
for: 5m
labels:
severity: warning
annotations:
summary: CPU limits are bottleneck
---
# Referenced/linked by README.md
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
name: workshop
spec:
target: http://user@workshop.cam.k-space.ee:8080/?action=stream
secretRef: camera-secrets
replicas: 1
---
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
name: server-room
spec:
target: http://user@server-room.cam.k-space.ee:8080/?action=stream
secretRef: camera-secrets
replicas: 2
---
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
name: printer
spec:
target: http://user@printer.cam.k-space.ee:8080/?action=stream
secretRef: camera-secrets
replicas: 1
---
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
name: chaos
spec:
target: http://user@chaos.cam.k-space.ee:8080/?action=stream
secretRef: camera-secrets
replicas: 1
---
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
name: cyber
spec:
target: http://user@cyber.cam.k-space.ee:8080/?action=stream
secretRef: camera-secrets
replicas: 1
---
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
name: kitchen
spec:
target: http://user@kitchen.cam.k-space.ee:8080/?action=stream
secretRef: camera-secrets
replicas: 1
---
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
name: back-door
spec:
target: http://user@100.102.3.3:8080/?action=stream
secretRef: camera-secrets
replicas: 1
---
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
name: ground-door
spec:
target: http://user@100.102.3.1:8080/?action=stream
secretRef: camera-secrets
replicas: 1
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: camera-motion-detect
spec:
selector:
matchLabels:
component: camera-motion-detect
podMetricsEndpoints:
- port: http
podTargetLabels:
- app.kubernetes.io/name
- component

@ -1,98 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: camera-tiler
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: camtiler
component: camera-tiler
template:
metadata:
labels: *selectorLabels
spec:
serviceAccountName: camera-tiler
containers:
- name: camera-tiler
image: harbor.k-space.ee/k-space/camera-tiler:latest
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
ports:
- containerPort: 5001
name: "http"
resources:
requests:
memory: "200Mi"
cpu: "100m"
limits:
memory: "500Mi"
cpu: "4000m"
---
apiVersion: v1
kind: Service
metadata:
name: camera-tiler
labels:
app.kubernetes.io/name: camtiler
component: camera-tiler
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: camera-tiler
ports:
- protocol: TCP
port: 5001
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: camera-tiler
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: camera-tiler
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: camera-tiler
subjects:
- kind: ServiceAccount
name: camera-tiler
apiGroup: ""
roleRef:
kind: Role
name: camera-tiler
apiGroup: ""
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: camtiler
spec:
selector:
matchLabels:
app.kubernetes.io/name: camtiler
component: camera-tiler
podMetricsEndpoints:
- port: http
podTargetLabels:
- app.kubernetes.io/name
- component

@ -1,131 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 2.40.1 (20161225.0304)
-->
<!-- Title: G Pages: 1 -->
<svg width="658pt" height="387pt" viewBox="0.00 0.00 658.36 386.80" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 382.8)">
<title>G</title>
<polygon fill="#ffffff" stroke="transparent" points="-4,4 -4,-382.8 654.3562,-382.8 654.3562,4 -4,4"/>
<!-- camera&#45;operator -->
<g id="node1" class="node">
<title>camera-operator</title>
<ellipse fill="none" stroke="#000000" cx="356.22" cy="-360.8" rx="74.095" ry="18"/>
<text text-anchor="middle" x="356.22" y="-356.6" font-family="Times,serif" font-size="14.00" fill="#000000">camera-operator</text>
</g>
<!-- camera&#45;motion&#45;detect -->
<g id="node2" class="node">
<title>camera-motion-detect</title>
<ellipse fill="none" stroke="#000000" cx="356.22" cy="-272" rx="95.5221" ry="18"/>
<text text-anchor="middle" x="356.22" y="-267.8" font-family="Times,serif" font-size="14.00" fill="#000000">camera-motion-detect</text>
</g>
<!-- camera&#45;operator&#45;&gt;camera&#45;motion&#45;detect -->
<g id="edge1" class="edge">
<title>camera-operator-&gt;camera-motion-detect</title>
<path fill="none" stroke="#000000" d="M356.22,-342.4006C356.22,-330.2949 356.22,-314.2076 356.22,-300.4674"/>
<polygon fill="#000000" stroke="#000000" points="359.7201,-300.072 356.22,-290.072 352.7201,-300.0721 359.7201,-300.072"/>
<text text-anchor="middle" x="377.9949" y="-312.2" font-family="Times,serif" font-size="14.00" fill="#000000">deploys</text>
</g>
<!-- mongo -->
<g id="node6" class="node">
<title>mongo</title>
<ellipse fill="none" stroke="#000000" cx="292.22" cy="-199" rx="37.7256" ry="18"/>
<text text-anchor="middle" x="292.22" y="-194.8" font-family="Times,serif" font-size="14.00" fill="#000000">mongo</text>
</g>
<!-- camera&#45;motion&#45;detect&#45;&gt;mongo -->
<g id="edge5" class="edge">
<title>camera-motion-detect-&gt;mongo</title>
<path fill="none" stroke="#000000" d="M340.3997,-253.9551C332.3383,-244.76 322.4178,-233.4445 313.6783,-223.476"/>
<polygon fill="#000000" stroke="#000000" points="316.2049,-221.0485 306.9807,-215.8365 310.9413,-225.6632 316.2049,-221.0485"/>
</g>
<!-- Minio S3 -->
<g id="node7" class="node">
<title>Minio S3</title>
<ellipse fill="none" stroke="#000000" cx="396.22" cy="-145" rx="47.0129" ry="18"/>
<text text-anchor="middle" x="396.22" y="-140.8" font-family="Times,serif" font-size="14.00" fill="#000000">Minio S3</text>
</g>
<!-- camera&#45;motion&#45;detect&#45;&gt;Minio S3 -->
<g id="edge6" class="edge">
<title>camera-motion-detect-&gt;Minio S3</title>
<path fill="none" stroke="#000000" d="M361.951,-253.804C368.6045,-232.6791 379.6542,-197.5964 387.4031,-172.9935"/>
<polygon fill="#000000" stroke="#000000" points="390.8337,-173.7518 390.4996,-163.1622 384.157,-171.6489 390.8337,-173.7518"/>
</g>
<!-- camera&#45;tiler -->
<g id="node3" class="node">
<title>camera-tiler</title>
<ellipse fill="none" stroke="#000000" cx="527.22" cy="-272" rx="57.8558" ry="18"/>
<text text-anchor="middle" x="527.22" y="-267.8" font-family="Times,serif" font-size="14.00" fill="#000000">camera-tiler</text>
</g>
<!-- cam.k&#45;space.ee/tiled -->
<g id="node4" class="node">
<title>cam.k-space.ee/tiled</title>
<ellipse fill="none" stroke="#000000" cx="527.22" cy="-199" rx="89.7229" ry="18"/>
<text text-anchor="middle" x="527.22" y="-194.8" font-family="Times,serif" font-size="14.00" fill="#000000">cam.k-space.ee/tiled</text>
</g>
<!-- camera&#45;tiler&#45;&gt;cam.k&#45;space.ee/tiled -->
<g id="edge2" class="edge">
<title>camera-tiler-&gt;cam.k-space.ee/tiled</title>
<path fill="none" stroke="#000000" d="M527.22,-253.9551C527.22,-245.8828 527.22,-236.1764 527.22,-227.1817"/>
<polygon fill="#000000" stroke="#000000" points="530.7201,-227.0903 527.22,-217.0904 523.7201,-227.0904 530.7201,-227.0903"/>
</g>
<!-- camera -->
<g id="node5" class="node">
<title>camera</title>
<ellipse fill="none" stroke="#000000" cx="513.22" cy="-360.8" rx="51.565" ry="18"/>
<text text-anchor="middle" x="513.22" y="-356.6" font-family="Times,serif" font-size="14.00" fill="#000000">📸 camera</text>
</g>
<!-- camera&#45;&gt;camera&#45;motion&#45;detect -->
<g id="edge4" class="edge">
<title>camera-&gt;camera-motion-detect</title>
<path fill="none" stroke="#000000" d="M485.8726,-345.3322C460.8217,-331.1633 423.4609,-310.0318 395.271,-294.0875"/>
<polygon fill="#000000" stroke="#000000" points="396.8952,-290.9851 386.4679,-289.1084 393.449,-297.078 396.8952,-290.9851"/>
</g>
<!-- camera&#45;&gt;camera&#45;tiler -->
<g id="edge3" class="edge">
<title>camera-&gt;camera-tiler</title>
<path fill="none" stroke="#000000" d="M516.1208,-342.4006C518.0482,-330.175 520.6159,-313.8887 522.7961,-300.0599"/>
<polygon fill="#000000" stroke="#000000" points="526.2706,-300.4951 524.3708,-290.072 519.356,-299.4049 526.2706,-300.4951"/>
</g>
<!-- camtiler&#45;event&#45;broker -->
<g id="node9" class="node">
<title>camtiler-event-broker</title>
<ellipse fill="none" stroke="#000000" cx="95.22" cy="-91" rx="95.4404" ry="18"/>
<text text-anchor="middle" x="95.22" y="-86.8" font-family="Times,serif" font-size="14.00" fill="#000000">camtiler-event-broker</text>
</g>
<!-- mongo&#45;&gt;camtiler&#45;event&#45;broker -->
<g id="edge8" class="edge">
<title>mongo-&gt;camtiler-event-broker</title>
<path fill="none" stroke="#000000" d="M254.6316,-196.5601C185.4398,-191.6839 43.6101,-179.7471 28.9976,-163 18.4783,-150.9441 20.8204,-140.7526 28.9976,-127 32.2892,-121.4639 36.7631,-116.7259 41.8428,-112.6837"/>
<polygon fill="#000000" stroke="#000000" points="43.9975,-115.4493 50.2411,-106.8896 40.0224,-109.6875 43.9975,-115.4493"/>
<text text-anchor="middle" x="153.8312" y="-140.8" font-family="Times,serif" font-size="14.00" fill="#000000">transforms object to add (signed) URL to S3</text>
</g>
<!-- cam.k&#45;space.ee -->
<g id="node8" class="node">
<title>cam.k-space.ee</title>
<ellipse fill="none" stroke="#000000" cx="292.22" cy="-18" rx="70.0229" ry="18"/>
<text text-anchor="middle" x="292.22" y="-13.8" font-family="Times,serif" font-size="14.00" fill="#000000">cam.k-space.ee</text>
</g>
<!-- Minio S3&#45;&gt;cam.k&#45;space.ee -->
<g id="edge10" class="edge">
<title>Minio S3-&gt;cam.k-space.ee</title>
<path fill="none" stroke="#000000" d="M394.7596,-126.8896C392.7231,-111.3195 387.8537,-88.922 376.22,-73 366.0004,-59.0134 351.0573,-47.5978 336.5978,-38.8647"/>
<polygon fill="#000000" stroke="#000000" points="338.1215,-35.7041 327.7038,-33.7748 334.6446,-41.7796 338.1215,-35.7041"/>
<text text-anchor="middle" x="521.2881" y="-86.8" font-family="Times,serif" font-size="14.00" fill="#000000">using signed URL from camtiler-event-broker</text>
<polyline fill="none" stroke="#000000" points="650.3562,-82.6 392.22,-82.6 392.9753,-115.8309 "/>
</g>
<!-- cam.k&#45;space.ee&#45;&gt;mongo -->
<g id="edge7" class="edge">
<title>cam.k-space.ee-&gt;mongo</title>
<path fill="none" stroke="#000000" d="M292.22,-36.2125C292.22,-67.8476 292.22,-133.1569 292.22,-170.7273"/>
<polygon fill="#000000" stroke="#000000" points="288.7201,-170.9833 292.22,-180.9833 295.7201,-170.9833 288.7201,-170.9833"/>
<text text-anchor="middle" x="332.0647" y="-86.8" font-family="Times,serif" font-size="14.00" fill="#000000">queries events</text>
<polyline fill="none" stroke="#000000" points="371.9094,-82.6 292.22,-82.6 292.22,-91.3492 "/>
</g>
<!-- camtiler&#45;event&#45;broker&#45;&gt;cam.k&#45;space.ee -->
<g id="edge9" class="edge">
<title>camtiler-event-broker-&gt;cam.k-space.ee</title>
<path fill="none" stroke="#000000" d="M138.9406,-74.7989C169.6563,-63.417 210.7924,-48.1737 242.716,-36.3441"/>
<polygon fill="#000000" stroke="#000000" points="244.1451,-39.5472 252.3059,-32.7905 241.7128,-32.9833 244.1451,-39.5472"/>
</g>
</g>
</svg>

Before

(image error) Size: 7.8 KiB

@ -1,85 +0,0 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: sso
spec:
displayName: Cameras
uri: 'https://cam.k-space.ee/tiled'
allowedGroups:
- k-space:floor
- k-space:friends
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: camtiler
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: camtiler-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
external-dns.alpha.kubernetes.io/hostname: cams.k-space.ee,cam.k-space.ee
spec:
rules:
- host: cam.k-space.ee
http:
paths:
- pathType: Prefix
path: "/tiled"
backend:
service:
name: camera-tiler
port:
number: 5001
- pathType: Prefix
path: "/m"
backend:
service:
name: camera-tiler
port:
number: 5001
- pathType: Prefix
path: "/events"
backend:
service:
name: logmower-eventsource
port:
number: 3002
- pathType: Prefix
path: "/"
backend:
service:
name: logmower-frontend
port:
number: 8080
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: cams-redirect
spec:
redirectRegex:
regex: ^https://cams.k-space.ee/(.*)$
replacement: https://cam.k-space.ee/$1
permanent: true
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: cams
spec:
entryPoints:
- websecure
routes:
- match: Host(`cams.k-space.ee`)
kind: Rule
middlewares:
- name: cams-redirect
services:
- kind: TraefikService
name: api@internal

@ -1,182 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-eventsource
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: camtiler
component: logmower-eventsource
template:
metadata:
labels: *selectorLabels
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- camtiler
- key: component
operator: In
values:
- logmower-eventsource
topologyKey: topology.kubernetes.io/zone
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
containers:
- name: logmower-eventsource
image: harbor.k-space.ee/k-space/logmower-eventsource
ports:
- containerPort: 3002
name: nodejs
env:
- name: MONGO_COLLECTION
value: eventlog
- name: MONGODB_HOST
valueFrom:
secretKeyRef:
name: mongodb-application-readonly
key: connectionString.standard
- name: BACKEND
value: 'camtiler'
- name: BACKEND_BROKER_URL
value: 'http://logmower-event-broker'
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-event-broker
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: camtiler
component: logmower-event-broker
template:
metadata:
labels: *selectorLabels
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- camtiler
- key: component
operator: In
values:
- logmower-event-broker
topologyKey: topology.kubernetes.io/zone
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
containers:
- name: logmower-event-broker
image: harbor.k-space.ee/k-space/camera-event-broker
ports:
- containerPort: 3000
env:
- name: MINIO_BUCKET
valueFrom:
secretKeyRef:
name: miniobucket-camtiler-owner-secrets
key: BUCKET_NAME
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: miniobucket-camtiler-owner-secrets
key: AWS_SECRET_ACCESS_KEY
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: miniobucket-camtiler-owner-secrets
key: AWS_ACCESS_KEY_ID
- name: MINIO_HOSTNAME
value: 'dedicated-5ee6428f-4cb5-4c2e-90b5-364668f515c2.minio-clusters.k-space.ee'
- name: MINIO_PORT
value: '443'
- name: MINIO_SCHEMA
value: 'https'
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-frontend
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: camtiler
component: logmower-frontend
template:
metadata:
labels: *selectorLabels
spec:
containers:
- name: logmower-frontend
image: harbor.k-space.ee/k-space/logmower-frontend
ports:
- containerPort: 8080
name: http
---
apiVersion: v1
kind: Service
metadata:
name: logmower-frontend
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: logmower-frontend
ports:
- protocol: TCP
port: 8080
---
apiVersion: v1
kind: Service
metadata:
name: logmower-eventsource
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: logmower-eventsource
ports:
- protocol: TCP
port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: logmower-event-broker
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: camtiler
component: logmower-event-broker
ports:
- protocol: TCP
port: 80
targetPort: 3000

@ -1,110 +0,0 @@
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: mongodb
spec:
agent:
logLevel: ERROR
maxLogFileDurationHours: 1
additionalMongodConfig:
systemLog:
quiet: true
members: 2
arbiters: 1
type: ReplicaSet
version: "6.0.3"
security:
authentication:
modes: ["SCRAM"]
users:
- name: readwrite
db: application
passwordSecretRef:
name: mongodb-application-readwrite-password
roles:
- name: readWrite
db: application
scramCredentialsSecretName: mongodb-application-readwrite
- name: readonly
db: application
passwordSecretRef:
name: mongodb-application-readonly-password
roles:
- name: read
db: application
scramCredentialsSecretName: mongodb-application-readonly
statefulSet:
spec:
logLevel: WARN
template:
spec:
containers:
- name: mongod
resources:
requests:
cpu: 100m
memory: 512Mi
limits:
cpu: 500m
memory: 1Gi
volumeMounts:
- name: journal-volume
mountPath: /data/journal
- name: mongodb-agent
resources:
requests:
cpu: 1m
memory: 100Mi
limits: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- mongodb-svc
topologyKey: topology.kubernetes.io/zone
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
volumeClaimTemplates:
- metadata:
name: logs-volume
labels:
usecase: logs
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
- metadata:
name: journal-volume
labels:
usecase: journal
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
- metadata:
name: data-volume
labels:
usecase: data
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi

@ -1,195 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: camera-motion-detect
spec:
podSelector:
matchLabels:
component: camera-motion-detect
policyTypes:
- Ingress
# - Egress # Something wrong with using minio-clusters as namespaceSelector.
ingress:
- from:
- podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: camera-tiler
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
egress:
- to:
- ipBlock:
# Permit access to cameras outside the cluster
cidr: 100.102.0.0/16
- to:
- podSelector:
matchLabels:
app: mongodb-svc
ports:
- port: 27017
- to:
- podSelector:
matchLabels:
app.kubernetes.io/name: minio
ports:
- port: 9000
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: camera-tiler
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: camera-tiler
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
component: camera-motion-detect
ports:
- port: 5000
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-eventsource
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: logmower-eventsource
policyTypes:
- Ingress
# - Egress # Something wrong with using mongodb-svc as podSelector.
egress:
- to:
- podSelector:
matchLabels:
app: mongodb-svc
- podSelector:
matchLabels:
component: logmower-event-broker
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-event-broker
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: logmower-event-broker
policyTypes:
- Ingress
- Egress
egress:
- to:
# Minio access via Traefik's public endpoint
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
ingress:
- from:
- podSelector:
matchLabels:
component: logmower-eventsource
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-frontend
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: camtiler
component: logmower-frontend
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
# Config drift: Added by ArgoCD
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: minio
spec:
egress:
- ports:
- port: http
protocol: TCP
to:
- podSelector:
matchLabels:
app.kubernetes.io/name: minio
ingress:
- from:
- podSelector: {}
ports:
- port: http
protocol: TCP
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
podSelector:
matchLabels:
app.kubernetes.io/name: minio
policyTypes:
- Ingress
- Egress

@ -1 +0,0 @@
cert-manager.yaml

@ -7,7 +7,7 @@ Refer to the [Bind primary Ansible playbook](https://git.k-space.ee/k-space/ansi
[Bind namespace on Kubernetes cluster](https://git.k-space.ee/k-space/kube/src/branch/master/bind)
for more details
# For user
# For developer
Use `Certificate` CRD of cert-manager, refer to
[official documentation](https://cert-manager.io/docs/usage/certificate/).
@ -15,23 +15,14 @@ Use `Certificate` CRD of cert-manager, refer to
To find usage examples in this repository use
`grep -r -A10 "^kind: Certificate" .`
# For administrator
# Deployment
With ArgoCD. Render it locally:
Deployed with:
```
curl -L https://github.com/jetstack/cert-manager/releases/download/v1.15.1/cert-manager.yaml -O
kubectl apply -f cert-manager.yaml
```
To update the issuer configuration or TSIG secret:
```
kubectl apply -f default-issuer.yml
kubectl -n cert-manager create secret generic tsig-secret \
--from-literal=TSIG_SECRET=<secret>
```sh
kustomize build . --enable-helm
```
## Webhook timeout
Workaround for webhook timeout issue https://github.com/jetstack/cert-manager/issues/2602
It's not very clear why this is happening, deserves further investigation - presumably Calico related somehow:

@ -9,7 +9,7 @@ spec:
email: info@k-space.ee
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: example-issuer-account-key
name: example-issuer-account-key # auto-generated by cert-manager
solvers:
- dns01:
rfc2136:

@ -0,0 +1,21 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: cert-manager
# spec: https://kubectl.docs.kubernetes.io/references/kustomize/builtins/#_helmchartinflationgenerator_
helmCharts:
- includeCRDs: true
name: &name cert-manager
releaseName: *name
repo: https://charts.jetstack.io
valuesInline:
namespace: *name
global:
leaderElection:
namespace: *name
version: v1.18.1
resources:
- ssh://git@git.k-space.ee/secretspace/kube/cert-manager # secrets (.env): tsig-secret
- ./default.yaml

@ -1,8 +0,0 @@
# CloudNativePG
To deploy:
```
kubectl apply --server-side -f \
https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.23/releases/cnpg-1.23.2.yaml
```

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: netshoot
image: nicolaka/netshoot
image: mirror.gcr.io/nicolaka/netshoot:latest
command:
- /bin/bash
args:

@ -1,5 +1,4 @@
# Dragonfly Operator
Dragonfly operator is the preferred way to add Redis support to your application
as it is modern Go rewrite and it supports high availability.
@ -14,9 +13,7 @@ Note that vanilla Redis
[has changed it's licensing policy](https://redis.io/blog/redis-adopts-dual-source-available-licensing/)
# For users
Refer to [official documentation on usage](https://www.dragonflydb.io/docs/getting-started/kubernetes-operator#create-a-dragonfly-instance-with-replicas)
For example deployment see
[here](https://git.k-space.ee/k-space/kube/src/branch/master/passmower/dragonfly.yaml).
To find other instances in this repository use `grep -r "kind: Dragonfly"`
@ -26,13 +23,4 @@ To achieve high availability use 2+ replicas with correctly configured
`topologySpreadConstraints`.
# For administrators
The operator was deployed with following snippet:
```
kubectl apply -f https://raw.githubusercontent.com/dragonflydb/dragonfly-operator/v1.1.6/manifests/dragonfly-operator.yaml
```
To upgrade refer to
[github.com/dragonflydb/dragonfly-operator](https://github.com/dragonflydb/dragonfly-operator/releases),
bump version and reapply
See [/argocd/applications/dragonfly.yaml](/argocd/applications/dragonfly.yaml)

@ -57,7 +57,7 @@ spec:
cpu: 100m
memory: 100Mi
- name: exporter
image: sepa/beats-exporter
image: mirror.gcr.io/sepa/beats-exporter:latest
args:
- -p=5066
ports:
@ -129,7 +129,7 @@ spec:
- name: filebeat-registry
mountPath: /usr/share/filebeat/data
- name: exporter
image: sepa/beats-exporter
image: mirror.gcr.io/sepa/beats-exporter:latest
args:
- -p=5066
ports:
@ -258,7 +258,7 @@ spec:
co.elastic.logs/enabled: 'false'
spec:
containers:
- name: kibana
- name: kibana
readinessProbe:
httpGet:
path: /app/home

@ -1,9 +1,8 @@
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWMiddlewareClient
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: sso
namespace: etherpad
name: etherpad
spec:
displayName: Etherpad
uri: 'https://pad.k-space.ee/'
@ -29,7 +28,7 @@ spec:
spec:
containers:
- name: etherpad
image: etherpad/etherpad:2
image: mirror.gcr.io/etherpad/etherpad:2
securityContext:
# Etherpad writes session key during start
readOnlyRootFilesystem: false
@ -88,7 +87,6 @@ metadata:
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:

@ -13,6 +13,10 @@ Forwarding to personal eg. `@gmail.com` mailbox can be configured via
[Wildduck webmail](https://webmail.k-space.ee/account/profile)
> Whoops, looks like something went wrong — check logs in /storage/logs
The paid(!) OIDC plugin still requires creation of local account by an administrator. This probably means the OIDC user tried to log in before an account (with matching <username>@k-space.ee mail) existed in Freescout local users.
# For administrator
This application is managed by [ArgoCD](https://argocd.k-space.ee/applications/argocd/freescout)

@ -45,8 +45,7 @@ spec:
emptyDir: {}
initContainers:
- name: jq
image: >-
alpine/k8s:1.24.16@sha256:06f8942d87fa17b40795bb9a8eff029a9be3fc3c9bcc13d62071de4cc3324153
image: mirror.gcr.io/alpine/k8s:1.31.76@sha256:2a3fdd639c71c6cad69fbc8cac2467648855dac29961efec3b155466cc4fa730
command:
- /bin/bash
- '-c'
@ -81,7 +80,7 @@ spec:
imagePullPolicy: IfNotPresent
containers:
- name: mysql
image: mysql
image: mirror.gcr.io/library/mysql:latest
command:
- /bin/bash
- '-c'
@ -111,7 +110,6 @@ metadata:
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: freescout-freescout@kubernetescrd
spec:

16
frigate/README.md Normal file

@ -0,0 +1,16 @@
# Frigate
## Deployment
With ArgoCD. Render it locally:
```sh
kustomize build . --enable-helm
```
## Cameras
<!-- linked to by https://wiki.k-space.ee/e/en/hosting/doors -->
- GL-AR150 with [openwrt-camera-image](https://git.k-space.ee/k-space/openwrt-camera-image) (USB cameras).
- [Doors](https://wiki.k-space.ee/e/en/hosting/doors) (Raspberry Pi) with mjpg-streamer (USB cameras).
- Amcrest 5MP Turret PoE Outdoor IP Camera with Mic/Audio, 98ft NightVision, 132° FOV, MicroSD (256GB) IP5M-T1179EW-AI-V3 white
Cameras are enumerated (with credentials) in secretspace.

10
frigate/auth.yml Normal file

@ -0,0 +1,10 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
  name: frigate
spec:
  displayName: Frigate
  uri: 'https://frigate.k-space.ee/'
  # Access restricted to legal members via the Passmower OIDC middleware.
  allowedGroups:
    - k-space:legalmember

@ -0,0 +1,21 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: frigate
# Render locally with: kustomize build . --enable-helm
# spec: https://kubectl.docs.kubernetes.io/references/kustomize/builtins/#_helmchartinflationgenerator_
helmCharts:
  - includeCRDs: true
    name: &name frigate
    releaseName: *name
    repo: https://blakeblackshear.github.io/blakeshome-charts/
    valuesFile: values.yaml
    version: 7.8.0
resources:
  - ssh://git@git.k-space.ee/secretspace/kube/frigate # secrets (.env): go2rtc-config, frigate-mqtt-secret, frigate-rtsp-secret
  - ./auth.yml
  - ./rabbitmq.yml
  # NOTE(review): storage-class.yml intentionally disabled — confirm
  #- ./storage-class.yml
  - ./storage.yml
  - ./transcode.yml

12
frigate/rabbitmq.yml Normal file

@ -0,0 +1,12 @@
apiVersion: rabbitmq.com/v1beta1
kind: RabbitmqCluster
metadata:
  # This is the MQTT broker that Frigate's values.yaml points at
  # (mqtt.host: frigate-mqtt).
  name: frigate-mqtt
spec:
  replicas: 3
  persistence:
    storageClassName: rabbitmq
    storage: 10Gi
  rabbitmq:
    # Enable the MQTT protocol adapter (port 1883) on top of AMQP.
    additionalPlugins:
      - rabbitmq_mqtt

@ -1,27 +1,28 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: proxmox
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs
cache: none
ssd: "false"
name: frigate-config
provisioner: csi.proxmox.sinextra.dev
parameters:
cache: none
csi.storage.k8s.io/fstype: xfs
ssd: 'true'
storage: ks-pvs
reclaimPolicy: Retain
allowVolumeExpansion: true
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: proxmox-nas
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs-nas
cache: none
ssd: "false"
name: frigate-data
provisioner: csi.proxmox.sinextra.dev
allowVolumeExpansion: true
parameters:
cache: none
csi.storage.k8s.io/fstype: xfs
shared: 'true'
ssd: 'false'
storage: ks-pvs-nas
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer

32
frigate/storage.yml Normal file

@ -0,0 +1,32 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: frigate-storage
spec:
  persistentVolumeReclaimPolicy: Retain
  capacity:
    storage: 1Ti
  accessModes:
    - ReadWriteMany
  # Empty storageClassName opts out of dynamic provisioning; this PV is
  # statically paired with the claim below.
  storageClassName: ""
  nfs:
    server: 172.21.0.7
    path: /nas/k6/frigate
  mountOptions:
    - vers=4
    - minorversion=1
    # NOTE(review): noac disables NFS attribute caching, which costs
    # performance — presumably chosen for consistency; confirm.
    - noac
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: frigate-storage
spec:
  # Bind explicitly to the PV above (no dynamic provisioning).
  volumeName: frigate-storage
  storageClassName: ""
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Ti

81
frigate/transcode.yml Normal file

@ -0,0 +1,81 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: go2rtc
  labels:
    app.kubernetes.io/name: go2rtc
    app.kubernetes.io/instance: go2rtc
spec:
  replicas: 1
  # Recreate: tear down the old pod before starting the new one, so two
  # pods never compete for the single GPU / dedicated node.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: go2rtc
      app.kubernetes.io/instance: go2rtc
  template:
    metadata:
      labels:
        app.kubernetes.io/name: go2rtc
        app.kubernetes.io/instance: go2rtc
    spec:
      # One go2rtc pod per node (only relevant should replicas grow past 1).
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app.kubernetes.io/name
                    operator: In
                    values:
                      - go2rtc
              topologyKey: "kubernetes.io/hostname"
      # Schedule onto the dedicated NVR node (tainted dedicated=nvr).
      nodeSelector:
        dedicated: nvr
      tolerations:
        - key: dedicated
          operator: Equal
          value: nvr
          effect: NoSchedule
      containers:
        - name: go2rtc
          # Pulled via the gcr.io Docker Hub mirror with an explicit tag,
          # consistent with the other manifests in this repository.
          image: mirror.gcr.io/alexxit/go2rtc:latest
          ports:
            - name: rtsp
              containerPort: 8554
              protocol: TCP
            - name: api
              containerPort: 1984
              protocol: TCP
          volumeMounts:
            # go2rtc reads /config/go2rtc.yaml; the secret's key is
            # config.yml, hence the subPath mapping.
            - mountPath: /config/go2rtc.yaml
              subPath: config.yml
              name: config
          resources:
            limits:
              nvidia.com/gpu: 1
      volumes:
        - name: config
          secret:
            # Provisioned from secretspace (see kustomization.yaml).
            secretName: go2rtc-config
            items:
              - key: config.yml
                path: config.yml
---
apiVersion: v1
kind: Service
metadata:
  name: go2rtc
  labels:
    app.kubernetes.io/name: go2rtc
    app.kubernetes.io/instance: go2rtc
spec:
  type: ClusterIP
  ipFamilyPolicy: SingleStack
  # Only RTSP is exposed; the container's api port (1984) is not
  # published here — NOTE(review): presumably intentional, confirm.
  ports:
    - name: rtsp
      port: 8554
      protocol: TCP
      targetPort: rtsp
  selector:
    app.kubernetes.io/name: go2rtc
    app.kubernetes.io/instance: go2rtc

141
frigate/values.yaml Normal file

@ -0,0 +1,141 @@
envFromSecrets:
  # secrets are required before `helm install`
  # NOTE(review): kustomization.yaml's secretspace comment lists
  # frigate-mqtt-secret / frigate-rtsp-secret — confirm these secret
  # names match what is actually provisioned.
  - frigate-rtsp-credentials
  - frigate-mqtt-credentials
coral:
  # -- enables the use of a Coral device
  enabled: true
  # -- path on the host to which to mount the Coral device
  hostPath: /dev/bus/usb
# -- amount of shared memory to use for caching
shmSize: 4Gi
# -- use memory for tmpfs (mounted to /tmp)
tmpfs:
  enabled: true
  sizeLimit: 4Gi
# -- frigate configuration - see [Docs](https://docs.frigate.video/configuration/index) for more info
# NOTE(review): the block below is passed verbatim to Frigate as its own
# config file; `True` inside it is valid YAML 1.1 boolean for Frigate's
# loader, left untouched on purpose.
config: |
  mqtt:
    host: frigate-mqtt
    port: 1883
    topic_prefix: frigate
    client_id: frigate
    user: '{FRIGATE_MQTT_USERNAME}'
    password: '{FRIGATE_MQTT_PASSWORD}'
    stats_interval: 60
  detectors:
    coral:
      type: edgetpu
      device: usb
  #model:
  #  width: 300
  #  height: 300
  #  input_tensor: nhwc
  #  input_pixel_format: bgr
  #  path: /openvino-model/ssdlite_mobilenet_v2.xml
  #  labelmap_path: /openvino-model/coco_91cl_bkgr.txt
  record:
    enabled: True
    retain:
      days: 3
      mode: motion
    events:
      retain:
        default: 30
        mode: motion
  # go2rtc (IPs, creds) each camera defined in secretspace/kube:frigate/go2rtc-config.yml
  cameras:
    server_room:
      ffmpeg:
        inputs:
          - path: rtsp://go2rtc:8554/server_room
            roles:
              - detect
              - rtmp
              - record
    chaos:
      ffmpeg:
        inputs:
          - path: rtsp://go2rtc:8554/chaos
            roles:
              - detect
              - rtmp
              - record
    cyber:
      ffmpeg:
        inputs:
          - path: rtsp://go2rtc:8554/cyber
            roles:
              - detect
              - rtmp
              - record
    workshop:
      ffmpeg:
        inputs:
          - path: rtsp://go2rtc:8554/workshop
            roles:
              - detect
              - rtmp
              - record
    ground_door:
      ffmpeg:
        inputs:
          - path: rtsp://go2rtc:8554/ground_door
            roles:
              - detect
              - rtmp
              - record
    back_door:
      ffmpeg:
        inputs:
          - path: rtsp://go2rtc:8554/back_door
            roles:
              - detect
              - rtmp
              - record
ingress:
  enabled: true
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
    # OIDC auth middleware defined in auth.yml (namespace-name@kubernetescrd).
    traefik.ingress.kubernetes.io/router.middlewares: frigate-frigate@kubernetescrd
  hosts:
    - host: frigate.k-space.ee
      paths:
        - path: '/'
          portName: http
  tls:
    - hosts:
        - "*.k-space.ee"
persistence:
  config:
    # WIP :)
    # NOTE(review): storageClass is the literal string "null" — verify
    # this is what the chart expects; inert while enabled is false.
    enabled: false
    storageClass: "null"
    accessMode: ReadWriteOnce
    size: 1000Mi
    skipuninstall: false
  media:
    enabled: true
    # Bound to the static NFS PV/PVC defined in storage.yml.
    existingClaim: "frigate-storage"
    skipuninstall: true
# Force application to run on nvr node
nodeSelector:
  dedicated: nvr
tolerations:
  - key: dedicated
    operator: Equal
    value: nvr
    effect: NoSchedule

@ -7,3 +7,5 @@ Should ArgoCD be down manifests here can be applied with:
```
kubectl apply -n gitea -f application.yaml
```
Gitea DOES NOT go through Traefik. It has its own IP because ssh :22 would conflict with kube worker ssh. On its own IP, at the moment it doesn't flirt with Traefik — also has its own certificate.

@ -8,10 +8,13 @@ spec:
dnsNames:
- git.k-space.ee
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: default
secretName: git-tls
revisionHistoryLimit: 1
# Gitea DOES NOT go through Traefik. It has its own IP because ssh :22 would conflict with kube worker ssh. On its own IP, at the moment it doesn't flirt with Traefik — also has its own certificate.
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
@ -53,6 +56,7 @@ spec:
availableScopes:
- openid
- profile
overrideIncomingScopes: true
pkce: false
secretRefreshPod:
apiVersion: v1
@ -65,7 +69,7 @@ spec:
emptyDir: {}
initContainers:
- name: jq
image: alpine/k8s:1.24.16@sha256:06f8942d87fa17b40795bb9a8eff029a9be3fc3c9bcc13d62071de4cc3324153
image: mirror.gcr.io/alpine/k8s:1.31.76@sha256:2a3fdd639c71c6cad69fbc8cac2467648855dac29961efec3b155466cc4fa730
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
@ -79,7 +83,7 @@ spec:
- jq '{"strategyKey":"OpenID","config":{"Provider":"openidConnect","ClientID":$ENV.OIDC_CLIENT_ID,"ClientSecret":$ENV.OIDC_CLIENT_SECRET,"OpenIDConnectAutoDiscoveryURL":"https://auth.k-space.ee/.well-known/openid-configuration","CustomURLMapping":null,"IconURL":"","Scopes":null,"RequiredClaimName":"","RequiredClaimValue":"","GroupClaimName":"","AdminGroup":"","GroupTeamMap":"","GroupTeamMapRemoval":false,"RestrictedGroup":""}} | "UPDATE login_source SET cfg=\(.config|tostring|@sh) WHERE name=\(.strategyKey|tostring|@sh) LIMIT 1"' -n -r > /tmp/update.sql
containers:
- name: mysql
image: mysql
image: mirror.gcr.io/library/mysql:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
@ -121,7 +125,7 @@ spec:
runAsNonRoot: true
containers:
- name: gitea
image: gitea/gitea:1.22.1-rootless
image: docker.gitea.com/gitea:1.23.7-rootless
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true
@ -170,6 +174,11 @@ spec:
value: "false"
- name: GITEA__SECURITY__INSTALL_LOCK
value: "true"
# Disable bypassing (disabled) OIDC account. Password-based app tokens remain enabled.
- name: GITEA__SERVICE__ENABLE_PASSWORD_SIGNIN_FORM
value: "false"
- name: GITEA__SERVICE__ENABLE_PASSKEY_AUTHENTICATION
value: "false"
- name: GITEA__SERVICE__REGISTER_EMAIL_CONFIRM
value: "true"
- name: GITEA__SERVICE__DISABLE_REGISTRATION

@ -18,6 +18,7 @@ spec:
availableScopes:
- openid
- profile
- groups
tokenEndpointAuthMethod: none
---
apiVersion: v1
@ -49,14 +50,17 @@ data:
root_url = https://%(domain)s/
[auth]
oauth_allow_insecure_email_lookup=true
[auth.basic]
enabled = false
[auth.generic_oauth]
name = OAuth
icon = signin
enabled = true
empty_scopes = false
scopes = openid profile groups
allow_sign_up = true
use_pkce = true
role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || 'Viewer'
role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || contains(groups[*], 'k-space:floor') && 'Editor' || Viewer
allow_assign_grafana_admin = true
[security]
disable_initial_admin_creation = true
---
@ -81,7 +85,7 @@ spec:
fsGroup: 472
containers:
- name: grafana
image: grafana/grafana:11.1.0
image: mirror.gcr.io/grafana/grafana:11.6.0
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
@ -199,7 +203,6 @@ metadata:
name: grafana
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
@ -216,11 +219,3 @@ spec:
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: codemowers.cloud/v1beta1
kind: MysqlDatabaseClaim
metadata:
name: grafana
spec:
capacity: 1Gi
class: shared

@ -82,7 +82,6 @@ metadata:
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
@ -110,57 +109,3 @@ spec:
app.kubernetes.io/name: doorboy-proxy
podMetricsEndpoints:
- port: http
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kdoorpi
spec:
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: kdoorpi
template:
metadata:
labels: *selectorLabels
spec:
containers:
- name: kdoorpi
image: harbor.k-space.ee/k-space/kdoorpi:latest
env:
- name: KDOORPI_API_ALLOWED
value: https://doorboy-proxy.k-space.ee/allowed
- name: KDOORPI_API_LONGPOLL
value: https://doorboy-proxy.k-space.ee/longpoll
- name: KDOORPI_API_SWIPE
value: http://172.21.99.98/swipe
- name: KDOORPI_DOOR
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KDOORPI_API_KEY
valueFrom:
secretKeyRef:
name: doorboy-api
key: DOORBOY_SECRET
- name: KDOORPI_UID_SALT
valueFrom:
secretKeyRef:
name: doorboy-uid-hash-salt
key: KDOORPI_UID_SALT
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
nodeSelector:
dedicated: door
tolerations:
- key: dedicated
operator: Equal
value: door
effect: NoSchedule
- key: arch
operator: Equal
value: arm64
effect: NoSchedule

@ -2,38 +2,27 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: goredirect
namespace: hackerspace
spec:
replicas: 2
revisionHistoryLimit: 0
selector:
matchLabels:
app.kubernetes.io/name: goredirect
app: goredirect
template:
metadata:
labels:
app.kubernetes.io/name: goredirect
app: goredirect
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- goredirect
topologyKey: topology.kubernetes.io/zone
weight: 100
containers:
- image: harbor.k-space.ee/k-space/goredirect:latest
imagePullPolicy: Always
imagePullPolicy: Always
env:
- name: GOREDIRECT_NOT_FOUND
value: https://inventory.k-space.ee/m/inventory/add-slug/%s
value: https://inventory.k-space.ee/m/inventory/add-by-slug/%s
- name: GOREDIRECT_FOUND
value: https://inventory.k-space.ee/m/inventory/%s/view
- name: GOREDIRECT_NOPATH
value: https://inventory.k-space.ee/m/inventory
- name: MONGO_URI
valueFrom:
secretKeyRef:
@ -42,7 +31,6 @@ spec:
name: goredirect
ports:
- containerPort: 8080
name: http
protocol: TCP
resources:
limits:
@ -60,17 +48,35 @@ apiVersion: v1
kind: Service
metadata:
name: goredirect
annotations:
external-dns.alpha.kubernetes.io/hostname: k6.ee
metallb.universe.tf/address-pool: elisa
spec:
ports:
- name: http
protocol: TCP
port: 80
targetPort: 8080
nodePort: 32120
type: ClusterIP
selector:
app.kubernetes.io/name: goredirect
type: LoadBalancer
externalTrafficPolicy: Local
app: goredirect
ports:
- protocol: TCP
port: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: goredirect
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
# external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
# external-dns.alpha.kubernetes.io/hostname: k6.ee
spec:
rules:
- host: k6.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: goredirect
port:
number: 8080
tls:
- hosts:
- "k6.ee"

@ -39,5 +39,5 @@ metadata:
name: inventory-external
namespace: hackerspace
spec:
capacity: 1Gi
capacity: 10Gi
class: external

@ -2,18 +2,19 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: inventory
namespace: hackerspace
name: inventory-app
labels:
app: signs-webpage
spec:
replicas: 1
revisionHistoryLimit: 0
selector:
matchLabels:
app.kubernetes.io/name: inventory
app: inventory-app
template:
metadata:
labels:
app.kubernetes.io/name: inventory
app: inventory-app
spec:
containers:
- image: harbor.k-space.ee/k-space/inventory-app:latest
@ -25,6 +26,8 @@ spec:
value: "1"
- name: INVENTORY_ASSETS_BASE_URL
value: https://external.minio-clusters.k-space.ee/hackerspace-701d9303-0f27-4829-a2be-b1084021ad91/
- name: MACADDRESS_OUTLINK_BASEURL
value: https://grafana.k-space.ee/d/ddwyidbtbc16oa/ip-usage?orgId=1&from=now-2y&to=now&timezone=browser&var-Filters=mac%7C%3D%7C
- name: OIDC_USERS_NAMESPACE
value: passmower
- name: SECRET_KEY
@ -54,7 +57,7 @@ spec:
name: oidc-client-inventory-app-owner-secrets
- secretRef:
name: inventory-mongodb
name: inventory
name: inventory-app
ports:
- containerPort: 5000
name: http
@ -78,8 +81,7 @@ spec:
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
serviceAccount: inventory
serviceAccountName: inventory
serviceAccountName: inventory-svcacc
terminationGracePeriodSeconds: 30
volumes:
- name: tmp
@ -88,9 +90,8 @@ apiVersion: v1
kind: Service
metadata:
name: inventory-app
labels:
app: inventory-app
spec:
type: ClusterIP
selector:
app: inventory-app
ports:
@ -102,12 +103,11 @@ kind: Ingress
metadata:
name: inventory-app
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
external-dns.alpha.kubernetes.io/hostname: members.k-space.ee,inventory.k-space.ee
external-dns.alpha.kubernetes.io/hostname: inventory.k-space.ee,members.k-space.ee
spec:
ingressClassName: shared
rules:
- host: inventory.k-space.ee
http:
@ -133,19 +133,22 @@ spec:
- 'https://inventory.k-space.ee/login-callback'
grantTypes:
- 'authorization_code'
- 'refresh_token'
responseTypes:
- 'code'
availableScopes:
- 'openid'
- 'profile'
- 'groups'
- 'offline_access'
tokenEndpointAuthMethod: 'client_secret_basic'
pkce: false
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: inventory
name: inventory-role
namespace: hackerspace
rules:
- verbs:
- get
@ -160,17 +163,18 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: inventory
name: inventory-roles
namespace: hackerspace
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: inventory
name: inventory-role
subjects:
- kind: ServiceAccount
name: inventory
name: inventory-svcacc
namespace: hackerspace
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: inventory
name: inventory-svcacc

1
harbor/.gitignore vendored

@ -1 +0,0 @@
harbor.yml

@ -5,12 +5,14 @@ kubectl create namespace harbor-operator
kubectl -n harbor-operator create secret generic harbor-minio-credentials --from-literal REGISTRY_STORAGE_S3_ACCESSKEY=...--from-literal=REGISTRY_STORAGE_S3_SECRETKEY=...
kubectl -n harbor-operator create secret generic harbor-postgres-password --from-literal password=...
helm repo add harbor https://helm.goharbor.io
```
# Deployment
With ArgoCD. Render it locally:
helm template -n harbor-operator --release-name harbor harbor/harbor --include-crds -f harbor/values.yaml > harbor/application.yml
kubectl apply -n harbor-operator -f harbor/application.yml -f harbor/application-extras.yml
```sh
kustomize build . --enable-helm
```
After deployment login with Harbor admin credentials and configure OIDC:
After initial deployment login with Harbor admin credentials and configure OIDC:
![OIDC configuration](harbor-oidc-config.png)

@ -51,7 +51,7 @@ spec:
passwordFromSecret:
key: REDIS_PASSWORD
name: dragonfly-auth
replicas: 3
replicas: 1
resources:
limits:
memory: 5Gi
memory: 5Gi

File diff suppressed because it is too large Load Diff

16
harbor/kustomization.yaml Normal file

@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: harbor-operator
# spec: https://kubectl.docs.kubernetes.io/references/kustomize/builtins/#_helmchartinflationgenerator_
helmCharts:
- includeCRDs: true
name: &name harbor
releaseName: *name
repo: https://helm.goharbor.io
valuesFile: values.yaml
version: 1.17.1
resources:
- ./application-extras.yml

@ -1,21 +1,19 @@
expose:
type: loadBalancer
type: ingress
tls:
# harbor helm needs PR to use non-core-host-named tls (wildcard), like *.k-space.ee; currently it gets its own cert (harbor.k-space.ee)
enabled: true
certSource: secret
secret:
secretName: "harbor-ingress"
loadBalancer:
name: harbor
ports:
httpPort: 80
httpsPort: 443
secretName: wildcard-tls
ingress:
hosts:
core: harbor.k-space.ee
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/hostname: harbor.k-space.ee
metallb.universe.tf/address-pool: elisa
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
labels: {}
sourceRanges: []
externalURL: https://harbor.k-space.ee
@ -48,7 +46,7 @@ persistence:
# Refer to
# https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect
# for the detail.
disableredirect: true
disableredirect: false
type: s3
s3:
# Set an existing secret for S3 accesskey and secretkey
@ -70,7 +68,6 @@ persistence:
#multipartcopychunksize: "33554432"
#multipartcopymaxconcurrency: 100
#multipartcopythresholdsize: "33554432"
# The initial password of Harbor admin. Change it from portal after launching Harbor
# or give an existing secret for it
@ -122,6 +119,8 @@ metrics:
trivy:
enabled: false
notary:
enabled: false
database:
type: "external"
@ -143,49 +142,3 @@ redis:
addr: "dragonfly:6379"
username: ""
password: "MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge"
nginx:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
portal:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
core:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
jobservice:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
registry:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule

Some files were not shown because too many files have changed in this diff Show More