Compare commits

...

245 Commits

Author SHA1 Message Date
e4dfde9562 argo docs 2 2024-12-15 06:34:47 +02:00
a82193f059 add argocd-image-updater 2024-12-15 06:28:42 +02:00
68a75b8389 migrate OIDC codemowers.io/v1alpha1 to v1beta1 2024-12-15 05:39:41 +02:00
5368fe90eb argo: add localhost callback for CLI login 2024-12-15 05:39:41 +02:00
cded6fde3f fixup argo docs 2024-12-15 05:39:41 +02:00
402ff86fde grafana: disable non-oauth login 2024-12-15 01:46:22 +02:00
272f60ab73 monitoring: mikrotik-exporter fix 2024-11-22 08:16:12 +02:00
9bcad2481b monitoring: Update node-exporter 2024-11-22 05:59:34 +02:00
c04a7b7f67 monitoring: Update mikrotik-exporter 2024-11-22 05:59:08 +02:00
c23fa07c5e monitoring: Update mikrotik-exporter 2024-11-19 15:48:31 +02:00
c1822888ec dont compile discourse assets 2024-10-25 14:44:27 +03:00
e26cac6d86 add discourse 2024-10-25 14:35:20 +03:00
d7ba4bc90e upgrade cnpg 2024-10-25 14:03:50 +03:00
da4df6c21d frigate: move storage to dedicated nfs share and offload transcoding to separate go2rtc deployment 2024-10-19 13:51:13 +03:00
2964034cd3 fix rosdump scheduling 2024-10-18 18:45:42 +03:00
ae525380b1 fix gitea oidc reg 2024-10-18 18:44:27 +03:00
4b9c3ad394 monitoring: Temporarily disable monitoring of core switches 2024-10-15 10:07:28 +03:00
dbebb39749 gitea: Bump version 2024-10-02 08:15:20 +03:00
6f15e45402 freeswitch: fix network policy 2024-10-01 22:32:16 +03:00
36bf431259 freeswitch: fix network policy 2024-10-01 20:27:08 +03:00
c14a313c57 frigate: enable recording and use openvino 2024-09-29 23:06:41 +03:00
15a2fd9375 add frigate 2024-09-29 21:34:31 +03:00
5bd6cf2317 freeswitch: add gitignore 2024-09-29 19:05:42 +03:00
407f691152 add freeswitch 2024-09-29 19:05:42 +03:00
e931f490c2 asterisk: update network policy 2024-09-29 19:05:42 +03:00
b96e8d16a6 expose harbor via traefik 2024-09-29 19:05:42 +03:00
15d4d44be7 expose traefik via ingress 2024-09-29 19:05:42 +03:00
52ce6eab0a expose harbor via traefik 2024-09-29 19:04:51 +03:00
e89d045f38 goredirect: add nopath env var 2024-09-13 21:54:49 +03:00
7e70315514 monitoring: Fix snmp-exporter 2024-09-12 22:15:10 +03:00
af5a048bcd replace ups 2024-09-12 21:54:46 +03:00
0005219f81 monitoring: Fix mikrotik-exporter formatting 2024-09-12 21:48:43 +03:00
813bb32e48 monitoring: Update UPS-es 2024-09-12 21:47:20 +03:00
0efae7baf9 unschedule harbor from storage nodes 2024-09-12 19:48:51 +03:00
be90b4e266 monitoring: Update mikrotik-exporter 2024-09-09 22:19:46 +03:00
999d17c384 rosdump: Use codemowers/git image 2024-09-09 08:45:21 +03:00
bacef8d438 remove logmower 2024-09-08 23:54:32 +03:00
60d1ba9b18 monitoring: Bump mikrotik-exporter again 2024-09-06 12:10:45 +03:00
dcb80e6638 monitoring: Bump mikrotik-exporter 2024-09-06 11:55:49 +03:00
95e0f97db2 grafana: Specify OIDC scopes on Grafana side 2024-09-05 09:32:34 +03:00
f5a7b44ae6 grafana: Add groups OIDC scope 2024-09-05 09:29:16 +03:00
be7e1d9459 grafana: Assign editor role for hackerspace members 2024-09-05 09:23:41 +03:00
cd807ebcde grafana: Allow OIDC assignment to admin role 2024-09-05 09:04:02 +03:00
eaac7f61a7 monitoring: Pin specific mikrotik-exporter image 2024-09-04 23:29:37 +03:00
a0d5a585e4 add and configure calico ippool 2024-09-04 23:12:35 +03:00
1f8f288f95 monitoring: Update Mikrotik exporter 2024-09-04 22:33:15 +03:00
9de1881647 monitoring: Enable Prometheus admin API 2024-09-04 22:28:01 +03:00
28904cdd63 make calico use ipip encapsulation 2024-09-04 22:27:36 +03:00
0df188db36 monitoring: README syntax fix 2024-09-04 07:12:56 +03:00
a42b79b5ac monitoring: Add doc.crds.dev ref 2024-09-04 07:12:21 +03:00
89875a66f8 update passmower config 2024-08-29 14:38:44 +03:00
927366a3d5 inventory: add groups scope 2024-08-28 16:15:07 +03:00
29212d7f14 passmower: get charts from ghcr 2024-08-27 15:58:05 +03:00
1d8528b312 argocd: Move to DragonflyDB and add resource customizations 2024-08-27 12:41:24 +03:00
566beecb6a Create dummy/stub entries in auth.k-space.ee 2024-08-26 23:51:04 +03:00
4c52ca88ef add proxmox-nas storage class 2024-08-25 11:34:31 +03:00
b5fceb0f35 Update storage classes 2024-08-25 09:26:57 +03:00
c609b1df04 wildduck: Restore MongoDB 2024-08-25 09:26:27 +03:00
22d65664b2 whoami: Set higher port 2024-08-25 00:25:49 +03:00
59db08e891 whoami: Fix memory limit 2024-08-25 00:22:54 +03:00
d8402bdec5 whoami: Drop privileges 2024-08-25 00:21:24 +03:00
a71bd5de37 whoami: Add resource limits 2024-08-25 00:17:34 +03:00
ce9891046f wildduck: Add resource limits 2024-08-25 00:12:25 +03:00
fea3e8ce66 nextcloud: Fix Dragonfly topology spread constraints 2024-08-25 00:02:51 +03:00
bfeba4017b monitoring: Add revisionHistoryLimit: 0 2024-08-24 23:58:07 +03:00
4b00d876ad nextcloud: Set resource limits 2024-08-24 23:31:22 +03:00
d1e8d8e356 bind: Fix resource limits 2024-08-24 23:28:28 +03:00
22c6fe1979 bind: Add resource limits 2024-08-24 23:25:40 +03:00
f53b31e030 bind: Use topology spread constraint instead of anti affinity rules 2024-08-24 23:22:34 +03:00
cb41b739cc passmower: Fix Dragonfly topology spread constraints 2024-08-24 23:05:13 +03:00
91af1911c4 rm users, now at k-space/members 2024-08-24 22:53:35 +03:00
4532eccd6d proxy image artefacts through harbor 2024-08-24 19:36:10 +03:00
d4913aacbf add netshoot container to debug network issues 2024-08-24 19:23:35 +03:00
abe022eecc update argo readme 2024-08-24 19:23:17 +03:00
4bcb0a8856 fix members argo app 2024-08-24 19:19:27 +03:00
b849ac340e fix members argo app 2024-08-24 19:12:31 +03:00
b922412417 fix members argo app 2024-08-24 19:10:14 +03:00
2661fe211e manage members (oidcusers) with argocd 2024-08-24 19:05:53 +03:00
a9406748c5 manage members (oidcusers) with argocd 2024-08-24 19:01:40 +03:00
cc92ea67f4 upgrade wildduck components 2024-08-24 17:44:19 +03:00
222d902ec2 cleanup old oidc-gateway 2024-08-24 16:29:24 +03:00
65e30d5dec migrate most storage classes to proxmox-csi, allow it on masters 2024-08-24 16:29:24 +03:00
4210855827 freescout: Elaborate about mail sync 2024-08-24 15:49:05 +03:00
d7287018ac monitoring: Specify resource limits 2024-08-24 12:36:37 +03:00
3fbecab179 Move to PVE CSI provider 2024-08-24 12:15:30 +03:00
024edc1c9b expose harbor via dedicated lb on storage nodes 2024-08-23 21:35:04 +03:00
a94a3f829c expose harbor via dedicated lb on storage nodes 2024-08-23 21:35:04 +03:00
36055cc869 migrate nextcloud to dragonfly 2024-08-23 21:35:04 +03:00
aa91322ec6 remove grafana pv as it's using db now 2024-08-23 21:35:04 +03:00
c6c94b1901 test proxmox csi 2024-08-23 17:10:55 +03:00
67fb6c3727 Consolidate monitoring stack to Kube master nodes 2024-08-23 08:00:23 +03:00
18483197c9 fix passmower image pulling 2024-08-22 14:12:54 +03:00
a37d268574 temporarily disable middleware from pve 2024-08-22 14:12:54 +03:00
4b5e30f51f monitoring: Revert snmp-exporter because config file needs to be updated 2024-08-21 07:20:32 +03:00
78b0f1534a monitoring: Use gcr mirror for node exporter 2024-08-21 07:19:15 +03:00
0b03a720b3 monitoring: Bump versions, use gcr mirror 2024-08-21 07:17:05 +03:00
f1a2051838 monitoring: Move to topologySpreadConstraints 2024-08-21 07:11:06 +03:00
3280b25a83 Add more revisionHistoryLimit: 1 defs 2024-08-20 12:25:15 +03:00
0eec1fde8b gitea: Add revisionHistoryLimit 2024-08-20 12:21:36 +03:00
ede08c205b grafana: Use declarative data sources 2024-08-20 12:14:42 +03:00
666d900128 Restore minio storage class 2024-08-16 18:50:33 +03:00
bc31357d5b Integrate dos4dev PR #29: postgres-cluster docs 2024-08-16 18:07:45 +03:00
f3244afb20 woodpecker: Use RWX 2024-08-15 22:23:45 +03:00
384a60244d update readme about network 2024-08-15 13:40:22 +03:00
ed25720003 run traefik with 4 replicas 2024-08-15 12:43:08 +03:00
5c1a894a43 add goredirect service manifest 2024-08-15 11:11:20 +03:00
0a9237fae9 wildduck: Limit CPU for Dragonfly 2024-08-15 10:58:34 +03:00
69dca7e1f2 wildduck: Add topologySpreadConstraints for Dragonfly 2024-08-15 09:52:38 +03:00
4d5c47e21b wildduck: Refined Dragonfly cleanup 2024-08-15 09:49:48 +03:00
b3f1eb069f wildduck: Cleanups 2024-08-15 09:37:24 +03:00
bbf421df63 wildduck: Use recreate strategy to avoid Kube scheduling deadlock 2024-08-15 09:24:16 +03:00
9bf5e2408a migrate workers to infra vlan, use bgp for calico, use calico for lb service annoucements 2024-08-14 18:16:21 +03:00
351f0ae746 Remove more Mongoose 2024-08-14 11:02:45 +03:00
84bb476812 Mongo migrated to external Mongo, removing in-cluster Mongo definitions temporarily 2024-08-14 11:00:26 +03:00
07a132748b Restore mongo storage class 2024-08-14 10:49:46 +03:00
656f28a34c Move yamllint config to separate file 2024-08-14 10:30:08 +03:00
12466b19b1 bind, cert-manager: More updates 2024-08-14 10:07:26 +03:00
1d39827375 bind, cert-manager: Cleanups 2024-08-14 10:04:41 +03:00
3f4d89b4b1 dragonfly-operator-system: Add grep example 2024-08-14 09:33:45 +03:00
474ae64156 tigera-operator: Update README 2024-08-14 09:19:00 +03:00
1fa0577ce4 passmower: Cleanup 2024-08-14 08:12:37 +03:00
f8cd93aa9c passmower: Fix Dragonfly topology spread constraints 2024-08-14 07:55:24 +03:00
e22bf78b2e dragonfly-operator-system: Add Redis license notice 2024-08-14 07:53:55 +03:00
be5b036ab8 longhorn-system: Reddit link 2024-08-14 07:42:24 +03:00
a75f703eaa longhorn-system: Update README 2024-08-14 07:41:25 +03:00
2708e48850 longhorn-system: README fix 2024-08-14 07:37:23 +03:00
cfc5a739a1 longhorn-system: Updates 2024-08-14 07:36:31 +03:00
e5e4a07d01 dragonfly-operator-system: Update README 2024-08-14 07:08:26 +03:00
f902bbfe02 dragonfly-operator-system: Update README 2024-08-14 07:00:16 +03:00
70e589ef45 etherpad: Cleanup 2024-08-14 06:58:28 +03:00
b0befbcd69 freescout: Cleanup 2024-08-14 06:57:36 +03:00
a09f7d4f7e remove rawfile-csi 2024-08-13 20:27:16 +03:00
2f2fa1a99f migrate inventory to external s3 2024-08-13 20:18:58 +03:00
66fbf32088 migrate wildduck to external mongo 2024-08-13 20:18:47 +03:00
9b698ea197 freescout: Remove unused reset-oidc-config.yaml 2024-08-13 14:51:33 +03:00
7aa26ea236 passmower: Add topologySpreadConstraints 2024-08-13 14:50:25 +03:00
7c16f84200 monitoring: Elaborate more about operator 2024-08-12 22:15:32 +03:00
c2d08d8a80 monitoring: Update README.md 2024-08-12 22:06:28 +03:00
7c2b862ca8 Move Ansible directory to separate repo 2024-08-12 21:41:36 +03:00
68e936463b chore: make tegra jetson a misc node 2024-08-12 11:45:35 +03:00
8a1b0b52af add new worker9 2024-08-08 22:39:35 +03:00
6b24ede7ac Upgrade to Kubernetes 1.30 2024-08-08 19:45:46 +03:00
e0cf532e42 Upgrade to Kubernetes 1.29 2024-08-08 18:55:02 +03:00
59373041cc passmower: run in 3 replicas 2024-08-08 15:53:53 +03:00
4e80899c77 Prepare for separation of ansible Git repo 2024-08-08 12:56:25 +03:00
9c2b5c39ee fix/update harbor 2024-08-08 12:45:57 +03:00
d3eb888d58 doc: inventory: reference rosdump 2024-08-08 12:40:54 +03:00
3714b174e7 camtiler: disable, it broken 2024-08-03 09:03:14 +03:00
a1acb06e12 traefik: publish services (for argo healthy) 2024-08-03 09:03:13 +03:00
0b6ab650a2 argo: add apps (already) in argo to git (config drift) 2024-08-03 09:03:11 +03:00
35404464f4 argo: strongarm autosync to prevent further config drift
Commenting empty syncPolicy, otherwise argocd sees it as diff
2024-08-03 08:01:55 +03:00
41da5931f9 auth migra: whoami 2024-08-03 06:04:27 +03:00
6879a4e5a5 argo: drone no longer exists 2024-08-03 06:04:27 +03:00
9b2c655a02 camtiler: unify to cam.k-space.ee 2024-08-03 06:04:27 +03:00
8876300dc4 argo config drift: camtiler 2024-08-03 06:04:24 +03:00
8199b3b732 argo config drift: wildduck
Change for apps/StatefulSet/wildduck/wildduck-operator
caused by 2d25377090 applied by ArgoCD:
-      serviceAccountName: codemowers-io-wildduck-operator
+      serviceAccountName: codemowers-cloud-wildduck-operator
2024-08-03 05:35:31 +03:00
43c9b3aa93 argo config drift: woodpecker 2024-08-03 05:35:31 +03:00
504bd3012e argo config drift: doorboy 2024-08-03 04:27:31 +03:00
75b5d39880 signs: deploy with argo 2024-08-03 04:27:31 +03:00
7377b62b3f doc: readme tip + todo for argo 'user-facing' doc 2024-08-03 04:27:31 +03:00
cd13de6cee doc: Reword backlink warning
we already got more broken links :/
I don't really want it to be an agressive warn.
2024-08-03 04:27:31 +03:00
13da9a8877 Add redirects sign.k-space.ee, members.k-space.ee
There still are dead inventory links with members.k-space.ee
2024-08-03 04:27:31 +03:00
490770485d fixup auth2 → auth rename 2024-08-03 04:27:20 +03:00
ba48643a37 inventory: tls host is k-space.ee, not codemowers
seems like copy-pasta typo
2024-08-03 01:44:15 +03:00
18a0079a21 chore: add eaas as contributor 2024-07-30 14:15:13 +03:00
885b13ecd7 chore: move doorboy to hackerspace 2024-07-30 14:13:25 +03:00
e17caa9c2d passmower: update login link template 2024-07-30 14:12:54 +03:00
336ab2efa2 update readme 2024-07-30 12:40:01 +03:00
27a5fe14c7 docs: commit todo items 2024-07-30 11:03:00 +03:00
66034d2463 docs: mega refactor
Also bunch of edits at wiki.k-space.ee
2024-07-30 10:51:34 +03:00
186ea5d947 docs: hackerspace / Inventory-app 2024-07-30 10:33:25 +03:00
470d4f3459 docs: Slack bots 2024-07-30 10:32:57 +03:00
8ad6b989e5 Migrate signs.k-space.ee from GitLab to kube
copy from ripe87
2024-07-30 10:18:40 +03:00
b6bf3ab225 passmower users: list prefix before name 2024-07-30 08:00:14 +03:00
7cac31964d docs: camtiler & doors 2024-07-30 06:13:56 +03:00
a250363bb0 rm replaced-unused mysql-operator 2024-07-30 02:56:50 +03:00
480ff4f426 update passmower deployment 2024-07-29 15:59:45 +03:00
b737d37b9c fmt ansible: compact and more readable 2024-07-28 22:28:30 +03:00
b4ad080e95 zrepl: enable prometheus for offsite 2024-07-28 21:46:26 +03:00
Simon a5ad80d8cd Make login url clickable in emails 2024-07-28 18:42:38 +00:00
62be47c2e1 inventory: add ingress and other manifests 2024-07-28 20:58:25 +03:00
249ad2e9ed fix and update harbor install 2024-07-28 20:22:08 +03:00
0c38d2369b attempt to get kibana working 2024-07-28 20:22:08 +03:00
b07a5b9bc0 reconfigure grub only on x86 nodes 2024-07-28 20:22:08 +03:00
2d25377090 wildduck: migrate to dragonfly, disable network policies, upgrade wildduck-operator 2024-07-28 20:22:08 +03:00
73d185b2ee fix redirects 2024-07-28 20:22:08 +03:00
0eb2dc6503 deprecate crunchydata postgres operator 2024-07-28 20:22:08 +03:00
34f1b53544 zrepl: prometheus target 2024-07-28 20:00:51 +03:00
fd1aeaa1a3 Upgrade Calico 2024-07-28 10:38:25 +03:00
b8477de6a8 Upgrade cert-manager 2024-07-28 10:37:34 +03:00
2f712a935e fixup: nas root is not encrypted and failed 2024-07-28 03:32:11 +03:00
792ff38bea mv zrepl.yml to playbook.yml 2024-07-28 03:31:16 +03:00
e929b52e6d Fix ansible.cfg 2024-07-28 01:42:55 +03:00
b2b93879c2 mv to ansible/ 2024-07-27 23:55:16 +03:00
c222f22768 fix zrepl playbook 2024-07-27 23:54:29 +03:00
28ed62c40e migrate wildflock to new passmower 2024-07-27 23:51:04 +03:00
74600efb4c zrepl 2024-07-27 23:49:45 +03:00
79aaaf7498 add todo 2024-07-27 23:08:39 +03:00
f0b78f7b17 migrate grafana to new passmower and external db 2024-07-27 23:08:29 +03:00
ba520da57e update readme 2024-07-27 23:08:15 +03:00
30503ad121 update readme 2024-07-27 23:06:20 +03:00
fbe4a55251 migrate gitea to new passmower 2024-07-27 22:57:01 +03:00
37567eccf9 migrate wiki to new passmower 2024-07-27 22:57:01 +03:00
d3ba1cc05f add openebs-localpath 2024-07-27 22:57:01 +03:00
61b1b1d6ef migrate woodpecker to external mysql 2024-07-27 22:57:01 +03:00
1e8bccbfa3 migrate to new passmower 2024-07-27 22:57:01 +03:00
e89edca340 enable xfs quotas on worker node rootfs 2024-07-27 22:57:01 +03:00
2bb13ef505 manage kube-apiserver manifest with ansible 2024-07-27 22:57:01 +03:00
c44cfb8bc8 fix kubelogin 2024-07-27 22:57:01 +03:00
417f3ddcb8 Update storage nodes and readd Raspberry Pi 400 2024-07-27 22:11:38 +03:00
32fbd498cf Fix typo 2024-07-27 11:46:39 +03:00
97563e8092 Upgrade ECK operator 2024-07-27 10:50:17 +03:00
4141c6b8ae Add OpenSearch operator 2024-07-27 08:42:16 +03:00
bd26aa46b4 Upgrade Etherpad 2024-07-27 08:31:56 +03:00
92459ed68b Reorder SSH key update playbook 2024-07-27 08:30:53 +03:00
9cf57d8bc6 Upgrade MetalLB 2024-07-27 08:30:53 +03:00
af1c78dea6 deprecate members.k-space.ee 2024-07-27 03:17:24 +03:00
2e77813162 migrate to new passmower 2024-07-27 03:17:24 +03:00
ca623c11fd Update kubeadm, kubectl, kubelet deployment 2024-07-27 01:06:20 +03:00
047cbb5c6b traefik: upgrade to 3.1, migrate dashboard via ingressroute 2024-07-27 00:06:07 +03:00
3e52f37cde Add DragonflyDB operator 2024-07-26 17:46:45 +03:00
b955369e2a Upgrade CloudNativePG to 1.23.2 2024-07-26 17:35:42 +03:00
5e765e9788 Use Codemower's image for mikrotik-exporter 2024-07-26 14:15:18 +03:00
5d4f49409c Remove Keel annotations 2024-07-26 13:56:13 +03:00
de573721bd Deprecate Drone as it's devs moved on to develop Gitness 2024-07-26 13:51:55 +03:00
c868a62ab7 Update to Woodpecker 2.7.0 2024-07-26 13:26:24 +03:00
7b6f6252a5 Update external-dns 2024-07-26 13:16:49 +03:00
9223c956c0 Update Bind 9.19 to 9.20 2024-07-26 13:16:22 +03:00
1d4e5051d8 Add Prusa 3D printer web endpoint 2024-07-26 13:03:20 +03:00
56bb5be8a9 grafana: Upgrade and fix ACL
:# Please enter the commit message for your changes. Lines starting
2024-07-26 12:36:08 +03:00
d895360510 monitoring: Upgrade node-exporter 2024-07-25 19:17:24 +03:00
bc8de58ca8 monitoring: Upgrade blackbox-exporter 2024-07-25 19:17:24 +03:00
8d355ff9dc Update Prometheus operator 2024-07-25 19:17:24 +03:00
dc2a08dc78 goredirect: fix mongo uri 2024-07-24 12:51:53 +03:00
19a0b70b9e woodpecker: fix agent 2024-07-19 19:49:32 +03:00
9c656b0ef9 woodpecker: restore storage from backup 2024-07-19 18:13:09 +03:00
278817249e Add Ansible tasks to update authorized SSH keys 2024-07-19 14:08:51 +03:00
cb5644c7f3 Ansible SSH multiplexing fixes 2024-07-19 12:55:40 +03:00
78ef148f83 Add Ansible playbook to update known_hosts and ssh_config 2024-07-19 11:49:47 +03:00
c2b9ed0368 inventory: migrate to external mogno 2024-07-17 23:58:38 +03:00
43abf125a9 pve: add pve-internal.k-space.ee for pve-csi in whitelisted codemowers.cloud cluster 2024-07-17 17:59:59 +03:00
71d968a815 Upgrade longhorn to 1.6.2 2024-07-07 14:38:02 +03:00
9b4976450f Upgrade longhorn to 1.5.5 2024-07-07 14:00:27 +03:00
27eb0aa6cc Bump Gitea to 1.22.1 2024-07-04 16:26:06 +03:00
f97a77e5aa rm dev.k-space.ee, VM deprecated 2024-06-20 17:27:35 +03:00
202 changed files with 13610 additions and 92831 deletions

@@ -1,10 +0,0 @@
---
kind: pipeline
type: kubernetes
name: gitleaks
steps:
- name: gitleaks
  image: zricethezav/gitleaks
  commands:
  - gitleaks detect --source=/drone/src

.gitignore

@@ -1,3 +1,4 @@
 *.keys
 *secrets.yml
 *secret.yml
+*.swp

.yamllint

@@ -0,0 +1,4 @@
extends: default
ignore-from-file: .gitignore
rules:
line-length: disable

CLUSTER.md

@@ -0,0 +1,170 @@
# Kubernetes cluster
Kubernetes hosts run on [PVE Cluster](https://wiki.k-space.ee/en/hosting/proxmox). Hosts are listed in Ansible [inventory](ansible/inventory.yml).
## `kubectl`
- Authorization [ACLs](cluster-role-bindings.yml)
- [Troubleshooting `no such host`](#systemd-resolved-issues)
Authenticate to auth.k-space.ee:
```bash
kubectl krew install oidc-login
mkdir -p ~/.kube
cat << EOF > ~/.kube/config
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXdNakEzTXpVMU1Wb1hEVE15TURReU9UQTNNelUxTVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS2J2CjY3UFlXVHJMc3ZCQTZuWHUvcm55SlVhNnppTnNWTVN6N2w4ekhxM2JuQnhqWVNPUDJhN1RXTnpUTmZDanZBWngKTmlNbXJya1hpb2dYQWpVVkhSUWZlYm81TFIrb0JBOTdLWlcrN01UMFVJRXBuWVVaaTdBRHlaS01vcEJFUXlMNwp1SlU5UDhnNUR1T29FRHZieGJSMXFuV1JZRXpteFNmSFpocllpMVA3bFd4emkxR243eGRETFZaMjZjNm0xR3Y1CnViRjZyaFBXK1JSVkhiQzFKakJGeTBwRXdhYlUvUTd0Z2dic0JQUjk5NVZvMktCeElBelRmbHhVanlYVkJ3MjEKU2d3ZGI1amlpemxEM0NSbVdZZ0ZrRzd0NTVZeGF3ZmpaQjh5bW4xYjhUVjkwN3dRcG8veU8zM3RaaEE3L3BFUwpBSDJYeDk5bkpMbFVGVUtSY1A4Q0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZKNnZKeVk1UlJ1aklQWGxIK2ZvU3g2QzFRT2RNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ04zcGtCTVM3ekkrbUhvOWdTZQp6SzdXdjl3bXlCTVE5Q3crQXBSNnRBQXg2T1VIN0d1enc5TTV2bXNkYjkrYXBKMHBlZFB4SUg3YXZ1aG9SUXNMCkxqTzRSVm9BMG9aNDBZV3J3UStBR0dvdkZuaWNleXRNcFVSNEZjRXc0ZDRmcGl6V3d0TVNlRlRIUXR6WG84V2MKNFJGWC9xUXNVR1NWa01PaUcvcVVrSFpXQVgyckdhWXZ1Tkw2eHdSRnh5ZHpsRTFSUk56TkNvQzVpTXhjaVRNagpackEvK0pqVEFWU2FuNXZnODFOSmthZEphbmNPWmEwS3JEdkZzd1JJSG5CMGpMLzh3VmZXSTV6czZURU1VZUk1ClF6dU01QXUxUFZ4VXZJUGhlMHl6UXZjWDV5RlhnMkJGU3MzKzJBajlNcENWVTZNY2dSSTl5TTRicitFTUlHL0kKY0pjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://master.kube.k-space.ee:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: oidc
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: oidc
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      args:
        - oidc-login
        - get-token
        - --oidc-issuer-url=https://auth.k-space.ee/
        - --oidc-client-id=passmower.kubelogin
        - --oidc-use-pkce
        - --oidc-extra-scope=profile,email,groups
        - --listen-address=127.0.0.1:27890
      command: kubectl
      env: null
      provideClusterInfo: false
EOF
# Test it:
kubectl get nodes # opens browser for authentication
```
### systemd-resolved issues
```sh
Unable to connect to the server: dial tcp: lookup master.kube.k-space.ee on 127.0.0.53:53: no such host
```
```
Network → VPN → `IPv4` → Other nameservers (Muud nimeserverid): `172.21.0.1`
Network → VPN → `IPv6` → Other nameservers (Muud nimeserverid): `2001:bb8:4008:21::1`
Network → VPN → `IPv4` → Search domains (Otsingudomeenid): `kube.k-space.ee`
Network → VPN → `IPv6` → Search domains (Otsingudomeenid): `kube.k-space.ee`
```
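The same settings can be applied from the CLI with systemd-resolved's `resolvectl`; a sketch assuming the VPN interface is called `vpn0` (substitute your actual interface name):
```bash
# Nameservers for the VPN link only
resolvectl dns vpn0 172.21.0.1 2001:bb8:4008:21::1
# Route kube.k-space.ee lookups through that link
resolvectl domain vpn0 kube.k-space.ee
```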
## Cluster formation
Created Ubuntu 22.04 VMs on Proxmox with local storage.
Added some ARM64 workers using Ubuntu 22.04 Server on Raspberry Pi.
After the machines have booted up and you can reach them via SSH:
```
# Disable Ubuntu caching DNS resolver
systemctl disable systemd-resolved.service
systemctl stop systemd-resolved
rm -fv /etc/resolv.conf
cat > /etc/resolv.conf << EOF
nameserver 1.1.1.1
nameserver 8.8.8.8
EOF
# Disable multipathd as Longhorn handles that itself
systemctl mask multipathd snapd
systemctl disable --now multipathd snapd bluetooth ModemManager hciuart wpa_supplicant packagekit
# Permit root login
sed -i -e 's/PermitRootLogin no/PermitRootLogin without-password/' /etc/ssh/sshd_config
systemctl reload ssh
cat ~ubuntu/.ssh/authorized_keys > /root/.ssh/authorized_keys
userdel -f ubuntu
apt-get install -yqq linux-image-generic
apt-get remove -yq cloud-init linux-image-*-kvm
```
On master:
```
kubeadm init --token-ttl=120m --pod-network-cidr=10.244.0.0/16 --control-plane-endpoint "master.kube.k-space.ee:6443" --upload-certs --apiserver-cert-extra-sans master.kube.k-space.ee --node-name master1.kube.k-space.ee
```
For the `kubeadm join` command specify FQDN via `--node-name $(hostname -f)`.
Set AZ labels:
```
for j in $(seq 1 9); do
  for t in master mon worker storage; do
    kubectl label nodes ${t}${j}.kube.k-space.ee topology.kubernetes.io/zone=node${j}
  done
done
```
After forming the cluster add taints:
```bash
for j in $(seq 1 9); do
  kubectl label nodes worker${j}.kube.k-space.ee node-role.kubernetes.io/worker=''
done
for j in $(seq 1 4); do
  kubectl taint nodes mon${j}.kube.k-space.ee dedicated=monitoring:NoSchedule
  kubectl label nodes mon${j}.kube.k-space.ee dedicated=monitoring
done
for j in $(seq 1 4); do
  kubectl taint nodes storage${j}.kube.k-space.ee dedicated=storage:NoSchedule
  kubectl label nodes storage${j}.kube.k-space.ee dedicated=storage
done
```
For `arm64` nodes add a suitable taint to prevent scheduling non-multiarch images on them (a tolerating workload is sketched after this block):
```bash
kubectl taint nodes worker9.kube.k-space.ee arch=arm64:NoSchedule
```
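A workload that is built multi-arch can opt in to such nodes with a matching toleration; a minimal sketch (the Deployment name and image are illustrative):
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: multiarch-example   # hypothetical workload
spec:
  replicas: 1
  selector:
    matchLabels:
      app: multiarch-example
  template:
    metadata:
      labels:
        app: multiarch-example
    spec:
      tolerations:
        # Tolerate the arch=arm64:NoSchedule taint set above
        - key: arch
          operator: Equal
          value: arm64
          effect: NoSchedule
      containers:
        - name: app
          image: alpine:3    # multi-arch image
          command: ["sleep", "infinity"]
```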
For door controllers:
```
for j in ground front back; do
  kubectl taint nodes door-${j}.kube.k-space.ee dedicated=door:NoSchedule
  kubectl label nodes door-${j}.kube.k-space.ee dedicated=door
  kubectl taint nodes door-${j}.kube.k-space.ee arch=arm64:NoSchedule
done
```
To reduce wear on storage:
```
echo StandardOutput=null >> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
systemctl daemon-reload
systemctl restart kubelet
```
## Technology mapping
Our self-hosted Kubernetes stack compared to AWS-based deployments:
| Hipster startup | Self-hosted hackerspace | Purpose |
|-------------------|-------------------------------------|---------------------------------------------------------------------|
| AWS ALB | Traefik | Reverse proxy also known as ingress controller in Kubernetes jargon |
| AWS AMP | Prometheus Operator | Monitoring and alerting |
| AWS CloudTrail | ECK Operator | Log aggregation |
| AWS DocumentDB | MongoDB Community Operator | Highly available NoSQL database |
| AWS EBS | Longhorn | Block storage for arbitrary applications needing persistent storage |
| AWS EC2 | Proxmox | Virtualization layer |
| AWS ECR | Harbor | Docker registry |
| AWS EKS | kubeadm | Provision Kubernetes master nodes |
| AWS NLB | MetalLB | L2/L3 level load balancing |
| AWS RDS for MySQL | MySQL Operator | Provision highly available relational databases |
| AWS Route53 | Bind and RFC2136 | DNS records and Let's Encrypt DNS validation |
| AWS S3 | Minio Operator | Highly available object storage |
| AWS VPC | Calico | Overlay network |
| Dex | Passmower | ACL mapping and OIDC provider which integrates with GitHub/Samba |
| GitHub Actions | Woodpecker | Build Docker images |
| GitHub | Gitea | Source code management, issue tracking |
| GitHub OAuth2 | Samba (Active Directory compatible) | Source of truth for authentication and authorization |
| Gmail | Wildduck | E-mail |

@@ -10,3 +10,4 @@ this Git repository happen:
 * Song Meo <songmeo@k-space.ee>
 * Rasmus Kallas <rasmus@k-space.ee>
 * Kristjan Kuusk <kkuusk@k-space.ee>
+* Erki Aas <eaas@k-space.ee>

README.md

@@ -1,230 +1,52 @@
# Kubernetes cluster manifests
# k-space.ee infrastructure
Kubernetes manifests, Ansible [playbooks](ansible/README.md), and documentation for K-SPACE services.
## Introduction
<!-- TODO: Docs for adding to ArgoCD (auto-)sync -->
- Repo is deployed with [ArgoCD](https://argocd.k-space.ee). For `kubectl` access, see [CLUSTER.md](CLUSTER.md#kubectl).
- Debugging Kubernetes [on Wiki](https://wiki.k-space.ee/en/hosting/debugging-kubernetes)
- Need help? → [`#kube`](https://k-space-ee.slack.com/archives/C02EYV1NTM2)
These are the Kubernetes manifests of services running on k-space.ee domains.
The applications are listed on https://auth2.k-space.ee for authenticated users.
Jump to docs: [inventory-app](hackerspace/README.md) / [cameras](camtiler/README.md) / [doors](https://wiki.k-space.ee/en/hosting/doors) / [list of apps](https://auth.k-space.ee) // [all infra](ansible/inventory.yml) / [network](https://wiki.k-space.ee/en/hosting/network/sensitive) / [retro](https://wiki.k-space.ee/en/hosting/retro) / [non-infra](https://wiki.k-space.ee)
Tip: Search the repo for `kind: xyz` for examples.
## Cluster access
## Supporting services
- Build [Git](https://git.k-space.ee) repositories with [Woodpecker](https://woodpecker.k-space.ee).
- Passmower: Authz with `kind: OIDCClient` (or `kind: OIDCMiddlewareClient`[^authz]).
- Traefik[^nonginx]: Expose services with `kind: Service` + `kind: Ingress` (TLS and DNS **included**); a minimal sketch follows this list.
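A minimal Ingress sketch under these conventions (hostname, namespace, and service name are illustrative; the `tls:` section is what drives certificate handling, and DNS follows from the host):
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example          # hypothetical service
  namespace: example
spec:
  rules:
    - host: example.k-space.ee
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example
                port:
                  number: 80
  tls:
    - hosts:
        - "*.k-space.ee"
```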
General discussion is happening in the `#kube` Slack channel.
### Additional
- bind: Manage _additional_ DNS records with `kind: DNSEndpoint` (sketch after this list).
- [Prometheus](https://wiki.k-space.ee/en/hosting/monitoring): Collect metrics with `kind: PodMonitor` (alerts with `kind: PrometheusRule`).
- [Slack bots](SLACK.md) and Kubernetes [CLUSTER.md](CLUSTER.md) itself.
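A minimal `DNSEndpoint` sketch, assuming the stock external-dns CRD (record name and target are illustrative):
```yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
  name: example-records   # hypothetical
  namespace: example
spec:
  endpoints:
    - dnsName: example.k-space.ee
      recordTTL: 300
      recordType: A
      targets:
        - 193.40.103.36   # illustrative target IP
```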
<!-- TODO: Redirects: external-dns.alpha.kubernetes.io/hostname + in -extras.yaml: IngressRoute and Middleware -->
<details><summary>Bootstrapping access</summary>
For bootstrap access obtain `/etc/kubernetes/admin.conf` from one of the master
nodes and place it under `~/.kube/config` on your machine.
[^nonginx]: No nginx annotations! Use `kind: Ingress` instead. `IngressRoute` is not used as it doesn't support [`external-dns`](bind/README.md) out of the box.
[^authz]: Applications should use OpenID Connect (`kind: OIDCClient`) for authentication, wherever possible. If that is not possible, use `kind: OIDCMiddlewareClient`, which provides authentication via a Traefik middleware (`traefik.ingress.kubernetes.io/router.middlewares: passmower-proxmox@kubernetescrd`). Sometimes you might use both for extra security.
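A sketch of the middleware variant described in the footnote above; the exact `OIDCMiddlewareClient` spec fields are an assumption here, so check an existing manifest in this repo for the authoritative shape:
```yaml
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
  name: example            # hypothetical
  namespace: example
spec:
  displayName: Example     # assumed field
  uri: https://example.k-space.ee
  allowedGroups:
    - k-space:floor        # assumed group name
```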
Once Passmower is working, OIDC access for others can be enabled by
running the following on Kubernetes masters:
### Network
```bash
patch /etc/kubernetes/manifests/kube-apiserver.yaml - << EOF
@@ -23,6 +23,10 @@
- --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
- --etcd-servers=https://127.0.0.1:2379
+ - --oidc-issuer-url=https://auth2.k-space.ee/
+ - --oidc-client-id=oidc-gateway.kubelogin
+ - --oidc-username-claim=sub
+ - --oidc-groups-claim=groups
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
EOF
sudo systemctl daemon-reload
systemctl restart kubelet
```
</details>
All nodes are in Infra VLAN 21. Routing is implemented with BGP; all nodes and the router form a full mesh. Both Service LB IPs and Pod IPs are advertised to the router. The router does NAT for outbound pod traffic.
See the [Calico installation](tigera-operator/application.yml) for the Kube side, and Routing / BGP on the router.
Static routes for 193.40.103.36/30 have been added on the PVE nodes to make their communication with Passmower via Traefik more stable; otherwise packets coming back to PVE are routed directly via VLAN 21 internal IPs by the worker nodes, breaking TCP.
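A sketch of the Calico side of this setup (the ASN and peer IP are illustrative; the authoritative configuration lives in [tigera-operator/application.yml](tigera-operator/application.yml)):
```yaml
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  nodeToNodeMeshEnabled: true
  asNumber: 64512                  # illustrative private ASN
  serviceLoadBalancerIPs:
    - cidr: 193.40.103.36/30       # advertise the Service LB range to the router
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: infra-router
spec:
  peerIP: 172.21.0.1               # illustrative router address in VLAN 21
  asNumber: 64512
```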
The following can be used to talk to the Kubernetes cluster using OIDC credentials:
<!-- Linked to by https://wiki.k-space.ee/e/en/hosting/storage -->
### Databases / -stores:
- KeyDB: `kind: KeydbClaim` (replaces Redis[^redisdead])
- Dragonfly: `kind: Dragonfly` (replaces Redis[^redisdead])
- Longhorn: `storageClassName: longhorn` (filesystem storage)
- Mongo[^mongoproblems]: `kind: MongoDBCommunity` (NAS* `inventory-mongodb`)
- Minio S3: `kind: MinioBucketClaim` with `class: dedicated` (NAS*: `class: external`); sketch after this list.
- MariaDB*: search for `mysql`, `mariadb`[^mariadb] (replaces MySQL)
- Postgres*: hardcoded to [harbor/application.yml](harbor/application.yml)
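A minimal bucket claim sketch; the `capacity` field name is an assumption based on the operator's usual shape:
```yaml
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim
metadata:
  name: example        # hypothetical
  namespace: example
spec:
  capacity: 1Gi        # assumed field
  class: dedicated
```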
```bash
kubectl krew install oidc-login
mkdir -p ~/.kube
cat << EOF > ~/.kube/config
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXdNakEzTXpVMU1Wb1hEVE15TURReU9UQTNNelUxTVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS2J2CjY3UFlXVHJMc3ZCQTZuWHUvcm55SlVhNnppTnNWTVN6N2w4ekhxM2JuQnhqWVNPUDJhN1RXTnpUTmZDanZBWngKTmlNbXJya1hpb2dYQWpVVkhSUWZlYm81TFIrb0JBOTdLWlcrN01UMFVJRXBuWVVaaTdBRHlaS01vcEJFUXlMNwp1SlU5UDhnNUR1T29FRHZieGJSMXFuV1JZRXpteFNmSFpocllpMVA3bFd4emkxR243eGRETFZaMjZjNm0xR3Y1CnViRjZyaFBXK1JSVkhiQzFKakJGeTBwRXdhYlUvUTd0Z2dic0JQUjk5NVZvMktCeElBelRmbHhVanlYVkJ3MjEKU2d3ZGI1amlpemxEM0NSbVdZZ0ZrRzd0NTVZeGF3ZmpaQjh5bW4xYjhUVjkwN3dRcG8veU8zM3RaaEE3L3BFUwpBSDJYeDk5bkpMbFVGVUtSY1A4Q0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZKNnZKeVk1UlJ1aklQWGxIK2ZvU3g2QzFRT2RNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ04zcGtCTVM3ekkrbUhvOWdTZQp6SzdXdjl3bXlCTVE5Q3crQXBSNnRBQXg2T1VIN0d1enc5TTV2bXNkYjkrYXBKMHBlZFB4SUg3YXZ1aG9SUXNMCkxqTzRSVm9BMG9aNDBZV3J3UStBR0dvdkZuaWNleXRNcFVSNEZjRXc0ZDRmcGl6V3d0TVNlRlRIUXR6WG84V2MKNFJGWC9xUXNVR1NWa01PaUcvcVVrSFpXQVgyckdhWXZ1Tkw2eHdSRnh5ZHpsRTFSUk56TkNvQzVpTXhjaVRNagpackEvK0pqVEFWU2FuNXZnODFOSmthZEphbmNPWmEwS3JEdkZzd1JJSG5CMGpMLzh3VmZXSTV6czZURU1VZUk1ClF6dU01QXUxUFZ4VXZJUGhlMHl6UXZjWDV5RlhnMkJGU3MzKzJBajlNcENWVTZNY2dSSTl5TTRicitFTUlHL0kKY0pjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://master.kube.k-space.ee:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: oidc
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: oidc
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      args:
        - oidc-login
        - get-token
        - --oidc-issuer-url=https://auth2.k-space.ee/
        - --oidc-client-id=oidc-gateway.kubelogin
        - --oidc-use-pkce
        - --oidc-extra-scope=profile,email,groups
        - --listen-address=127.0.0.1:27890
      command: kubectl
      env: null
      provideClusterInfo: false
EOF
```
\* External, hosted directly on [nas.k-space.ee](https://wiki.k-space.ee/en/hosting/storage)
For access control mapping see [cluster-role-bindings.yml](cluster-role-bindings.yml)
[^mariadb]: As of 2024-07-30 used by auth, authelia, bitwarden, etherpad, freescout, git, grafana, nextcloud, wiki, woodpecker
### systemd-resolved issues on access
```sh
Unable to connect to the server: dial tcp: lookup master.kube.k-space.ee on 127.0.0.53:53: no such host
```
```
Network → VPN → `IPv4` → Other nameservers (Muud nimeserverid): `172.21.0.1`
Network → VPN → `IPv6` → Other nameservers (Muud nimeserverid): `2001:bb8:4008:21::1`
Network → VPN → `IPv4` → Search domains (Otsingudomeenid): `kube.k-space.ee`
Network → VPN → `IPv6` → Search domains (Otsingudomeenid): `kube.k-space.ee`
```
[^redisdead]: Redis has been replaced as redis-operator couldn't handle itself: it didn't reconcile after reboots, the master URI was empty, and clients complained about missing masters. ArgoCD still hosts its own Redis.
# Technology mapping
[^mongoproblems]: Mongo problems: incompatible with rawfile CSI (wiredtiger.wt corrupts); resizing is complicated (PVCs come from the StatefulSet PVC template).
Our self-hosted Kubernetes stack compared to AWS-based deployments:
| Hipster startup | Self-hosted hackerspace | Purpose |
|-------------------|-------------------------------------|---------------------------------------------------------------------|
| AWS ALB | Traefik | Reverse proxy also known as ingress controller in Kubernetes jargon |
| AWS AMP | Prometheus Operator | Monitoring and alerting |
| AWS CloudTrail | ECK Operator | Log aggregation |
| AWS DocumentDB | MongoDB Community Operator | Highly available NoSQL database |
| AWS EBS | Longhorn | Block storage for arbitrary applications needing persistent storage |
| AWS EC2 | Proxmox | Virtualization layer |
| AWS ECR | Harbor | Docker registry |
| AWS EKS | kubeadm | Provision Kubernetes master nodes |
| AWS NLB | MetalLB | L2/L3 level load balancing |
| AWS RDS for MySQL | MySQL Operator | Provision highly available relational databases |
| AWS Route53 | Bind and RFC2136 | DNS records and Let's Encrypt DNS validation |
| AWS S3 | Minio Operator | Highly available object storage |
| AWS VPC | Calico | Overlay network |
| Dex | Passmower | ACL mapping and OIDC provider which integrates with GitHub/Samba |
| GitHub Actions | Drone | Build Docker images |
| GitHub | Gitea | Source code management, issue tracking |
| GitHub OAuth2 | Samba (Active Directory compatible) | Source of truth for authentication and authorization |
| Gmail | Wildduck | E-mail |
External dependencies running as classic virtual machines:
- Bind as DNS server
## Adding applications
Deploy applications via [ArgoCD](https://argocd.k-space.ee).
We use Traefik with Passmower for Ingress.
Where possible and applicable, applications should use `Remote-User`
authentication. This prevents exposing the application on the public Internet.
Otherwise use OpenID Connect for authentication;
see Argo itself as an example of how that is done.
See `camtiler/ingress.yml` for commented Ingress example.
Note that we do not use IngressRoute objects because they don't
support `external-dns` out of the box.
Do NOT add nginx annotations; we use Traefik.
Do NOT manually add DNS records; they are added by `external-dns`.
Do NOT manually create Certificate objects;
these are handled by the `tls:` section in Ingress.
## Cluster formation
Created Ubuntu 22.04 VMs on Proxmox with local storage.
Added some ARM64 workers using Ubuntu 22.04 Server on Raspberry Pi.
After the machines have booted up and you can reach them via SSH:
```
# Disable Ubuntu caching DNS resolver
systemctl disable systemd-resolved.service
systemctl stop systemd-resolved
rm -fv /etc/resolv.conf
cat > /etc/resolv.conf << EOF
nameserver 1.1.1.1
nameserver 8.8.8.8
EOF
# Disable multipathd as Longhorn handles that itself
systemctl mask multipathd snapd
systemctl disable --now multipathd snapd bluetooth ModemManager hciuart wpa_supplicant packagekit
# Permit root login
sed -i -e 's/PermitRootLogin no/PermitRootLogin without-password/' /etc/ssh/sshd_config
systemctl reload ssh
cat ~ubuntu/.ssh/authorized_keys > /root/.ssh/authorized_keys
userdel -f ubuntu
apt-get install -yqq linux-image-generic
apt-get remove -yq cloud-init linux-image-*-kvm
```
On master:
```
kubeadm init --token-ttl=120m --pod-network-cidr=10.244.0.0/16 --control-plane-endpoint "master.kube.k-space.ee:6443" --upload-certs --apiserver-cert-extra-sans master.kube.k-space.ee --node-name master1.kube.k-space.ee
```
For the `kubeadm join` command specify FQDN via `--node-name $(hostname -f)`.
Set AZ labels:
```
for j in $(seq 1 9); do
  for t in master mon worker storage; do
    kubectl label nodes ${t}${j}.kube.k-space.ee topology.kubernetes.io/zone=node${j}
  done
done
```
After forming the cluster add taints:
```bash
for j in $(seq 1 9); do
  kubectl label nodes worker${j}.kube.k-space.ee node-role.kubernetes.io/worker=''
done
for j in $(seq 1 4); do
  kubectl taint nodes mon${j}.kube.k-space.ee dedicated=monitoring:NoSchedule
  kubectl label nodes mon${j}.kube.k-space.ee dedicated=monitoring
done
for j in $(seq 1 4); do
  kubectl taint nodes storage${j}.kube.k-space.ee dedicated=storage:NoSchedule
  kubectl label nodes storage${j}.kube.k-space.ee dedicated=storage
done
```
For `arm64` nodes add suitable taint to prevent scheduling non-multiarch images on them:
```bash
kubectl taint nodes worker9.kube.k-space.ee arch=arm64:NoSchedule
```
For door controllers:
```
for j in ground front back; do
  kubectl taint nodes door-${j}.kube.k-space.ee dedicated=door:NoSchedule
  kubectl label nodes door-${j}.kube.k-space.ee dedicated=door
  kubectl taint nodes door-${j}.kube.k-space.ee arch=arm64:NoSchedule
done
```
To reduce wear on storage:
```
echo StandardOutput=null >> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
systemctl daemon-reload
systemctl restart kubelet
```
***
_This page is referenced by wiki [front page](https://wiki.k-space.ee) as **the** technical documentation for infra._

SLACK.md

@@ -0,0 +1,28 @@
## Slack bots
### Doorboy3
https://api.slack.com/apps/A05NDB6FVJQ
Slack app author: rasmus
Managed by inventory-app:
- Incoming open-commands go to `/api/slack/doorboy`; inventory-app authorizes based on the command originating from #members or #work-shop and on OIDC access group (floor, workshop).
- Posts logs to a private channel. Restricted to 193.40.103.0/24.
Secrets as `SLACK_DOORLOG_CALLBACK` and `SLACK_VERIFICATION_TOKEN`.
### oidc-gateway
https://api.slack.com/apps/A05DART9PP1
Slack app author: eaas
Managed by passmower:
- Links e-mail to slackId.
- Login via Slack (not enabled).
Secrets as `slackId` and `slack-client`.
### podi-podi uuenduste spämmikoobas
https://api.slack.com/apps/A033RE9TUFK
Slack app author: rasmus
Posts Prometheus alerts to a private channel.
Secret as `slack-secrets`.

ansible-bind-primary.yml

@@ -1,76 +0,0 @@
- name: Setup primary nameserver
  hosts: ns1.k-space.ee
  tasks:
    - name: Make sure bind9 is installed
      ansible.builtin.apt:
        name: bind9
        state: present
    - name: Configure Bind
      register: bind
      copy:
        dest: /etc/bind/named.conf
        content: |
          # This file is managed by Ansible
          # https://git.k-space.ee/k-space/kube/src/branch/master/ansible-bind-primary.yml
          # Do NOT modify manually
          include "/etc/bind/named.conf.local";
          include "/etc/bind/readwrite.key";
          include "/etc/bind/readonly.key";
          options {
              directory "/var/cache/bind";
              version "";
              listen-on { any; };
              listen-on-v6 { any; };
              pid-file "/var/run/named/named.pid";
              notify explicit; also-notify { 172.20.53.1; 172.20.53.2; 172.20.53.3; };
              allow-recursion { none; };
              recursion no;
              check-names master ignore;
              dnssec-validation no;
              auth-nxdomain no;
          };
          # https://kb.isc.org/docs/aa-00723
          acl allowed {
              172.20.3.0/24;
              172.20.4.0/24;
          };
          acl rejected { !allowed; any; };
          zone "." {
              type hint;
              file "/var/lib/bind/db.root";
          };
          zone "k-space.ee" {
              type master;
              file "/var/lib/bind/db.k-space.ee";
              allow-update { !rejected; key readwrite; };
              allow-transfer { !rejected; key readonly; key readwrite; };
          };
          zone "k6.ee" {
              type master;
              file "/var/lib/bind/db.k6.ee";
              allow-update { !rejected; key readwrite; };
              allow-transfer { !rejected; key readonly; key readwrite; };
          };
          zone "kspace.ee" {
              type master;
              file "/var/lib/bind/db.kspace.ee";
              allow-update { !rejected; key readwrite; };
              allow-transfer { !rejected; key readonly; key readwrite; };
          };
    - name: Check Bind config
      ansible.builtin.shell: "named-checkconf"
    - name: Reload Bind config
      service:
        name: bind9
        state: reloaded
      when: bind.changed

@@ -1,63 +0,0 @@
# ansible doors -m shell -a "ctr image pull harbor.k-space.ee/k-space/mjpg-streamer:latest"
# journalctl -u mjpg_streamer@video0.service -f
- name: Setup doors
  hosts: doors
  tasks:
    - name: Make sure containerd is installed
      ansible.builtin.apt:
        name: containerd
        state: present
    - name: Copy systemd service for Doorboy controller
      copy:
        dest: /etc/systemd/system/godoor.service
        content: |
          [Unit]
          Description=Doorboy service
          Documentation=https://git.k-space.ee/k-space/godoor
          After=network.target
          [Service]
          Environment=IMAGE=harbor.k-space.ee/k-space/godoor:latest
          ExecStartPre=-ctr task kill --signal=9 %N
          ExecStartPre=-ctr task rm %N
          ExecStartPre=-ctr c rm %N
          ExecStartPre=-ctr image pull $IMAGE
          ExecStart=ctr run --rm --pid-file=/run/%N.pid --privileged --read-only --env-file=/etc/godoor --env=KDOORPI_API_ALLOWED=https://doorboy-proxy.k-space.ee/allowed --env=KDOORPI_API_LONGPOLL=https://doorboy-proxy.k-space.ee/longpoll --env=KDOORPI_API_SWIPE=https://doorboy-proxy.k-space.ee/swipe --env=KDOORPI_DOOR=%H --net-host --net-host --cwd /app $IMAGE %N /godoor
          ExecStopPost=ctr task rm %N
          ExecStopPost=ctr c rm %N
          Restart=always
          [Install]
          WantedBy=multi-user.target
    - name: Enable Doorboy controller
      ansible.builtin.systemd:
        state: restarted
        daemon_reload: yes
        name: godoor.service
    - name: Copy systemd service for mjpg-streamer
      copy:
        dest: /etc/systemd/system/mjpg_streamer@.service
        content: |
          [Unit]
          Description=A server for streaming Motion-JPEG from a video capture device
          After=network.target
          ConditionPathExists=/dev/%I
          [Service]
          Environment=IMAGE=harbor.k-space.ee/k-space/mjpg-streamer:latest
          StandardOutput=tty
          Type=forking
          ExecStartPre=-ctr task kill --signal=9 %p_%i
          ExecStartPre=-ctr task rm %p_%i
          ExecStartPre=-ctr c rm %p_%i
          ExecStartPre=-ctr image pull $IMAGE
          ExecStart=ctr run --tty -d --rm --pid-file=/run/%i.pid --privileged --read-only --net-host $IMAGE %p_%i /usr/local/bin/mjpg_streamer -i 'input_uvc.so -d /dev/%I -r 1280x720 -f 10' -o 'output_http.so -w /usr/share/mjpg_streamer/www'
          ExecStopPost=ctr task rm %p_%i
          ExecStopPost=ctr c rm %p_%i
          PIDFile=/run/%i.pid
          [Install]
          WantedBy=multi-user.target
    - name: Enable mjpg-streamer
      ansible.builtin.systemd:
        state: restarted
        daemon_reload: yes
        name: mjpg_streamer@video0.service

@@ -1,81 +0,0 @@
---
- name: Reconfigure graceful shutdown for kubelet
  hosts: kubernetes
  tasks:
    - name: Reconfigure shutdownGracePeriod
      ansible.builtin.lineinfile:
        path: /var/lib/kubelet/config.yaml
        regexp: '^shutdownGracePeriod:'
        line: 'shutdownGracePeriod: 5m'
    - name: Reconfigure shutdownGracePeriodCriticalPods
      ansible.builtin.lineinfile:
        path: /var/lib/kubelet/config.yaml
        regexp: '^shutdownGracePeriodCriticalPods:'
        line: 'shutdownGracePeriodCriticalPods: 5m'
    - name: Work around unattended-upgrades
      ansible.builtin.lineinfile:
        path: /lib/systemd/logind.conf.d/unattended-upgrades-logind-maxdelay.conf
        regexp: '^InhibitDelayMaxSec='
        line: 'InhibitDelayMaxSec=5m0s'
- name: Pin kube components
  hosts: kubernetes
  tasks:
    - name: Pin packages
      loop:
        - kubeadm
        - kubectl
        - kubelet
      ansible.builtin.copy:
        dest: "/etc/apt/preferences.d/{{ item }}"
        content: |
          Package: {{ item }}
          Pin: version 1.26.*
          Pin-Priority: 1001
- name: Reset /etc/containers/registries.conf
  hosts: kubernetes
  tasks:
    - name: Copy /etc/containers/registries.conf
      ansible.builtin.copy:
        content: "unqualified-search-registries = [\"docker.io\"]\n"
        dest: /etc/containers/registries.conf
      register: registries
    - name: Restart CRI-O
      service:
        name: cri-o
        state: restarted
      when: registries.changed
- name: Reset /etc/modules
  hosts: kubernetes
  tasks:
    - name: Copy /etc/modules
      ansible.builtin.copy:
        content: |
          overlay
          br_netfilter
        dest: /etc/modules
      register: kernel_modules
    - name: Load kernel modules
      ansible.builtin.shell: "cat /etc/modules | xargs -L 1 -t modprobe"
      when: kernel_modules.changed
- name: Reset /etc/sysctl.d/99-k8s.conf
  hosts: kubernetes
  tasks:
    - name: Copy /etc/sysctl.d/99-k8s.conf
      ansible.builtin.copy:
        content: |
          net.ipv4.conf.all.accept_redirects = 0
          net.bridge.bridge-nf-call-iptables = 1
          net.ipv4.ip_forward = 1
          net.bridge.bridge-nf-call-ip6tables = 1
          vm.max_map_count = 524288
          fs.inotify.max_user_instances = 1280
          fs.inotify.max_user_watches = 655360
        dest: /etc/sysctl.d/99-k8s.conf
      register: sysctl
    - name: Reload sysctl config
      ansible.builtin.shell: "sysctl --system"
      when: sysctl.changed

ansible.cfg

@@ -1,12 +0,0 @@
[defaults]
ansible_managed = This file is managed by Ansible, manual changes will be overwritten.
inventory = inventory.yml
nocows = 1
pipelining = True
pattern =
deprecation_warnings = False
fact_caching = jsonfile
fact_caching_connection = ~/.ansible/k-space-fact-cache
[ssh_connection]
ssh_args = -F ssh_config

@@ -1,63 +1,11 @@
# Workflow
Most applications in our Kubernetes cluster are managed by ArgoCD.
Most notably, operators are NOT managed by ArgoCD.
Adding to `applications/`: `kubectl apply -f newapp.yaml`
# Deployment
To deploy ArgoCD:
```bash
helm repo add argo-cd https://argoproj.github.io/argo-helm
kubectl create secret -n argocd generic argocd-secret # Initialize empty secret for sessions
helm template -n argocd --release-name k6 argo-cd/argo-cd --include-crds -f values.yaml > argocd.yml
kubectl apply -f argocd.yml -f application-extras.yml -n argocd
kubectl -n argocd rollout restart deployment/k6-argocd-redis
kubectl -n argocd rollout restart deployment/k6-argocd-repo-server
kubectl -n argocd rollout restart deployment/k6-argocd-server
kubectl -n argocd rollout restart deployment/k6-argocd-notifications-controller
kubectl -n argocd rollout restart statefulset/k6-argocd-application-controller
kubectl label -n argocd secret oidc-client-argocd-owner-secrets app.kubernetes.io/part-of=argocd
```
# Setting up Git secrets
Generate SSH key to access Gitea:
## Managing applications
Update apps (see TODO below):
```
ssh-keygen -t ecdsa -f id_ecdsa -C argocd.k-space.ee -P ''
kubectl -n argocd create secret generic gitea-kube \
--from-literal=type=git \
--from-literal=url=git@git.k-space.ee:k-space/kube \
--from-file=sshPrivateKey=id_ecdsa
kubectl -n argocd create secret generic gitea-kube-staging \
--from-literal=type=git \
--from-literal=url=git@git.k-space.ee:k-space/kube-staging \
--from-file=sshPrivateKey=id_ecdsa
kubectl -n argocd create secret generic gitea-kube-members \
--from-literal=type=git \
--from-literal=url=git@git.k-space.ee:k-space/kube-members \
--from-file=sshPrivateKey=id_ecdsa
kubectl label -n argocd secret gitea-kube argocd.argoproj.io/secret-type=repository
kubectl label -n argocd secret gitea-kube-staging argocd.argoproj.io/secret-type=repository
kubectl label -n argocd secret gitea-kube-members argocd.argoproj.io/secret-type=repository
rm -fv id_ecdsa
```
Have a Gitea admin reset the password for user `argocd` and log in with that account.
Add the SSH key for user `argocd` from the file `id_ecdsa.pub`.
Delete any other SSH keys associated with the Gitea user `argocd`.
# Managing applications
To update apps:
```
for j in asterisk bind camtiler drone drone-execution etherpad freescout gitea grafana hackerspace nextcloud nyancat rosdump traefik wiki wildduck woodpecker; do
for j in asterisk bind camtiler etherpad freescout gitea grafana hackerspace nextcloud nyancat rosdump traefik wiki wildduck woodpecker; do
cat << EOF >> applications/$j.yaml
---
apiVersion: argoproj.io/v1alpha1
@@ -65,6 +13,10 @@ kind: Application
metadata:
  name: $j
  namespace: argocd
  annotations:
    # Works with only Kustomize and Helm. Kustomize is easy, see https://github.com/argoproj-labs/argocd-image-updater/tree/master/manifests/base for an example.
    argocd-image-updater.argoproj.io/image-list: TODO:^2 # semver 2.*.*
    argocd-image-updater.argoproj.io/write-back-method: git
spec:
  project: k-space.ee
  source:
@@ -74,8 +26,31 @@
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: $j
  syncPolicy: {}
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
EOF
done
find applications -name "*.yaml" -exec kubectl apply -n argocd -f {} \;
```
### Repository secrets
1. Generate keys locally with `ssh-keygen -f argo`
2. Add `argo.pub` in `git.k-space.ee/<your>/<repo>` → Settings → Deploy keys
3. Add `argo` (private key) at https://argocd.k-space.ee/settings/repos along with the referenced repo (declarative sketch below).
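Equivalently, the repository can be registered declaratively; a sketch assuming the standard ArgoCD repository-secret convention already used in the bootstrap commands above (the Secret name is illustrative):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: gitea-repo          # hypothetical name
  namespace: argocd
  labels:
    # This label is what makes ArgoCD pick the Secret up as a repository
    argocd.argoproj.io/secret-type: repository
stringData:
  type: git
  url: git@git.k-space.ee:k-space/kube.git
  sshPrivateKey: |
    -----BEGIN OPENSSH PRIVATE KEY-----
    ...
    -----END OPENSSH PRIVATE KEY-----
```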
## Argo Deployment
To deploy ArgoCD itself:
```bash
helm repo add argo-cd https://argoproj.github.io/argo-helm
kubectl create secret -n argocd generic argocd-secret # Empty secret for sessions
helm template -n argocd --release-name k6 argo-cd/argo-cd --include-crds -f values.yaml > argocd.yml
kubectl apply -f argocd.yml -f application-extras.yml -f redis.yaml -f monitoring.yml -n argocd
kubectl label -n argocd secret oidc-client-argocd-owner-secrets app.kubernetes.io/part-of=argocd
kubectl -n argocd rollout restart deployment/k6-argocd-redis deployment/k6-argocd-repo-server deployment/k6-argocd-server deployment/k6-argocd-notifications-controller statefulset/k6-argocd-application-controller
```

@@ -1,6 +1,6 @@
 ---
-apiVersion: codemowers.io/v1alpha1
-kind: OIDCGWClient
+apiVersion: codemowers.cloud/v1beta1
+kind: OIDCClient
 metadata:
   name: argocd
   namespace: argocd
@@ -9,6 +9,7 @@ spec:
   uri: https://argocd.k-space.ee
   redirectUris:
     - https://argocd.k-space.ee/auth/callback
+    - http://localhost:8085/auth/callback
   allowedGroups:
     - k-space:kubernetes:admins
   grantTypes:

@@ -2,16 +2,17 @@
 apiVersion: argoproj.io/v1alpha1
 kind: Application
 metadata:
-  name: whoami-oidc
+  name: argocd-applications
   namespace: argocd
 spec:
   project: k-space.ee
   source:
     repoURL: 'git@git.k-space.ee:k-space/kube.git'
-    path: whoami-oidc
+    path: argocd/applications
     targetRevision: HEAD
   destination:
     server: 'https://kubernetes.default.svc'
-    namespace: whoami-oidc
+    namespace: argocd
   syncPolicy:
-    automated: {}
+    automated:
+      prune: false

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: argocd-image-updater
  namespace: argocd
spec:
  project: k-space.ee
  source:
    repoURL: 'https://github.com/argoproj-labs/argocd-image-updater.git'
    path: manifests/base
    targetRevision: stable
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: argocd
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: asterisk
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: bind
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -1,16 +1,15 @@
----
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: camtiler
-  namespace: argocd
-spec:
-  project: k-space.ee
-  source:
-    repoURL: 'git@git.k-space.ee:k-space/kube.git'
-    path: camtiler
-    targetRevision: HEAD
-  destination:
-    server: 'https://kubernetes.default.svc'
-    namespace: camtiler
-  syncPolicy: {}
+# ---
+# apiVersion: argoproj.io/v1alpha1
+# kind: Application
+# metadata:
+#   name: camtiler
+#   namespace: argocd
+# spec:
+#   project: k-space.ee
+#   source:
+#     repoURL: 'git@git.k-space.ee:k-space/kube.git'
+#     path: camtiler
+#     targetRevision: HEAD
+#   destination:
+#     server: 'https://kubernetes.default.svc'
+#     namespace: camtiler

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: etherpad
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: freescout
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: gitea
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: grafana
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: hackerspace
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kubernetes-dashboard
  namespace: argocd
spec:
  project: default
  source:
    repoURL: 'git@git.k-space.ee:k-space/kube.git'
    path: kubernetes-dashboard
    targetRevision: HEAD
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: kubernetes-dashboard
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: members
  namespace: argocd
spec:
  project: k-space.ee
  source:
    repoURL: 'git@git.k-space.ee:k-space/members.git'
    path: members
    targetRevision: HEAD
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: passmower
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: minio-clusters
  namespace: argocd
spec:
  project: k-space.ee
  source:
    repoURL: 'git@git.k-space.ee:k-space/kube.git'
    path: minio-clusters
    targetRevision: HEAD
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: minio-clusters
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: monitoring
  namespace: argocd
spec:
  project: k-space.ee
  source:
    repoURL: 'git@git.k-space.ee:k-space/kube.git'
    path: monitoring
    targetRevision: HEAD
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: monitoring
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: mysql-clusters
  namespace: argocd
spec:
  project: k-space.ee
  source:
    repoURL: 'git@git.k-space.ee:k-space/kube.git'
    path: mysql-clusters
    targetRevision: HEAD
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: mysql-clusters
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: nextcloud
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: nyancat
-  syncPolicy: {}
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: postgres-clusters
  namespace: argocd
spec:
  project: k-space.ee
  source:
    repoURL: 'git@git.k-space.ee:k-space/kube.git'
    path: postgres-clusters
    targetRevision: HEAD
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: postgres-clusters
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: redis-clusters
  namespace: argocd
spec:
  project: k-space.ee
  source:
    repoURL: 'git@git.k-space.ee:k-space/kube.git'
    path: redis-clusters
    targetRevision: HEAD
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: redis-clusters
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true

@@ -2,15 +2,19 @@
 apiVersion: argoproj.io/v1alpha1
 kind: Application
 metadata:
-  name: drone
+  name: reloader
   namespace: argocd
 spec:
   project: k-space.ee
   source:
     repoURL: 'git@git.k-space.ee:k-space/kube.git'
-    path: drone
+    path: reloader
     targetRevision: HEAD
   destination:
     server: 'https://kubernetes.default.svc'
-    namespace: drone
-  syncPolicy: {}
+    namespace: reloader
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
destination:
server: 'https://kubernetes.default.svc'
namespace: rosdump
syncPolicy: {}
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@@ -2,15 +2,19 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: drone-execution
name: signs
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: drone-execution
path: signs
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: drone-execution
syncPolicy: {}
namespace: signs
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
destination:
server: 'https://kubernetes.default.svc'
namespace: traefik
syncPolicy: {}
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@@ -0,0 +1,20 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: whoami
namespace: argocd
spec:
project: default
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: whoami
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: whoami
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
destination:
server: 'https://kubernetes.default.svc'
namespace: wiki
syncPolicy: {}
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
destination:
server: 'https://kubernetes.default.svc'
namespace: wildduck
syncPolicy: {}
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

@@ -13,4 +13,8 @@ spec:
destination:
server: 'https://kubernetes.default.svc'
namespace: woodpecker
syncPolicy: {}
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

argocd/redis.yaml (new file)
@@ -0,0 +1,50 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: argocd-redis
namespace: argocd
spec:
size: 32
mapping:
- key: redis-password
value: "%(plaintext)s"
- key: REDIS_URI
value: "redis://:%(plaintext)s@argocd-redis"
---
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
name: argocd-redis
namespace: argocd
spec:
authentication:
passwordFromSecret:
key: redis-password
name: argocd-redis
replicas: 3
resources:
limits:
cpu: 1000m
memory: 1Gi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: argocd-redis
app.kubernetes.io/part-of: dragonfly
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: argocd-redis
namespace: argocd
spec:
selector:
matchLabels:
app: argocd-redis
app.kubernetes.io/part-of: dragonfly
podMetricsEndpoints:
- port: admin

@@ -5,9 +5,13 @@ global:
dex:
enabled: false
# Maybe one day switch to Redis HA?
redis:
enabled: false
redis-ha:
enabled: false
externalRedis:
host: argocd-redis
existingSecret: argocd-redis
server:
# HTTPS is implemented by Traefik
@@ -23,20 +27,6 @@ server:
- hosts:
- "*.k-space.ee"
configfucked:
resource.customizations: |
# https://github.com/argoproj/argo-cd/issues/1704
networking.k8s.io/Ingress:
health.lua: |
hs = {}
hs.status = "Healthy"
return hs
apiextensions.k8s.io/CustomResourceDefinition:
ignoreDifferences: |
jsonPointers:
- "x-kubernetes-validations"
metrics:
enabled: true
@@ -79,12 +69,26 @@ configs:
p, role:developers, applications, action/apps/Deployment/restart, default/camtiler, allow
p, role:developers, applications, sync, default/camtiler, allow
p, role:developers, applications, update, default/camtiler, allow
# argocd-image-updater
p, role:image-updater, applications, get, */*, allow
p, role:image-updater, applications, update, */*, allow
g, image-updater, role:image-updater
cm:
admin.enabled: "false"
resource.customizations: |
# https://github.com/argoproj/argo-cd/issues/1704
networking.k8s.io/Ingress:
health.lua: |
hs = {}
hs.status = "Healthy"
return hs
apiextensions.k8s.io/CustomResourceDefinition:
ignoreDifferences: |
jsonPointers:
- "x-kubernetes-validations"
oidc.config: |
name: OpenID Connect
issuer: https://auth2.k-space.ee/
issuer: https://auth.k-space.ee/
clientID: $oidc-client-argocd-owner-secrets:OIDC_CLIENT_ID
cliClientID: $oidc-client-argocd-owner-secrets:OIDC_CLIENT_ID
clientSecret: $oidc-client-argocd-owner-secrets:OIDC_CLIENT_SECRET

@@ -32,14 +32,8 @@ spec:
cidr: 172.20.8.241/32 # Erki A
- from:
- ipBlock:
cidr: 195.222.16.36/32 # Elisa SIP
- from:
- ipBlock:
cidr: 195.222.16.38/32 # Elisa SIP
cidr: 212.47.211.10/32 # Elisa SIP
egress:
- to:
- ipBlock:
cidr: 195.222.16.36/32 # Elisa SIP
- to:
- ipBlock:
cidr: 195.222.16.38/32 # Elisa SIP
cidr: 212.47.211.10/32 # Elisa SIP

@@ -1,15 +1,35 @@
# Bind setup
# Bind namespace
The Bind primary resides outside Kubernetes at `193.40.103.2` and
The Bind secondary servers and `external-dns` service pods are running in this namespace.
The `external-dns` pods are used to declaratively update DNS records on the
[Bind primary](https://git.k-space.ee/k-space/ansible/src/branch/main/authoritative-nameserver.yaml).
The Bind primary `ns1.k-space.ee` resides outside Kubernetes at `193.40.103.2` and
it's internally reachable via `172.20.0.2`.
Bind secondaries perform AXFR (zone transfer) from `ns1.k-space.ee` using
shared secret authentication.
The primary triggers notification events to `172.20.53.{1..3}`
which are the internally exposed IPs of the secondaries.
Bind secondaries are hosted inside Kubernetes, load balanced behind `62.65.250.2` and
under normal circumstances managed by [ArgoCD](https://argocd.k-space.ee/applications/argocd/bind).
Note that [cert-manager](https://git.k-space.ee/k-space/kube/src/branch/master/cert-manager/) also performs DNS updates on the Bind primary.
# For user
`Ingresses` and `DNSEndpoint` resources under `k-space.ee`, `kspace.ee`, `k6.ee`
domains are picked up automatically by `external-dns` and updated on the Bind primary.
To find usage examples in this repository use
`grep -r -A25 "^kind: Ingress" .` and
`grep -r -A100 "^kind: DNSEndpoint" .`
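For illustration, a minimal `DNSEndpoint` sketch modelled on the ones in this namespace (the record name and target below are made up):
```
---
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
  name: example
  namespace: bind
spec:
  endpoints:
    # external-dns picks this up and pushes the record to the Bind primary
    - dnsName: example.k-space.ee
      recordTTL: 300
      recordType: A
      targets:
        - 62.65.250.2
```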
# For administrator
Ingresses and DNSEndpoints referring to `k-space.ee`, `kspace.ee`, `k6.ee`
are picked up automatically by `external-dns` and updated on primary.
The primary triggers notification events to `172.20.53.{1..3}`
The primary triggers notification events to `172.21.53.{1..3}`
which are the internally exposed IPs of the secondaries.
# Secrets
@@ -48,7 +68,7 @@ zone "foobar.com" {
file "/var/lib/bind/db.foobar.com";
allow-update { !rejected; key foobar; };
allow-transfer { !rejected; key readonly; key foobar; };
notify explicit; also-notify { 172.20.53.1; 172.20.53.2; 172.20.53.3; };
notify explicit; also-notify { 172.21.53.1; 172.21.53.2; 172.21.53.3; };
};
```

@@ -3,6 +3,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: bind-secondary-config-local
namespace: bind
data:
named.conf.local: |
zone "codemowers.ee" { type slave; masters { 172.20.0.2 key readonly; }; };
@@ -13,6 +14,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: bind-secondary-config
namespace: bind
data:
named.conf: |
include "/etc/bind/named.conf.local";
@@ -36,6 +38,7 @@ metadata:
name: bind-secondary
namespace: bind
spec:
revisionHistoryLimit: 0
replicas: 3
selector:
matchLabels:
@@ -45,15 +48,16 @@ spec:
labels:
app: bind-secondary
spec:
volumes:
- name: run
emptyDir: {}
containers:
- name: bind-secondary
image: internetsystemsconsortium/bind9:9.19
volumeMounts:
- mountPath: /run/named
name: run
image: internetsystemsconsortium/bind9:9.20
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 1m
memory: 35Mi
workingDir: /var/bind
command:
- named
@@ -79,16 +83,13 @@ spec:
name: bind-readonly-secret
- name: bind-data
emptyDir: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- bind-secondary
topologyKey: "kubernetes.io/hostname"
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: bind-secondary
---
apiVersion: v1
kind: Service
@@ -119,7 +120,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.20.53.1
loadBalancerIP: 172.21.53.1
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-0
@@ -141,7 +142,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.20.53.2
loadBalancerIP: 172.21.53.2
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-1
@@ -163,7 +164,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.20.53.3
loadBalancerIP: 172.21.53.3
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-2

@@ -3,6 +3,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-k-space
namespace: bind
spec:
revisionHistoryLimit: 0
selector:
@@ -16,7 +17,14 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.13.5
image: registry.k8s.io/external-dns/external-dns:v0.14.2
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 2m
memory: 35Mi
envFrom:
- secretRef:
name: tsig-secret

@@ -3,6 +3,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-k6
namespace: bind
spec:
revisionHistoryLimit: 0
selector:
@@ -16,7 +17,14 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.13.5
image: registry.k8s.io/external-dns/external-dns:v0.14.2
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 2m
memory: 35Mi
envFrom:
- secretRef:
name: tsig-secret
@@ -41,31 +49,32 @@ apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: k6
namespace: bind
spec:
endpoints:
- dnsName: k6.ee
recordTTL: 300
recordType: SOA
targets:
- "ns1.k-space.ee. hostmaster.k-space.ee. (1 300 300 300 300)"
- dnsName: k6.ee
recordTTL: 300
recordType: NS
targets:
- ns1.k-space.ee
- ns2.k-space.ee
- dnsName: ns1.k-space.ee
recordTTL: 300
recordType: A
targets:
- 193.40.103.2
- dnsName: ns2.k-space.ee
recordTTL: 300
recordType: A
targets:
- 62.65.250.2
- dnsName: k-space.ee
recordTTL: 300
recordType: MX
targets:
- 10 mail.k-space.ee
- dnsName: k6.ee
recordTTL: 300
recordType: SOA
targets:
- "ns1.k-space.ee. hostmaster.k-space.ee. (1 300 300 300 300)"
- dnsName: k6.ee
recordTTL: 300
recordType: NS
targets:
- ns1.k-space.ee
- ns2.k-space.ee
- dnsName: ns1.k-space.ee
recordTTL: 300
recordType: A
targets:
- 193.40.103.2
- dnsName: ns2.k-space.ee
recordTTL: 300
recordType: A
targets:
- 62.65.250.2
- dnsName: k-space.ee
recordTTL: 300
recordType: MX
targets:
- 10 mail.k-space.ee

@@ -3,6 +3,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-kspace
namespace: bind
spec:
revisionHistoryLimit: 0
selector:
@@ -16,10 +17,17 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.13.5
image: registry.k8s.io/external-dns/external-dns:v0.14.2
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 2m
memory: 35Mi
envFrom:
- secretRef:
name: tsig-secret
- secretRef:
name: tsig-secret
args:
- --events
- --registry=noop
@@ -41,26 +49,27 @@ apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: kspace
namespace: bind
spec:
endpoints:
- dnsName: kspace.ee
recordTTL: 300
recordType: SOA
targets:
- "ns1.k-space.ee. hostmaster.k-space.ee. (1 300 300 300 300)"
- dnsName: kspace.ee
recordTTL: 300
recordType: NS
targets:
- ns1.k-space.ee
- ns2.k-space.ee
- dnsName: ns1.k-space.ee
recordTTL: 300
recordType: A
targets:
- 193.40.103.2
- dnsName: ns2.k-space.ee
recordTTL: 300
recordType: A
targets:
- 62.65.250.2
- dnsName: kspace.ee
recordTTL: 300
recordType: SOA
targets:
- "ns1.k-space.ee. hostmaster.k-space.ee. (1 300 300 300 300)"
- dnsName: kspace.ee
recordTTL: 300
recordType: NS
targets:
- ns1.k-space.ee
- ns2.k-space.ee
- dnsName: ns1.k-space.ee
recordTTL: 300
recordType: A
targets:
- 193.40.103.2
- dnsName: ns2.k-space.ee
recordTTL: 300
recordType: A
targets:
- 62.65.250.2

@@ -4,55 +4,57 @@ kind: ClusterRole
metadata:
name: external-dns
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
- nodes
verbs:
- get
- watch
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints
verbs:
- get
- watch
- list
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints/status
verbs:
- update
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
- nodes
verbs:
- get
- watch
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints
verbs:
- get
- watch
- list
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints/status
verbs:
- update
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: bind
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
namespace: bind
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: bind
- kind: ServiceAccount
name: external-dns
namespace: bind

@@ -1,5 +1,56 @@
To apply changes:
# Cameras
Camtiler is the umbrella name for our homegrown camera surveillance system.
Everything besides [Camera](#camera)s is deployed with Kubernetes.
## Components
![cameras.graphviz.svg](cameras.graphviz.svg)
<!-- Manually rendered with https://dreampuf.github.io/GraphvizOnline
digraph G {
"camera-operator" -> "camera-motion-detect" [label="deploys"]
"camera-tiler" -> "cam.k-space.ee/tiled"
camera -> "camera-tiler"
camera -> "camera-motion-detect" -> mongo
"camera-motion-detect" -> "Minio S3"
"cam.k-space.ee" -> mongo [label="queries events", decorate=true]
mongo -> "camtiler-event-broker" [label="transforms object to add (signed) URL to S3", ]
"camtiler-event-broker" -> "cam.k-space.ee"
"Minio S3" -> "cam.k-space.ee" [label="using signed URL from camtiler-event-broker", decorate=true]
camera [label="📸 camera"]
}
-->
### 📸 Camera
Cameras are listed in [application.yml](application.yml) as `kind: Camera`; a sketch follows the list below.
Two types of camera hosts:
- GL-AR150 with [openwrt-camera-images](https://git.k-space.ee/k-space/openwrt-camera-image).
- [Doors](https://wiki.k-space.ee/e/en/hosting/doors) (Raspberry Pi) with mjpg-streamer.
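For illustration only — `apiVersion` and `kind` below are taken from [application.yml](application.yml), but the `spec` field shown is a hypothetical placeholder, not the real schema:
```
---
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:
  name: example-door
spec:
  # hypothetical field; see application.yml for the actual spec
  target: http://example-camera-host:8080/?action=stream
```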
### camera-tiler (cam.k-space.ee/tiled)
Out-of-band; connects to cameras and streams to the web browser.
One instance per camera.
#### camera-operator
Operator/deployer for camera-tiler; functionally the same as a Kubernetes Deployment.
### camera-motion-detect
Connects to cameras, on motion writes events to Mongo and frames to S3.
### cam.k-space.ee (logmower)
Fetches motion-detect events from mongo. Fetches referenced images from S3 (minio).
#### camtiler-event-broker
MitM between motion-detect -> mongo. Appends S3 URLs to the response.
## Kubernetes commands
Apply changes:
```
kubectl apply -n camtiler \
-f application.yml \
@@ -13,14 +64,12 @@ kubectl apply -n camtiler \
-f networkpolicy-base.yml
```
To deploy changes:
Deploy changes:
```
kubectl -n camtiler rollout restart deployment.apps/camtiler
```
To initialize secrets:
Initialize secrets:
```
kubectl create secret generic -n camtiler mongodb-application-readwrite-password --from-literal="password=$(cat /dev/urandom | base64 | head -c 30)"
kubectl create secret generic -n camtiler mongodb-application-readonly-password --from-literal="password=$(cat /dev/urandom | base64 | head -c 30)"
@@ -32,8 +81,7 @@ kubectl -n camtiler create secret generic camera-secrets \
--from-literal=password=...
```
To restart all deployments:
Restart all deployments:
```
for j in $(kubectl get deployments -n camtiler -o name); do kubectl rollout restart -n camtiler $j; done
```

@@ -268,6 +268,7 @@ spec:
annotations:
summary: CPU limits are bottleneck
---
# Referenced/linked by README.md
apiVersion: k-space.ee/v1alpha1
kind: Camera
metadata:

@@ -0,0 +1,131 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 2.40.1 (20161225.0304)
-->
<!-- Title: G Pages: 1 -->
<svg width="658pt" height="387pt" viewBox="0.00 0.00 658.36 386.80" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 382.8)">
<title>G</title>
<polygon fill="#ffffff" stroke="transparent" points="-4,4 -4,-382.8 654.3562,-382.8 654.3562,4 -4,4"/>
<!-- camera&#45;operator -->
<g id="node1" class="node">
<title>camera-operator</title>
<ellipse fill="none" stroke="#000000" cx="356.22" cy="-360.8" rx="74.095" ry="18"/>
<text text-anchor="middle" x="356.22" y="-356.6" font-family="Times,serif" font-size="14.00" fill="#000000">camera-operator</text>
</g>
<!-- camera&#45;motion&#45;detect -->
<g id="node2" class="node">
<title>camera-motion-detect</title>
<ellipse fill="none" stroke="#000000" cx="356.22" cy="-272" rx="95.5221" ry="18"/>
<text text-anchor="middle" x="356.22" y="-267.8" font-family="Times,serif" font-size="14.00" fill="#000000">camera-motion-detect</text>
</g>
<!-- camera&#45;operator&#45;&gt;camera&#45;motion&#45;detect -->
<g id="edge1" class="edge">
<title>camera-operator-&gt;camera-motion-detect</title>
<path fill="none" stroke="#000000" d="M356.22,-342.4006C356.22,-330.2949 356.22,-314.2076 356.22,-300.4674"/>
<polygon fill="#000000" stroke="#000000" points="359.7201,-300.072 356.22,-290.072 352.7201,-300.0721 359.7201,-300.072"/>
<text text-anchor="middle" x="377.9949" y="-312.2" font-family="Times,serif" font-size="14.00" fill="#000000">deploys</text>
</g>
<!-- mongo -->
<g id="node6" class="node">
<title>mongo</title>
<ellipse fill="none" stroke="#000000" cx="292.22" cy="-199" rx="37.7256" ry="18"/>
<text text-anchor="middle" x="292.22" y="-194.8" font-family="Times,serif" font-size="14.00" fill="#000000">mongo</text>
</g>
<!-- camera&#45;motion&#45;detect&#45;&gt;mongo -->
<g id="edge5" class="edge">
<title>camera-motion-detect-&gt;mongo</title>
<path fill="none" stroke="#000000" d="M340.3997,-253.9551C332.3383,-244.76 322.4178,-233.4445 313.6783,-223.476"/>
<polygon fill="#000000" stroke="#000000" points="316.2049,-221.0485 306.9807,-215.8365 310.9413,-225.6632 316.2049,-221.0485"/>
</g>
<!-- Minio S3 -->
<g id="node7" class="node">
<title>Minio S3</title>
<ellipse fill="none" stroke="#000000" cx="396.22" cy="-145" rx="47.0129" ry="18"/>
<text text-anchor="middle" x="396.22" y="-140.8" font-family="Times,serif" font-size="14.00" fill="#000000">Minio S3</text>
</g>
<!-- camera&#45;motion&#45;detect&#45;&gt;Minio S3 -->
<g id="edge6" class="edge">
<title>camera-motion-detect-&gt;Minio S3</title>
<path fill="none" stroke="#000000" d="M361.951,-253.804C368.6045,-232.6791 379.6542,-197.5964 387.4031,-172.9935"/>
<polygon fill="#000000" stroke="#000000" points="390.8337,-173.7518 390.4996,-163.1622 384.157,-171.6489 390.8337,-173.7518"/>
</g>
<!-- camera&#45;tiler -->
<g id="node3" class="node">
<title>camera-tiler</title>
<ellipse fill="none" stroke="#000000" cx="527.22" cy="-272" rx="57.8558" ry="18"/>
<text text-anchor="middle" x="527.22" y="-267.8" font-family="Times,serif" font-size="14.00" fill="#000000">camera-tiler</text>
</g>
<!-- cam.k&#45;space.ee/tiled -->
<g id="node4" class="node">
<title>cam.k-space.ee/tiled</title>
<ellipse fill="none" stroke="#000000" cx="527.22" cy="-199" rx="89.7229" ry="18"/>
<text text-anchor="middle" x="527.22" y="-194.8" font-family="Times,serif" font-size="14.00" fill="#000000">cam.k-space.ee/tiled</text>
</g>
<!-- camera&#45;tiler&#45;&gt;cam.k&#45;space.ee/tiled -->
<g id="edge2" class="edge">
<title>camera-tiler-&gt;cam.k-space.ee/tiled</title>
<path fill="none" stroke="#000000" d="M527.22,-253.9551C527.22,-245.8828 527.22,-236.1764 527.22,-227.1817"/>
<polygon fill="#000000" stroke="#000000" points="530.7201,-227.0903 527.22,-217.0904 523.7201,-227.0904 530.7201,-227.0903"/>
</g>
<!-- camera -->
<g id="node5" class="node">
<title>camera</title>
<ellipse fill="none" stroke="#000000" cx="513.22" cy="-360.8" rx="51.565" ry="18"/>
<text text-anchor="middle" x="513.22" y="-356.6" font-family="Times,serif" font-size="14.00" fill="#000000">📸 camera</text>
</g>
<!-- camera&#45;&gt;camera&#45;motion&#45;detect -->
<g id="edge4" class="edge">
<title>camera-&gt;camera-motion-detect</title>
<path fill="none" stroke="#000000" d="M485.8726,-345.3322C460.8217,-331.1633 423.4609,-310.0318 395.271,-294.0875"/>
<polygon fill="#000000" stroke="#000000" points="396.8952,-290.9851 386.4679,-289.1084 393.449,-297.078 396.8952,-290.9851"/>
</g>
<!-- camera&#45;&gt;camera&#45;tiler -->
<g id="edge3" class="edge">
<title>camera-&gt;camera-tiler</title>
<path fill="none" stroke="#000000" d="M516.1208,-342.4006C518.0482,-330.175 520.6159,-313.8887 522.7961,-300.0599"/>
<polygon fill="#000000" stroke="#000000" points="526.2706,-300.4951 524.3708,-290.072 519.356,-299.4049 526.2706,-300.4951"/>
</g>
<!-- camtiler&#45;event&#45;broker -->
<g id="node9" class="node">
<title>camtiler-event-broker</title>
<ellipse fill="none" stroke="#000000" cx="95.22" cy="-91" rx="95.4404" ry="18"/>
<text text-anchor="middle" x="95.22" y="-86.8" font-family="Times,serif" font-size="14.00" fill="#000000">camtiler-event-broker</text>
</g>
<!-- mongo&#45;&gt;camtiler&#45;event&#45;broker -->
<g id="edge8" class="edge">
<title>mongo-&gt;camtiler-event-broker</title>
<path fill="none" stroke="#000000" d="M254.6316,-196.5601C185.4398,-191.6839 43.6101,-179.7471 28.9976,-163 18.4783,-150.9441 20.8204,-140.7526 28.9976,-127 32.2892,-121.4639 36.7631,-116.7259 41.8428,-112.6837"/>
<polygon fill="#000000" stroke="#000000" points="43.9975,-115.4493 50.2411,-106.8896 40.0224,-109.6875 43.9975,-115.4493"/>
<text text-anchor="middle" x="153.8312" y="-140.8" font-family="Times,serif" font-size="14.00" fill="#000000">transforms object to add (signed) URL to S3</text>
</g>
<!-- cam.k&#45;space.ee -->
<g id="node8" class="node">
<title>cam.k-space.ee</title>
<ellipse fill="none" stroke="#000000" cx="292.22" cy="-18" rx="70.0229" ry="18"/>
<text text-anchor="middle" x="292.22" y="-13.8" font-family="Times,serif" font-size="14.00" fill="#000000">cam.k-space.ee</text>
</g>
<!-- Minio S3&#45;&gt;cam.k&#45;space.ee -->
<g id="edge10" class="edge">
<title>Minio S3-&gt;cam.k-space.ee</title>
<path fill="none" stroke="#000000" d="M394.7596,-126.8896C392.7231,-111.3195 387.8537,-88.922 376.22,-73 366.0004,-59.0134 351.0573,-47.5978 336.5978,-38.8647"/>
<polygon fill="#000000" stroke="#000000" points="338.1215,-35.7041 327.7038,-33.7748 334.6446,-41.7796 338.1215,-35.7041"/>
<text text-anchor="middle" x="521.2881" y="-86.8" font-family="Times,serif" font-size="14.00" fill="#000000">using signed URL from camtiler-event-broker</text>
<polyline fill="none" stroke="#000000" points="650.3562,-82.6 392.22,-82.6 392.9753,-115.8309 "/>
</g>
<!-- cam.k&#45;space.ee&#45;&gt;mongo -->
<g id="edge7" class="edge">
<title>cam.k-space.ee-&gt;mongo</title>
<path fill="none" stroke="#000000" d="M292.22,-36.2125C292.22,-67.8476 292.22,-133.1569 292.22,-170.7273"/>
<polygon fill="#000000" stroke="#000000" points="288.7201,-170.9833 292.22,-180.9833 295.7201,-170.9833 288.7201,-170.9833"/>
<text text-anchor="middle" x="332.0647" y="-86.8" font-family="Times,serif" font-size="14.00" fill="#000000">queries events</text>
<polyline fill="none" stroke="#000000" points="371.9094,-82.6 292.22,-82.6 292.22,-91.3492 "/>
</g>
<!-- camtiler&#45;event&#45;broker&#45;&gt;cam.k&#45;space.ee -->
<g id="edge9" class="edge">
<title>camtiler-event-broker-&gt;cam.k-space.ee</title>
<path fill="none" stroke="#000000" d="M138.9406,-74.7989C169.6563,-63.417 210.7924,-48.1737 242.716,-36.3441"/>
<polygon fill="#000000" stroke="#000000" points="244.1451,-39.5472 252.3059,-32.7905 241.7128,-32.9833 244.1451,-39.5472"/>
</g>
</g>
</svg>

@@ -1,11 +1,11 @@
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWMiddlewareClient
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: sso
spec:
displayName: Cameras
uri: 'https://cams.k-space.ee/tiled'
uri: 'https://cam.k-space.ee/tiled'
allowedGroups:
- k-space:floor
- k-space:friends
@@ -17,21 +17,12 @@ metadata:
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: camtiler-sso@kubernetescrd,camtiler-redirect@kubernetescrd
traefik.ingress.kubernetes.io/router.middlewares: camtiler-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
external-dns.alpha.kubernetes.io/hostname: cams.k-space.ee,cam.k-space.ee
spec:
rules:
- host: cams.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: logmower-frontend
port:
number: 8080
- host: cam.k-space.ee
http:
paths:
@@ -67,12 +58,28 @@ spec:
- hosts:
- "*.k-space.ee"
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: redirect
name: cams-redirect
spec:
redirectRegex:
regex: ^https://cams.k-space.ee/(.*)$
replacement: https://cam.k-space.ee/$1
permanent: false
permanent: true
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: cams
spec:
entryPoints:
- websecure
routes:
- match: Host(`cams.k-space.ee`)
kind: Rule
middlewares:
- name: cams-redirect
services:
- kind: TraefikService
name: api@internal

@@ -85,7 +85,7 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 200Mi
storage: 100Mi
- metadata:
name: journal-volume
labels:

@@ -152,3 +152,44 @@ spec:
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
# Config drift: Added by ArgoCD
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: minio
spec:
egress:
- ports:
- port: http
protocol: TCP
to:
- podSelector:
matchLabels:
app.kubernetes.io/name: minio
ingress:
- from:
- podSelector: {}
ports:
- port: http
protocol: TCP
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
podSelector:
matchLabels:
app.kubernetes.io/name: minio
policyTypes:
- Ingress
- Egress

cert-manager/.gitignore (new file)
@@ -0,0 +1 @@
cert-manager.yaml

@@ -1,18 +1,33 @@
# cert-manager
`cert-manager` is used to obtain TLS certificates from Let's Encrypt.
It uses the DNS-01 challenge in conjunction with the Bind primary
at `ns1.k-space.ee`.
Refer to the [Bind primary Ansible playbook](https://git.k-space.ee/k-space/ansible/src/branch/main/authoritative-nameserver.yaml) and
the [Bind namespace on the Kubernetes cluster](https://git.k-space.ee/k-space/kube/src/branch/master/bind)
for more details.
Added manifest with:
# For user
Use the `Certificate` CRD of cert-manager; refer to the
[official documentation](https://cert-manager.io/docs/usage/certificate/).
To find usage examples in this repository use
`grep -r -A10 "^kind: Certificate" .`
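For illustration, a minimal `Certificate` sketch (names and hostname are made up) that would be signed via the cluster-wide `default` issuer defined below:
```
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example
spec:
  # cert-manager solves the DNS-01 challenge and writes the signed cert here
  secretName: example-tls
  dnsNames:
    - example.k-space.ee
  issuerRef:
    kind: ClusterIssuer
    name: default
```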
# For administrator
Deployed with:
```
curl -L https://github.com/jetstack/cert-manager/releases/download/v1.6.1/cert-manager.yaml -O
curl -L https://github.com/jetstack/cert-manager/releases/download/v1.15.1/cert-manager.yaml -O
kubectl apply -f cert-manager.yaml
```
To update certificate issuer
To update the issuer configuration or TSIG secret:
```
kubectl apply -f namespace.yml -f cert-manager.yaml
kubectl apply -f issuer.yml
kubectl apply -f default-issuer.yml
kubectl -n cert-manager create secret generic tsig-secret \
--from-literal=TSIG_SECRET=<secret>
```

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,21 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: default
namespace: cert-manager
spec:
acme:
email: info@k-space.ee
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: example-issuer-account-key
solvers:
- dns01:
rfc2136:
nameserver: 193.40.103.2
tsigKeyName: readwrite.
tsigAlgorithm: HMACSHA512
tsigSecretSecretRef:
name: tsig-secret
key: TSIG_SECRET

@@ -1,19 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: default
spec:
acme:
email: info@k-space.ee
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: example-issuer-account-key
solvers:
- dns01:
rfc2136:
nameserver: 193.40.103.2
tsigKeyName: acme.
tsigAlgorithm: HMACSHA512
tsigSecretSecretRef:
name: tsig-secret
key: TSIG_SECRET

@@ -3,6 +3,6 @@
To deploy:
```
wget https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.20/releases/cnpg-1.20.2.yaml -O application.yml
kubectl apply -f application.yml
kubectl apply --server-side -f \
https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml
```

File diff suppressed because it is too large

default/netshoot.yaml (new file)
@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: netshoot
spec:
replicas: 1
selector:
matchLabels:
app: netshoot
template:
metadata:
creationTimestamp: null
labels:
app: netshoot
spec:
containers:
- name: netshoot
image: nicolaka/netshoot
command:
- /bin/bash
args:
- '-c'
- while true; do ping localhost; sleep 60;done
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600

discourse/application.yaml (new file)
@@ -0,0 +1,382 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: discourse
annotations:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- "*.k-space.ee"
secretName:
rules:
- host: "discourse.k-space.ee"
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: discourse
port:
name: http
---
apiVersion: v1
kind: Service
metadata:
name: discourse
spec:
type: ClusterIP
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
selector:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: discourse
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: discourse
annotations:
reloader.stakater.com/auto: "true"
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
spec:
serviceAccountName: discourse
securityContext:
fsGroup: 0
fsGroupChangePolicy: Always
initContainers:
containers:
- name: discourse
image: docker.io/bitnami/discourse:3.3.2-debian-12-r0
imagePullPolicy: "IfNotPresent"
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
env:
- name: BITNAMI_DEBUG
value: "true"
- name: DISCOURSE_USERNAME
valueFrom:
secretKeyRef:
name: discourse-password
key: username
- name: DISCOURSE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-password
key: password
- name: DISCOURSE_PORT_NUMBER
value: "8080"
- name: DISCOURSE_EXTERNAL_HTTP_PORT_NUMBER
value: "80"
- name: DISCOURSE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgresql
key: password
- name: POSTGRESQL_CLIENT_CREATE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: POSTGRESQL_CLIENT_POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-redis
key: redis-password
envFrom:
- configMapRef:
name: discourse
- secretRef:
name: discourse-email
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: 500
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
httpGet:
path: /srv/status
port: http
initialDelaySeconds: 100
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
resources:
limits:
cpu: "6.0"
ephemeral-storage: 2Gi
memory: 12288Mi
requests:
cpu: "1.0"
ephemeral-storage: 50Mi
memory: 3072Mi
volumeMounts:
- name: discourse-data
mountPath: /bitnami/discourse
subPath: discourse
- name: sidekiq
image: docker.io/bitnami/discourse:3.3.2-debian-12-r0
imagePullPolicy: "IfNotPresent"
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
command:
- /opt/bitnami/scripts/discourse/entrypoint.sh
args:
- /opt/bitnami/scripts/discourse-sidekiq/run.sh
env:
- name: BITNAMI_DEBUG
value: "true"
- name: DISCOURSE_USERNAME
valueFrom:
secretKeyRef:
name: discourse-password
key: username
- name: DISCOURSE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-password
key: password
- name: DISCOURSE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgresql
key: password
- name: DISCOURSE_POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-postgres-superuser
key: password
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: discourse-redis
key: redis-password
envFrom:
- configMapRef:
name: discourse
- secretRef:
name: discourse-email
livenessProbe:
exec:
command: ["/bin/sh", "-c", "pgrep -f ^sidekiq"]
initialDelaySeconds: 500
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command: ["/bin/sh", "-c", "pgrep -f ^sidekiq"]
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
resources:
limits:
cpu: 750m
ephemeral-storage: 2Gi
memory: 768Mi
requests:
cpu: 500m
ephemeral-storage: 50Mi
memory: 512Mi
volumeMounts:
- name: discourse-data
mountPath: /bitnami/discourse
subPath: discourse
volumes:
- name: discourse-data
persistentVolumeClaim:
claimName: discourse-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: discourse-data
namespace: discourse
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "3Gi"
storageClassName: "proxmox-nas"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: discourse
namespace: discourse
data:
DISCOURSE_HOST: "discourse.k-space.ee"
DISCOURSE_SKIP_INSTALL: "yes"
DISCOURSE_PRECOMPILE_ASSETS: "no"
DISCOURSE_SITE_NAME: "K-Space Discourse"
DISCOURSE_USERNAME: "k-space"
DISCOURSE_EMAIL: "dos4dev@k-space.ee"
DISCOURSE_REDIS_HOST: "discourse-redis"
DISCOURSE_REDIS_PORT_NUMBER: "6379"
DISCOURSE_DATABASE_HOST: "discourse-postgres-rw"
DISCOURSE_DATABASE_PORT_NUMBER: "5432"
DISCOURSE_DATABASE_NAME: "discourse"
DISCOURSE_DATABASE_USER: "discourse"
POSTGRESQL_CLIENT_DATABASE_HOST: "discourse-postgres-rw"
POSTGRESQL_CLIENT_DATABASE_PORT_NUMBER: "5432"
POSTGRESQL_CLIENT_POSTGRES_USER: "postgres"
POSTGRESQL_CLIENT_CREATE_DATABASE_NAME: "discourse"
POSTGRESQL_CLIENT_CREATE_DATABASE_EXTENSIONS: "hstore,pg_trgm"
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: discourse
namespace: discourse
spec:
displayName: Discourse
uri: https://discourse.k-space.ee
redirectUris:
- https://discourse.k-space.ee/auth/oidc/callback
allowedGroups:
- k-space:floor
- k-space:friends
grantTypes:
- authorization_code
- refresh_token
responseTypes:
- code
availableScopes:
- openid
- profile
pkce: false
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: discourse-redis
namespace: discourse
spec:
size: 32
mapping:
- key: redis-password
value: "%(plaintext)s"
- key: REDIS_URI
value: "redis://:%(plaintext)s@discourse-redis"
---
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
name: discourse-redis
namespace: discourse
spec:
authentication:
passwordFromSecret:
key: redis-password
name: discourse-redis
replicas: 3
resources:
limits:
cpu: 1000m
memory: 1Gi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: discourse-redis
app.kubernetes.io/part-of: dragonfly
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: discourse-postgres
namespace: discourse
spec:
instances: 1
enableSuperuserAccess: true
bootstrap:
initdb:
database: discourse
owner: discourse
secret:
name: discourse-postgresql
dataChecksums: true
encoding: 'UTF8'
storage:
size: 10Gi
storageClass: postgres

@@ -0,0 +1,38 @@
# Dragonfly Operator
The Dragonfly operator is the preferred way to add Redis support to your application,
as Dragonfly is a modern Go rewrite that supports high availability.
The following alternatives were considered, but are discouraged:
* Vanilla Redis without replication is unusable during pod reschedules or Kubernetes worker outages
* Vanilla Redis replication is clunky and there is no reliable Kubernetes operator for running vanilla Redis
* KeyDB Cluster was unable to guarantee strong consistency
Note that vanilla Redis
[has changed its licensing policy](https://redis.io/blog/redis-adopts-dual-source-available-licensing/).
# For users
Refer to the [official documentation on usage](https://www.dragonflydb.io/docs/getting-started/kubernetes-operator#create-a-dragonfly-instance-with-replicas).
For an example deployment see
[here](https://git.k-space.ee/k-space/kube/src/branch/master/passmower/dragonfly.yaml).
To find other instances in this repository use `grep -r "kind: Dragonfly"`.
Use storage class `redis` for persistent instances.
To achieve high availability use 2+ replicas with correctly configured
`topologySpreadConstraints`.
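For illustration, a minimal sketch modelled on the instances elsewhere in this repository (the name is made up):
```
---
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
  name: example-redis
spec:
  replicas: 3
  # spread replicas across zones so a single zone outage keeps the instance up
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          app: example-redis
          app.kubernetes.io/part-of: dragonfly
```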
# For administrators
The operator was deployed with the following snippet:
```
kubectl apply -f https://raw.githubusercontent.com/dragonflydb/dragonfly-operator/v1.1.6/manifests/dragonfly-operator.yaml
```
To upgrade, refer to
[github.com/dragonflydb/dragonfly-operator](https://github.com/dragonflydb/dragonfly-operator/releases),
bump the version and reapply.

@@ -1,13 +0,0 @@
To deploy:
```
kubectl apply -n drone-execution -f application.yml
```
To bootstrap secrets:
```
kubectl create secret generic -n drone-execution application-secrets \
--from-literal=DRONE_RPC_SECRET=$(kubectl get secret -n drone application-secrets -o jsonpath="{.data.DRONE_RPC_SECRET}" | base64 -d) \
--from-literal=DRONE_SECRET_PLUGIN_TOKEN=$(cat /dev/urandom | base64 | head -c 30)
```

@@ -1,177 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: drone-runner-kube
---
apiVersion: v1
kind: ConfigMap
metadata:
name: application-config
data:
DRONE_DEBUG: "false"
DRONE_TRACE: "false"
DRONE_NAMESPACE_DEFAULT: "drone-execution"
DRONE_RPC_HOST: "drone.k-space.ee"
DRONE_RPC_PROTO: "https"
PLUGIN_MTU: "1300"
DRONE_SECRET_PLUGIN_ENDPOINT: "http://secrets:3000"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: drone-runner-kube
namespace: "drone-execution"
labels:
app: drone-runner-kube
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get
- create
- delete
- list
- watch
- update
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: drone-runner-kube
namespace: drone-execution
labels:
app: drone-runner-kube
subjects:
- kind: ServiceAccount
name: drone-runner-kube
namespace: drone-execution
roleRef:
kind: Role
name: drone-runner-kube
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: drone-runner-kube
labels:
app: drone-runner-kube
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: http
protocol: TCP
name: http
selector:
app: drone-runner-kube
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-runner-kube
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
keel.sh/pollSchedule: "@midnight"
spec:
replicas: 1
selector:
matchLabels:
app: drone-runner-kube
template:
metadata:
labels:
app: drone-runner-kube
spec:
serviceAccountName: drone-runner-kube
terminationGracePeriodSeconds: 3600
containers:
- name: server
securityContext:
{}
image: drone/drone-runner-kube
imagePullPolicy: Always
ports:
- name: http
containerPort: 3000
protocol: TCP
envFrom:
- configMapRef:
name: application-config
- secretRef:
name: application-secrets
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-kubernetes-secrets
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
keel.sh/pollSchedule: "@midnight"
spec:
replicas: 1
selector:
matchLabels:
app: drone-kubernetes-secrets
template:
metadata:
labels:
app: drone-kubernetes-secrets
spec:
containers:
- name: secrets
image: drone/kubernetes-secrets
imagePullPolicy: Always
ports:
- containerPort: 3000
env:
- name: SECRET_KEY
valueFrom:
secretKeyRef:
name: application-secrets
key: DRONE_SECRET_PLUGIN_TOKEN
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: drone-kubernetes-secrets
spec:
podSelector:
matchLabels:
app: drone-kubernetes-secrets
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
app: drone-runner-kube
ports:
- port: 3000
---
# Following should block access to pods in other namespaces, but should permit
# Git checkout, pip install, talking to Traefik via public IP etc
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: drone-runner-kube
spec:
podSelector: {}
policyTypes:
- Egress
egress:
- to:
- ipBlock:
cidr: 0.0.0.0/0

@@ -1 +0,0 @@
../shared/networkpolicy-base.yml

@@ -1,25 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# Chart dirs/files
docs/
ci/

@@ -1,155 +0,0 @@
# Deployment
To deploy:
```
kubectl apply -n drone -f application.yml
```
To bootstrap secrets:
```
kubectl create secret generic -n drone application-secrets \
--from-literal=DRONE_GITEA_CLIENT_ID=... \
--from-literal=DRONE_GITEA_CLIENT_SECRET=... \
--from-literal=DRONE_RPC_SECRET=$(cat /dev/urandom | base64 | head -c 30)
```
# Integrating with Docker registry
We use harbor.k-space.ee to host our own images.
Set up robot account `robot$k-space+drone` in Harbor first.
In Drone, associate the `docker_username` and `docker_password` secrets with the
`k-space` organization.
Instead of a click marathon you can also pull the CLI configuration for Drone
from https://drone.k-space.ee/account
```
drone orgsecret add k-space docker_username 'robot$k-space+drone'
drone orgsecret add k-space docker_password '...'
```
# Integrating with e-mail
To (re)set e-mail credentials:
```
drone orgsecret add k-space email_password '...'
```
To issue a build, hit the button in the Drone web interface or alternatively:
```
drone build create k-space/...
```
# Using templates
Templates unfortunately aren't pulled in from this Git repo.
The current `docker.yaml` template includes the following:
```
kind: pipeline
type: kubernetes
name: build-arm64
platform:
arch: arm64
os: linux
node_selector:
kubernetes.io/arch: arm64
tolerations:
- key: arch
operator: Equal
value: arm64
effect: NoSchedule
steps:
- name: submodules
image: alpine/git
commands:
- touch .gitmodules
- sed -i -e 's/git@git.k-space.ee:/https:\\/\\/git.k-space.ee\\//g' .gitmodules
- git submodule update --init --recursive
- echo "ENV GIT_COMMIT=$(git rev-parse HEAD)" >> Dockerfile
- echo "ENV GIT_COMMIT_TIMESTAMP=$(git log -1 --format=%cd --date=iso-strict)" >> Dockerfile
- cat Dockerfile
- name: docker
image: harbor.k-space.ee/k-space/drone-kaniko
settings:
repo: ${DRONE_REPO}
tags: latest-arm64
registry: harbor.k-space.ee
username:
from_secret: docker_username
password:
from_secret: docker_password
---
kind: pipeline
type: kubernetes
name: build-amd64
platform:
arch: amd64
os: linux
node_selector:
kubernetes.io/arch: amd64
steps:
- name: submodules
image: alpine/git
commands:
- touch .gitmodules
- sed -i -e 's/git@git.k-space.ee:/https:\\/\\/git.k-space.ee\\//g' .gitmodules
- git submodule update --init --recursive
- echo "ENV GIT_COMMIT=$(git rev-parse HEAD)" >> Dockerfile
- echo "ENV GIT_COMMIT_TIMESTAMP=$(git log -1 --format=%cd --date=iso-strict)" >> Dockerfile
- cat Dockerfile
- name: docker
image: harbor.k-space.ee/k-space/drone-kaniko
settings:
repo: ${DRONE_REPO}
tags: latest-amd64
registry: harbor.k-space.ee
storage_driver: vfs
username:
from_secret: docker_username
password:
from_secret: docker_password
---
kind: pipeline
type: kubernetes
name: manifest
steps:
- name: manifest
image: plugins/manifest
settings:
target: ${DRONE_REPO}:latest
template: ${DRONE_REPO}:latest-ARCH
platforms:
- linux/amd64
- linux/arm64
username:
from_secret: docker_username
password:
from_secret: docker_password
depends_on:
- build-amd64
- build-arm64
---
kind: pipeline
type: kubernetes
name: gitlint
steps:
- name: gitlint
image: harbor.k-space.ee/k-space/gitlint-bundle
# https://git.k-space.ee/k-space/gitlint-bundle
---
kind: pipeline
type: kubernetes
name: flake8
steps:
- name: flake8
image: harbor.k-space.ee/k-space/flake8-bundle
# https://git.k-space.ee/k-space/flake8-bundle
```

@@ -1,117 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: drone
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app: drone
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: drone
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@midnight"
spec:
serviceName: drone
replicas: 1
selector:
matchLabels:
app: drone
template:
metadata:
labels:
app: drone
spec:
automountServiceAccountToken: false
securityContext:
{}
containers:
- name: server
securityContext:
{}
image: drone/drone:2
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
env:
- name: DRONE_GITEA_SERVER
value: https://git.k-space.ee
- name: DRONE_GIT_ALWAYS_AUTH
value: "false"
- name: DRONE_SERVER_HOST
value: drone.k-space.ee
- name: DRONE_SERVER_PROTO
value: https
- name: DRONE_USER_CREATE
value: username:lauri,admin:true
- name: DRONE_DEBUG
value: "true"
- name: DRONE_TRACE
value: "true"
envFrom:
- secretRef:
name: application-secrets
volumeMounts:
- name: drone-data
mountPath: /data
volumeClaimTemplates:
- metadata:
name: drone-data
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: redirect
spec:
redirectRegex:
regex: ^https://(.*)/register$
replacement: https://${1}/
permanent: false
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: drone
annotations:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
traefik.ingress.kubernetes.io/router.middlewares: drone-redirect@kubernetescrd
spec:
tls:
- hosts:
- "*.k-space.ee"
rules:
- host: "drone.k-space.ee"
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: drone
port:
number: 80

elastic-system/.gitignore (new file)
@@ -0,0 +1,2 @@
crds.yaml
operator.yaml

@@ -1,7 +1,7 @@
# elastic-operator
```
wget https://download.elastic.co/downloads/eck/2.4.0/crds.yaml
wget https://download.elastic.co/downloads/eck/2.4.0/operator.yaml
wget https://download.elastic.co/downloads/eck/2.13.0/crds.yaml
wget https://download.elastic.co/downloads/eck/2.13.0/operator.yaml
kubectl apply -n elastic-system -f application.yml -f crds.yaml -f operator.yaml
```

@@ -5,7 +5,7 @@ metadata:
name: filebeat
spec:
type: filebeat
version: 8.4.3
version: 8.14.3
elasticsearchRef:
name: elasticsearch
config:
@@ -150,7 +150,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.20.51.4
loadBalancerIP: 172.21.51.4
ports:
- name: filebeat-syslog
port: 514
@@ -169,7 +169,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.20.51.4
loadBalancerIP: 172.21.51.4
ports:
- name: filebeat-syslog
port: 514
@@ -218,10 +218,12 @@ kind: Elasticsearch
metadata:
name: elasticsearch
spec:
version: 8.4.3
version: 8.14.3
nodeSets:
- name: default
count: 1
count: 2
config:
node.roles: [ "data_content", "data_hot", "ingest", "master", "remote_cluster_client", "data_cold", "remote_cluster_client" ]
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
@@ -232,17 +234,13 @@ spec:
requests:
storage: 5Gi
storageClassName: longhorn
http:
tls:
selfSignedCertificate:
disabled: true
---
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
name: kibana
spec:
version: 8.4.3
version: 8.14.3
count: 1
elasticsearchRef:
name: elasticsearch
@@ -254,23 +252,13 @@ spec:
server.publicBaseUrl: https://kibana.k-space.ee
xpack.reporting.enabled: false
xpack.apm.ui.enabled: false
xpack.security.authc.providers:
anonymous.anonymous1:
order: 0
credentials:
username: "elastic"
secureSettings:
- secretName: elasticsearch-es-elastic-user
entries:
- key: elastic
path: xpack.security.authc.providers.anonymous.anonymous1.credentials.password
podTemplate:
metadata:
annotations:
co.elastic.logs/enabled: 'false'
spec:
containers:
- name: kibana
- name: kibana
readinessProbe:
httpGet:
path: /app/home
@@ -329,3 +317,28 @@ spec:
app.kubernetes.io/name: elasticsearch-exporter
podMetricsEndpoints:
- port: exporter
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kibana
annotations:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: 'true'
spec:
tls:
- hosts:
- '*.k-space.ee'
rules:
- host: kibana.k-space.ee
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kibana-kb-http
port:
number: 5601

File diff suppressed because it is too large

@@ -9,12 +9,13 @@ metadata:
# Source: eck-operator/templates/service-account.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: elastic-operator
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
---
# Source: eck-operator/templates/webhook.yaml
apiVersion: v1
@@ -24,7 +25,7 @@ metadata:
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
---
# Source: eck-operator/templates/configmap.yaml
apiVersion: v1
@@ -34,7 +35,7 @@ metadata:
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
data:
eck.yaml: |-
log-verbosity: 0
@@ -45,6 +46,7 @@ data:
ca-cert-rotate-before: 24h
cert-validity: 8760h
cert-rotate-before: 24h
disable-config-watch: false
exposed-node-labels: [topology.kubernetes.io/.*,failure-domain.beta.kubernetes.io/.*]
set-default-security-context: auto-detect
kube-client-timeout: 60s
@@ -54,7 +56,11 @@ data:
validate-storage-class: true
enable-webhook: true
webhook-name: elastic-webhook.k8s.elastic.co
webhook-port: 9443
operator-namespace: elastic-system
enable-leader-election: true
elasticsearch-observation-interval: 10s
ubi-only: false
---
# Source: eck-operator/templates/cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
@ -63,7 +69,7 @@ metadata:
name: elastic-operator
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
rules:
- apiGroups:
- "authorization.k8s.io"
@@ -151,6 +157,19 @@ rules:
- create
- update
- patch
- apiGroups:
- autoscaling.k8s.elastic.co
resources:
- elasticsearchautoscalers
- elasticsearchautoscalers/status
- elasticsearchautoscalers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- kibana.k8s.elastic.co
resources:
@@ -229,6 +248,32 @@ rules:
- create
- update
- patch
- apiGroups:
- stackconfigpolicy.k8s.elastic.co
resources:
- stackconfigpolicies
- stackconfigpolicies/status
- stackconfigpolicies/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- logstash.k8s.elastic.co
resources:
- logstashes
- logstashes/status
- logstashes/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
@@ -268,11 +313,14 @@ metadata:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
rules:
- apiGroups: ["elasticsearch.k8s.elastic.co"]
resources: ["elasticsearches"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling.k8s.elastic.co"]
resources: ["elasticsearchautoscalers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apm.k8s.elastic.co"]
resources: ["apmservers"]
verbs: ["get", "list", "watch"]
@@ -291,6 +339,12 @@ rules:
- apiGroups: ["maps.k8s.elastic.co"]
resources: ["elasticmapsservers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["stackconfigpolicy.k8s.elastic.co"]
resources: ["stackconfigpolicies"]
verbs: ["get", "list", "watch"]
- apiGroups: ["logstash.k8s.elastic.co"]
resources: ["logstashes"]
verbs: ["get", "list", "watch"]
---
# Source: eck-operator/templates/cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -301,11 +355,14 @@ metadata:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
rules:
- apiGroups: ["elasticsearch.k8s.elastic.co"]
resources: ["elasticsearches"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["autoscaling.k8s.elastic.co"]
resources: ["elasticsearchautoscalers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["apm.k8s.elastic.co"]
resources: ["apmservers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
@@ -324,6 +381,12 @@ rules:
- apiGroups: ["maps.k8s.elastic.co"]
resources: ["elasticmapsservers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["stackconfigpolicy.k8s.elastic.co"]
resources: ["stackconfigpolicies"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["logstash.k8s.elastic.co"]
resources: ["logstashes"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
---
# Source: eck-operator/templates/role-bindings.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -332,7 +395,7 @@ metadata:
name: elastic-operator
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -350,7 +413,7 @@ metadata:
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
spec:
ports:
- name: https
@@ -367,7 +430,7 @@ metadata:
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
spec:
selector:
matchLabels:
@@ -380,21 +443,29 @@ spec:
# Rename the fields "error" to "error.message" and "source" to "event.source"
# This is to avoid a conflict with the ECS "error" and "source" documents.
"co.elastic.logs/raw": "[{\"type\":\"container\",\"json.keys_under_root\":true,\"paths\":[\"/var/log/containers/*${data.kubernetes.container.id}.log\"],\"processors\":[{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"error\",\"to\":\"_error\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_error\",\"to\":\"error.message\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"source\",\"to\":\"_source\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_source\",\"to\":\"event.source\"}]}}]}]"
"checksum/config": a99a5f63f628a1ca8df440c12506cdfbf17827a1175dc5765b05f22f92b12b95
"checksum/config": 8b10381ca4067cf2c56aecc94c799473b09486202e146d2d7e5d6714f4c2e533
labels:
control-plane: elastic-operator
spec:
terminationGracePeriodSeconds: 10
serviceAccountName: elastic-operator
automountServiceAccountToken: true
securityContext:
runAsNonRoot: true
containers:
- image: "docker.elastic.co/eck/eck-operator:2.4.0"
- image: "docker.elastic.co/eck/eck-operator:2.13.0"
imagePullPolicy: IfNotPresent
name: manager
args:
- "manager"
- "--config=/conf/eck.yaml"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
env:
- name: OPERATOR_NAMESPACE
valueFrom:
@ -440,10 +511,9 @@ metadata:
name: elastic-webhook.k8s.elastic.co
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.4.0"
app.kubernetes.io/version: "2.13.0"
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -451,7 +521,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-agent-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -464,7 +534,6 @@ webhooks:
resources:
- agents
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -472,7 +541,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-apm-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -485,7 +554,6 @@ webhooks:
resources:
- apmservers
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -493,7 +561,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-apm-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -506,7 +574,6 @@ webhooks:
resources:
- apmservers
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -514,7 +581,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-beat-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -527,7 +594,6 @@ webhooks:
resources:
- beats
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -535,7 +601,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-ent-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -548,7 +614,6 @@ webhooks:
resources:
- enterprisesearches
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -556,7 +621,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-ent-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -569,7 +634,6 @@ webhooks:
resources:
- enterprisesearches
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -577,7 +641,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-es-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -590,7 +654,6 @@ webhooks:
resources:
- elasticsearches
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -598,7 +661,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-es-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -611,7 +674,26 @@ webhooks:
resources:
- elasticsearches
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-ems-k8s-elastic-co-v1alpha1-mapsservers
failurePolicy: Ignore
name: elastic-ems-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- maps.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- mapsservers
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
@ -619,7 +701,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-kb-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -632,7 +714,6 @@ webhooks:
resources:
- kibanas
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
@ -640,7 +721,7 @@ webhooks:
failurePolicy: Ignore
name: elastic-kb-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
@ -652,4 +733,64 @@ webhooks:
- UPDATE
resources:
- kibanas
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-autoscaling-k8s-elastic-co-v1alpha1-elasticsearchautoscaler
failurePolicy: Ignore
name: elastic-esa-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- autoscaling.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- elasticsearchautoscalers
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-scp-k8s-elastic-co-v1alpha1-stackconfigpolicies
failurePolicy: Ignore
name: elastic-scp-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- stackconfigpolicy.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- stackconfigpolicies
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-logstash-k8s-elastic-co-v1alpha1-logstash
failurePolicy: Ignore
name: elastic-logstash-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- logstash.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- logstashes


@ -1,4 +1,15 @@
To apply changes:
# Etherpad namespace
# For users
Etherpad is a simple, publicly available application for taking notes,
running at [pad.k-space.ee](https://pad.k-space.ee/).
# For administrators
This application is managed by [ArgoCD](https://argocd.k-space.ee/applications/argocd/etherpad).
In case ArgoCD is broken, you can manually deploy changes with:
```
kubectl apply -n etherpad -f application.yml
```
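To watch the manual deploy go through (a sketch; the StatefulSet is named `etherpad` in application.yml):
```
kubectl -n etherpad rollout status statefulset/etherpad
```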


@ -1,8 +1,8 @@
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWMiddlewareClient
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: sso
name: etherpad
spec:
displayName: Etherpad
uri: 'https://pad.k-space.ee/'
@ -12,13 +12,10 @@ kind: StatefulSet
metadata:
name: etherpad
namespace: etherpad
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@midnight"
spec:
# Etherpad does NOT support running multiple replicas due to
# in-application caching https://github.com/ether/etherpad-lite/issues/3680
revisionHistoryLimit: 0
replicas: 1
serviceName: etherpad
selector:
@ -31,7 +28,7 @@ spec:
spec:
containers:
- name: etherpad
image: etherpad/etherpad:1
image: etherpad/etherpad:2
securityContext:
# Etherpad writes session key during start
readOnlyRootFilesystem: false
@ -79,8 +76,8 @@ spec:
selector:
app: etherpad
ports:
- protocol: TCP
port: 9001
- protocol: TCP
port: 9001
---
apiVersion: networking.k8s.io/v1
kind: Ingress
@ -94,19 +91,19 @@ metadata:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: pad.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: etherpad
port:
number: 9001
- host: pad.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: etherpad
port:
number: 9001
tls:
- hosts:
- "*.k-space.ee"
- hosts:
- "*.k-space.ee"
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy


@ -1,5 +1,20 @@
# Freescout
# For users
Freescout scrapes the `info@k-space.ee` and `accounting@k-space.ee` mailboxes
from Wildduck and builds an issue tracker on top of them.
The Freescout user interface is accessible at
[freescout.k-space.ee](https://freescout.k-space.ee/).
Note that Freescout notifications are sent to `@k-space.ee` mailboxes.
Forwarding to a personal mailbox, e.g. `@gmail.com`, can be configured via
[Wildduck webmail](https://webmail.k-space.ee/account/profile).
# For administrators
This application is managed by [ArgoCD](https://argocd.k-space.ee/applications/argocd/freescout).
Should ArgoCD be down, the manifests here can be applied with:
@ -7,3 +22,9 @@ Should ArgoCD be down manifests here can be applied with:
```
kubectl apply -n freescout -f application.yaml
```
If the Kubernetes cron job for picking up mail has not been working for more than
3 days, the mails will not get synced by default. To manually synchronize
Freescout, head to the [Freescout system tools](https://freescout.k-space.ee/system/tools)
page, increase `Days` to an appropriate number and hit the `Fetch Emails` button.
Select `All` if some mails have been opened via Wildduck Webmail during the debugging process.
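A one-off fetch can also be triggered from the CLI instead of waiting for the
next scheduled run (a sketch; `freescout-manual-fetch` is an arbitrary job name,
while the CronJob `freescout-cron` is the one defined in application.yaml):
```
kubectl -n freescout create job --from=cronjob/freescout-cron freescout-manual-fetch
```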


@ -1,8 +1,9 @@
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWMiddlewareClient
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: freescout
namespace: freescout
spec:
displayName: Freescout Middleware
uri: 'https://freescout.k-space.ee'
@ -14,10 +15,11 @@ spec:
name: Remote-Name
user: Remote-User
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWClient
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: freescout
namespace: freescout
spec:
displayName: Freescout
uri: https://freescout.k-space.ee
@ -34,11 +36,78 @@ spec:
- openid
- profile
pkce: false
secretRefreshPod:
apiVersion: v1
kind: Pod
spec:
volumes:
- name: tmp
emptyDir: {}
initContainers:
- name: jq
image: >-
alpine/k8s:1.24.16@sha256:06f8942d87fa17b40795bb9a8eff029a9be3fc3c9bcc13d62071de4cc3324153
command:
- /bin/bash
- '-c'
- >-
rm -fv /tmp/update.sql; jq
'{"name":"oauth.client_id","value":$ENV.OIDC_CLIENT_ID} | "UPDATE
options SET value=\(.value|tostring|@sh) WHERE
name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql; jq
'{"name":"oauth.client_secret","value":$ENV.OIDC_CLIENT_SECRET} |
"UPDATE options SET value=\(.value|tostring|@sh) WHERE
name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql; jq
'{"name":"oauth.auth_url","value":$ENV.OIDC_IDP_AUTH_URI} |
"UPDATE options SET value=\(.value + "?scope=openid+profile"
|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r
>> /tmp/update.sql; jq
'{"name":"oauth.token_url","value":$ENV.OIDC_IDP_TOKEN_URI} |
"UPDATE options SET value=\(.value|tostring|@sh) WHERE
name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql; jq
'{"name":"oauth.user_url","value":$ENV.OIDC_IDP_USERINFO_URI}
| "UPDATE options SET value=\(.value|tostring|@sh) WHERE
name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
cat /tmp/update.sql
envFrom:
- secretRef:
name: oidc-client-freescout-owner-secrets
resources: {}
volumeMounts:
- name: tmp
mountPath: /tmp
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
containers:
- name: mysql
image: mysql
command:
- /bin/bash
- '-c'
- >-
mysql -u kspace_freescout kspace_freescout -h 172.20.36.1
-p${MYSQL_PWD} < /tmp/update.sql
env:
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: freescout-secrets
key: DB_PASS
resources: {}
volumeMounts:
- name: tmp
mountPath: /tmp
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: OnFailure
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: oidc-gateway
name: freescout
namespace: freescout
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
@ -65,6 +134,7 @@ apiVersion: v1
kind: Service
metadata:
name: freescout
namespace: freescout
spec:
type: ClusterIP
selector:
@ -78,9 +148,11 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: freescout
namespace: freescout
labels:
app: freescout
spec:
revisionHistoryLimit: 0
selector:
matchLabels:
app: freescout
@ -92,7 +164,7 @@ spec:
spec:
containers:
- name: freescout
image: harbor.k-space.ee/k-space/freescout@sha256:de1a6c8bd1f285f6f6c61aa48921a884fe7a1496655b31c9536805397c01ee58
image: harbor.k-space.ee/k-space/freescout
ports:
- containerPort: 8080
env:
@ -143,17 +215,18 @@ apiVersion: batch/v1
kind: CronJob
metadata:
name: freescout-cron
namespace: freescout
spec:
schedule: "0,30 * * * *" # Should be every minute in theory, keeps hanging
schedule: "0,30 * * * *" # Should be every minute in theory, keeps hanging
jobTemplate:
spec:
activeDeadlineSeconds: 1800 # this is unholy https://github.com/freescout-helpdesk/freescout/blob/dist/app/Console/Kernel.php
activeDeadlineSeconds: 1800 # this is unholy https://github.com/freescout-helpdesk/freescout/blob/dist/app/Console/Kernel.php
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: freescout-cron
image: harbor.k-space.ee/k-space/freescout@sha256:de1a6c8bd1f285f6f6c61aa48921a884fe7a1496655b31c9536805397c01ee58
image: harbor.k-space.ee/k-space/freescout
imagePullPolicy: Always
command:
- php
@ -208,6 +281,7 @@ apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim
metadata:
name: attachments
namespace: freescout
spec:
capacity: 10Gi
class: external
@ -216,14 +290,15 @@ apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: freescout
namespace: freescout
spec:
groups:
- name: freescout
rules:
- alert: FreescoutSyncBroken
expr: time() - wildduck_last_login{email=~"(info|accounting)@k-space.ee"} > 300
for: 10m
labels:
severity: warning
annotations:
summary: Freescout mailbox synchronization is broken
- alert: FreescoutSyncBroken
expr: time() - wildduck_last_login{email=~"(info|accounting)@k-space.ee"} > 300
for: 10m
labels:
severity: warning
annotations:
summary: Freescout mailbox synchronization is broken


@ -1,50 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: reset-oidc-config
spec:
template:
spec:
volumes:
- name: tmp
emptyDir: {}
initContainers:
- name: jq
image: alpine/k8s:1.24.16@sha256:06f8942d87fa17b40795bb9a8eff029a9be3fc3c9bcc13d62071de4cc3324153
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
envFrom:
- secretRef:
name: oidc-client-freescout-owner-secrets
command:
- /bin/bash
- -c
- rm -fv /tmp/update.sql;
jq '{"name":"oauth.client_id","value":$ENV.OIDC_CLIENT_ID} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.client_secret","value":$ENV.OIDC_CLIENT_SECRET} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.auth_url","value":$ENV.OIDC_GATEWAY_AUTH_URI} | "UPDATE options SET value=\(.value + "?scope=openid+profile" |tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.token_url","value":$ENV.OIDC_GATEWAY_TOKEN_URI} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.user_url","value":$ENV.OIDC_GATEWAY_USERINFO_URI} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
cat /tmp/update.sql
containers:
- name: mysql
image: mysql
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
env:
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: freescout-secrets
key: DB_PASS
command:
- /bin/bash
- -c
- mysql -u kspace_freescout kspace_freescout -h 172.20.36.1 -p${MYSQL_PWD} < /tmp/update.sql
restartPolicy: OnFailure
backoffLimit: 4

freeswitch/.gitignore vendored Normal file

@ -0,0 +1 @@
PASSWORDS.xml


@ -0,0 +1,14 @@
<include>
<X-PRE-PROCESS cmd="set" data="default_password=">
<X-PRE-PROCESS cmd="set" data="ipcall_password="/>
<X-PRE-PROCESS cmd="set" data="1000_password="/>
<X-PRE-PROCESS cmd="set" data="1001_password="/>
<X-PRE-PROCESS cmd="set" data="1002_password="/>
<X-PRE-PROCESS cmd="set" data="1003_password="/>
<X-PRE-PROCESS cmd="set" data="1004_password="/>
<X-PRE-PROCESS cmd="set" data="1005_password="/>
<X-PRE-PROCESS cmd="set" data="1006_password="/>
<X-PRE-PROCESS cmd="set" data="1007_password="/>
<X-PRE-PROCESS cmd="set" data="1008_password="/>
<X-PRE-PROCESS cmd="set" data="1009_password="/>
</include>

freeswitch/README.md Normal file

@ -0,0 +1,3 @@
```
kubectl -n freeswitch create secret generic freeswitch-passwords --from-file freeswitch/PASSWORDS.xml
```
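`PASSWORDS.xml` is gitignored; fill in the template shown above before creating
the secret. To sanity-check what actually landed in the cluster (a sketch, not
part of the official docs):
```
kubectl -n freeswitch get secret freeswitch-passwords \
  -o jsonpath='{.data.PASSWORDS\.xml}' | base64 -d
```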

freeswitch/application.yaml Normal file

@ -0,0 +1,567 @@
apiVersion: v1
kind: Service
metadata:
name: freeswitch
namespace: freeswitch
annotations:
external-dns.alpha.kubernetes.io/hostname: freeswitch.k-space.ee
metallb.universe.tf/address-pool: eenet
metallb.universe.tf/ip-allocated-from-pool: eenet
spec:
ports:
- name: sip-internal-udp
protocol: UDP
port: 5060
targetPort: 5060
nodePort: 31787
- name: sip-nat-udp
protocol: UDP
port: 5070
targetPort: 5070
nodePort: 32241
- name: sip-external-udp
protocol: UDP
port: 5080
targetPort: 5080
nodePort: 31354
- name: sip-data-10000
protocol: UDP
port: 10000
targetPort: 10000
nodePort: 30786
- name: sip-data-10001
protocol: UDP
port: 10001
targetPort: 10001
nodePort: 31788
- name: sip-data-10002
protocol: UDP
port: 10002
targetPort: 10002
nodePort: 30247
- name: sip-data-10003
protocol: UDP
port: 10003
targetPort: 10003
nodePort: 32389
- name: sip-data-10004
protocol: UDP
port: 10004
targetPort: 10004
nodePort: 30723
- name: sip-data-10005
protocol: UDP
port: 10005
targetPort: 10005
nodePort: 30295
- name: sip-data-10006
protocol: UDP
port: 10006
targetPort: 10006
nodePort: 30782
- name: sip-data-10007
protocol: UDP
port: 10007
targetPort: 10007
nodePort: 32165
- name: sip-data-10008
protocol: UDP
port: 10008
targetPort: 10008
nodePort: 30282
- name: sip-data-10009
protocol: UDP
port: 10009
targetPort: 10009
nodePort: 31325
- name: sip-data-10010
protocol: UDP
port: 10010
targetPort: 10010
nodePort: 31234
selector:
app: freeswitch
type: LoadBalancer
externalTrafficPolicy: Local
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
internalTrafficPolicy: Cluster
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: freeswitch-sounds
namespace: freeswitch
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
storageClassName: longhorn
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: freeswitch
namespace: freeswitch
labels:
app: freeswitch
annotations:
reloader.stakater.com/auto: "true"
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: freeswitch
template:
metadata:
labels:
app: freeswitch
spec:
volumes:
- name: config
configMap:
name: freeswitch-config
defaultMode: 420
- name: directory
configMap:
name: freeswitch-directory
defaultMode: 420
- name: sounds
persistentVolumeClaim:
claimName: freeswitch-sounds
- name: passwords
secret:
secretName: freeswitch-passwords
containers:
- name: freeswitch
image: dheaps/freeswitch
env:
- name: SOUND_TYPES
value: en-us-callie
- name: SOUND_RATES
value: "32000"
resources: {}
volumeMounts:
- name: config
mountPath: /etc/freeswitch/sip_profiles/external/ipcall.xml
subPath: ipcall.xml
- name: config
mountPath: /etc/freeswitch/dialplan/default/00_outbound_ipcall.xml
subPath: 00_outbound_ipcall.xml
- name: config
mountPath: /etc/freeswitch/dialplan/public.xml
subPath: dialplan.xml
- name: config
mountPath: /etc/freeswitch/autoload_configs/switch.conf.xml
subPath: switch.xml
- name: config
mountPath: /etc/freeswitch/vars.xml
subPath: vars.xml
- name: passwords
mountPath: /etc/freeswitch/PASSWORDS.xml
subPath: PASSWORDS.xml
- name: directory
mountPath: /etc/freeswitch/directory/default
- name: sounds
mountPath: /usr/share/freeswitch/sounds
---
apiVersion: v1
kind: ConfigMap
metadata:
name: freeswitch-config
namespace: freeswitch
data:
dialplan.xml: |
<!--
NOTICE:
This context is usually accessed via the external sip profile listening on port 5080.
It is recommended to have separate inbound and outbound contexts. Not only for security
but clearing up why you would need to do such a thing. You don't want outside un-authenticated
callers hitting your default context which allows dialing calls thru your providers and results
in Toll Fraud.
-->
<!-- http://wiki.freeswitch.org/wiki/Dialplan_XML -->
<include>
<context name="public">
<extension name="unloop">
<condition field="${unroll_loops}" expression="^true$"/>
<condition field="${sip_looped_call}" expression="^true$">
<action application="deflect" data="${destination_number}"/>
</condition>
</extension>
<!--
Tag anything pass thru here as an outside_call so you can make sure not
to create any routing loops based on the conditions that it came from
the outside of the switch.
-->
<extension name="outside_call" continue="true">
<condition>
<action application="set" data="outside_call=true"/>
<action application="export" data="RFC2822_DATE=${strftime(%a, %d %b %Y %T %z)}"/>
</condition>
</extension>
<extension name="call_debug" continue="true">
<condition field="${call_debug}" expression="^true$" break="never">
<action application="info"/>
</condition>
</extension>
<extension name="public_extensions">
<condition field="destination_number" expression="^(10[01][0-9])$">
<action application="transfer" data="$1 XML default"/>
</condition>
</extension>
<extension name="public_conference_extensions">
<condition field="destination_number" expression="^(3[5-8][01][0-9])$">
<action application="transfer" data="$1 XML default"/>
</condition>
</extension>
<!--
You can place files in the public directory to get included.
-->
<X-PRE-PROCESS cmd="include" data="public/*.xml"/>
<!--
If you have made it this far lets challenge the caller and if they authenticate
lets try what they dialed in the default context. (commented out by default)
-->
<!-- TODO:
<extension name="check_auth" continue="true">
<condition field="${sip_authorized}" expression="^true$" break="never">
<anti-action application="respond" data="407"/>
</condition>
</extension>
-->
<extension name="transfer_to_default">
<condition>
<!-- TODO: proper ring grouping -->
<action application="bridge" data="user/1004@freeswitch.k-space.ee,user/1003@freeswitch.k-space.ee,sofia/gateway/ipcall/53543824"/>
</condition>
</extension>
</context>
</include>
ipcall.xml: |
<include>
<gateway name="ipcall">
<param name="proxy" value="sip.ipcall.ee"/>
<param name="register" value="true"/>
<param name="realm" value="sip.ipcall.ee"/>
<param name="username" value="6659652"/>
<param name="password" value="$${ipcall_password}"/>
<param name="from-user" value="6659652"/>
<param name="from-domain" value="sip.ipcall.ee"/>
<param name="extension" value="ring_group/default"/>
</gateway>
</include>
00_outbound_ipcall.xml: |
<extension name="outbound">
<!-- TODO: check toll_allow ? -->
<condition field="destination_number" expression="^(\d+)$">
<action application="set" data="sip_invite_domain=sip.ipcall.ee"/>
<action application="bridge" data="sofia/gateway/ipcall/${destination_number}"/>
</condition>
</extension>
switch.xml: |
<configuration name="switch.conf" description="Core Configuration">
<cli-keybindings>
<key name="1" value="help"/>
<key name="2" value="status"/>
<key name="3" value="show channels"/>
<key name="4" value="show calls"/>
<key name="5" value="sofia status"/>
<key name="6" value="reloadxml"/>
<key name="7" value="console loglevel 0"/>
<key name="8" value="console loglevel 7"/>
<key name="9" value="sofia status profile internal"/>
<key name="10" value="sofia profile internal siptrace on"/>
<key name="11" value="sofia profile internal siptrace off"/>
<key name="12" value="version"/>
</cli-keybindings>
<default-ptimes>
</default-ptimes>
<settings>
<param name="colorize-console" value="true"/>
<param name="dialplan-timestamps" value="false"/>
<param name="max-db-handles" value="50"/>
<param name="db-handle-timeout" value="10"/>
<param name="max-sessions" value="1000"/>
<param name="sessions-per-second" value="30"/>
<param name="loglevel" value="debug"/>
<param name="mailer-app" value="sendmail"/>
<param name="mailer-app-args" value="-t"/>
<param name="dump-cores" value="yes"/>
<param name="rtp-start-port" value="10000"/>
<param name="rtp-end-port" value="10010"/>
</settings>
</configuration>
vars.xml: |
<include>
<X-PRE-PROCESS cmd="set" data="disable_system_api_commands=true"/>
<X-PRE-PROCESS cmd="set" data="sound_prefix=$${sounds_dir}/en/us/callie"/>
<X-PRE-PROCESS cmd="set" data="domain=freeswitch.k-space.ee"/>
<X-PRE-PROCESS cmd="set" data="domain_name=$${domain}"/>
<X-PRE-PROCESS cmd="set" data="hold_music=local_stream://moh"/>
<X-PRE-PROCESS cmd="set" data="use_profile=external"/>
<X-PRE-PROCESS cmd="set" data="rtp_sdes_suites=AEAD_AES_256_GCM_8|AEAD_AES_128_GCM_8|AES_CM_256_HMAC_SHA1_80|AES_CM_192_HMAC_SHA1_80|AES_CM_128_HMAC_SHA1_80|AES_CM_256_HMAC_SHA1_32|AES_CM_192_HMAC_SHA1_32|AES_CM_128_HMAC_SHA1_32|AES_CM_128_NULL_AUTH"/>
<X-PRE-PROCESS cmd="set" data="global_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8"/>
<X-PRE-PROCESS cmd="set" data="outbound_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8"/>
<X-PRE-PROCESS cmd="set" data="xmpp_client_profile=xmppc"/>
<X-PRE-PROCESS cmd="set" data="xmpp_server_profile=xmpps"/>
<X-PRE-PROCESS cmd="set" data="bind_server_ip=auto"/>
<X-PRE-PROCESS cmd="stun-set" data="external_rtp_ip=host:freeswitch.k-space.ee"/>
<X-PRE-PROCESS cmd="stun-set" data="external_sip_ip=host:freeswitch.k-space.ee"/>
<X-PRE-PROCESS cmd="set" data="unroll_loops=true"/>
<X-PRE-PROCESS cmd="set" data="outbound_caller_name=FreeSWITCH"/>
<X-PRE-PROCESS cmd="set" data="outbound_caller_id=0000000000"/>
<X-PRE-PROCESS cmd="set" data="call_debug=false"/>
<X-PRE-PROCESS cmd="set" data="console_loglevel=info"/>
<X-PRE-PROCESS cmd="set" data="default_areacode=372"/>
<X-PRE-PROCESS cmd="set" data="default_country=EE"/>
<X-PRE-PROCESS cmd="set" data="presence_privacy=false"/>
<X-PRE-PROCESS cmd="set" data="au-ring=%(400,200,383,417);%(400,2000,383,417)"/>
<X-PRE-PROCESS cmd="set" data="be-ring=%(1000,3000,425)"/>
<X-PRE-PROCESS cmd="set" data="ca-ring=%(2000,4000,440,480)"/>
<X-PRE-PROCESS cmd="set" data="cn-ring=%(1000,4000,450)"/>
<X-PRE-PROCESS cmd="set" data="cy-ring=%(1500,3000,425)"/>
<X-PRE-PROCESS cmd="set" data="cz-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="de-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="dk-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="dz-ring=%(1500,3500,425)"/>
<X-PRE-PROCESS cmd="set" data="eg-ring=%(2000,1000,475,375)"/>
<X-PRE-PROCESS cmd="set" data="es-ring=%(1500,3000,425)"/>
<X-PRE-PROCESS cmd="set" data="fi-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="fr-ring=%(1500,3500,440)"/>
<X-PRE-PROCESS cmd="set" data="hk-ring=%(400,200,440,480);%(400,3000,440,480)"/>
<X-PRE-PROCESS cmd="set" data="hu-ring=%(1250,3750,425)"/>
<X-PRE-PROCESS cmd="set" data="il-ring=%(1000,3000,400)"/>
<X-PRE-PROCESS cmd="set" data="in-ring=%(400,200,425,375);%(400,2000,425,375)"/>
<X-PRE-PROCESS cmd="set" data="jp-ring=%(1000,2000,420,380)"/>
<X-PRE-PROCESS cmd="set" data="ko-ring=%(1000,2000,440,480)"/>
<X-PRE-PROCESS cmd="set" data="pk-ring=%(1000,2000,400)"/>
<X-PRE-PROCESS cmd="set" data="pl-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="ro-ring=%(1850,4150,475,425)"/>
<X-PRE-PROCESS cmd="set" data="rs-ring=%(1000,4000,425)"/>
<X-PRE-PROCESS cmd="set" data="ru-ring=%(800,3200,425)"/>
<X-PRE-PROCESS cmd="set" data="sa-ring=%(1200,4600,425)"/>
<X-PRE-PROCESS cmd="set" data="tr-ring=%(2000,4000,450)"/>
<X-PRE-PROCESS cmd="set" data="uk-ring=%(400,200,400,450);%(400,2000,400,450)"/>
<X-PRE-PROCESS cmd="set" data="us-ring=%(2000,4000,440,480)"/>
<X-PRE-PROCESS cmd="set" data="bong-ring=v=-7;%(100,0,941.0,1477.0);v=-7;>=2;+=.1;%(1400,0,350,440)"/>
<X-PRE-PROCESS cmd="set" data="beep=%(1000,0,640)"/>
<X-PRE-PROCESS cmd="set" data="sit=%(274,0,913.8);%(274,0,1370.6);%(380,0,1776.7)"/>
<X-PRE-PROCESS cmd="set" data="df_us_ssn=(?!219099999|078051120)(?!666|000|9\d{2})\d{3}(?!00)\d{2}(?!0{4})\d{4}"/>
<X-PRE-PROCESS cmd="set" data="df_luhn=?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|6(?:011|5[0-9]{2})[0-9]{12}|(?:2131|1800|35\d{3})\d{11}"/>
<XX-PRE-PROCESS cmd="set" data="digits_dialed_filter=(($${df_luhn})|($${df_us_ssn}))"/>
<X-PRE-PROCESS cmd="set" data="default_provider=sip.ipcall.ee"/>
<X-PRE-PROCESS cmd="set" data="default_provider_username="/>
<X-PRE-PROCESS cmd="set" data="default_provider_password="/>
<X-PRE-PROCESS cmd="set" data="default_provider_from_domain=sip.ipcall.ee"/>
<X-PRE-PROCESS cmd="set" data="default_provider_register=true"/>
<X-PRE-PROCESS cmd="set" data="default_provider_contact=1004"/>
<X-PRE-PROCESS cmd="set" data="sip_tls_version=tlsv1,tlsv1.1,tlsv1.2"/>
<X-PRE-PROCESS cmd="set" data="sip_tls_ciphers=ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"/>
<X-PRE-PROCESS cmd="set" data="internal_auth_calls=true"/>
<X-PRE-PROCESS cmd="set" data="internal_sip_port=5060"/>
<X-PRE-PROCESS cmd="set" data="internal_tls_port=5061"/>
<X-PRE-PROCESS cmd="set" data="internal_ssl_enable=false"/>
<X-PRE-PROCESS cmd="set" data="external_auth_calls=false"/>
<X-PRE-PROCESS cmd="set" data="external_sip_port=5080"/>
<X-PRE-PROCESS cmd="set" data="external_tls_port=5081"/>
<X-PRE-PROCESS cmd="set" data="external_ssl_enable=false"/>
<X-PRE-PROCESS cmd="set" data="rtp_video_max_bandwidth_in=3mb"/>
<X-PRE-PROCESS cmd="set" data="rtp_video_max_bandwidth_out=3mb"/>
<X-PRE-PROCESS cmd="set" data="suppress_cng=true"/>
<X-PRE-PROCESS cmd="set" data="rtp_liberal_dtmf=true"/>
<X-PRE-PROCESS cmd="set" data="video_mute_png=$${images_dir}/default-mute.png"/>
<X-PRE-PROCESS cmd="set" data="video_no_avatar_png=$${images_dir}/default-avatar.png"/>
<X-PRE-PROCESS cmd="include" data="PASSWORDS.xml"/>
</include>
---
apiVersion: v1
kind: ConfigMap
metadata:
name: freeswitch-directory
namespace: freeswitch
data:
1000.xml: |
<include>
<user id="1000">
<params>
<param name="password" value="$${1000_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1000"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1000"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1001.xml: |
<include>
<user id="1001">
<params>
<param name="password" value="$${1001_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1001"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1001"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1002.xml: |
<include>
<user id="1002">
<params>
<param name="password" value="$${1002_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1002"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1002"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1003.xml: |
<include>
<user id="1003">
<params>
<param name="password" value="$${1003_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1003"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value="Erki A"/>
<variable name="effective_caller_id_number" value="1003"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1004.xml: |
<include>
<user id="1004">
<params>
<param name="password" value="$${1004_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1004"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value="Erki A"/>
<variable name="effective_caller_id_number" value="1004"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1005.xml: |
<include>
<user id="1005">
<params>
<param name="password" value="$${1005_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1005"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1005"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1006.xml: |
<include>
<user id="1006">
<params>
<param name="password" value="$${1006_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1006"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1006"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1007.xml: |
<include>
<user id="1007">
<params>
<param name="password" value="$${1007_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1007"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1007"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1008.xml: |
<include>
<user id="1008">
<params>
<param name="password" value="$${1008_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1008"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1008"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>
1009.xml: |
<include>
<user id="1009">
<params>
<param name="password" value="$${1009_password}"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,local"/>
<variable name="accountcode" value="1009"/>
<variable name="user_context" value="default"/>
<variable name="effective_caller_id_name" value=""/>
<variable name="effective_caller_id_number" value="1009"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
</variables>
</user>
</include>


@ -0,0 +1,49 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: freeswitch
spec:
podSelector:
matchLabels:
app: freeswitch
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
- from:
- ipBlock:
cidr: 100.101.0.0/16
- from:
- ipBlock:
cidr: 100.102.0.0/16
- from:
- ipBlock:
cidr: 81.90.125.224/32 # Lauri home
- from:
- ipBlock:
cidr: 172.20.8.241/32 # Erki A
- from:
- ipBlock:
cidr: 212.47.211.10/32 # Elisa SIP
egress:
- to:
- ipBlock:
cidr: 212.47.211.10/32 # Elisa SIP
- to:
- ipBlock:
cidr: 195.222.16.38/32 # Elisa SIP
- to:
ports:
- port: 53
protocol: UDP

frigate/README.md Normal file

@ -0,0 +1,4 @@
```
helm repo add blakeblackshear https://blakeblackshear.github.io/blakeshome-charts/
helm template -n frigate --release-name frigate blakeblackshear/frigate --include-crds -f values.yaml > application.yml
```
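The rendered manifest is presumably applied like the other applications in this
repo (hypothetical, mirroring the etherpad/freescout READMEs; auth.yml,
rabbitmq.yml, storage-class.yml, storage.yml and transcode.yml live alongside it):
```
kubectl apply -n frigate -f application.yml -f auth.yml -f rabbitmq.yml \
  -f storage-class.yml -f storage.yml -f transcode.yml
```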

frigate/application.yml Normal file

@ -0,0 +1,279 @@
---
# Source: frigate/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: frigate
namespace: frigate
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/managed-by: Helm
data:
config.yml: |
mqtt:
host: frigate-mqtt
port: 1883
topic_prefix: frigate
client_id: frigate
user: '{FRIGATE_MQTT_USERNAME}'
password: '{FRIGATE_MQTT_PASSWORD}'
stats_interval: 60
detectors:
# coral:
# type: edgetpu
# device: usb
#cpu1:
#type: cpu
ov:
type: openvino
device: CPU
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:
enabled: True
retain:
days: 3
mode: motion
events:
retain:
default: 30
mode: motion
cameras:
server_room:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/server_room
roles:
- detect
- rtmp
- record
chaos:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/chaos
roles:
- detect
- rtmp
- record
cyber:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/cyber
roles:
- detect
- rtmp
- record
workshop:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/workshop
roles:
- detect
- rtmp
- record
---
# Source: frigate/templates/config-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: frigate-config
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "100Mi"
storageClassName: "frigate-config"
---
# Source: frigate/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: frigate
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/version: "0.14.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 5000
protocol: TCP
targetPort: http
- name: http-auth
port: 8971
protocol: TCP
targetPort: http-auth
- name: rtmp
port: 1935
protocol: TCP
targetPort: rtmp
- name: rtsp
port: 8554
protocol: TCP
targetPort: rtsp
selector:
app.kubernetes.io/name: frigate
app.kubernetes.io/instance: frigate
---
# Source: frigate/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: frigate
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/version: "0.14.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 3
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: frigate
app.kubernetes.io/instance: frigate
template:
metadata:
labels:
app.kubernetes.io/name: frigate
app.kubernetes.io/instance: frigate
annotations:
checksum/configmap: 9de9e29d499af45a0e7392032d64d26d8e13e12211971a307f201c97ac91f173
spec:
containers:
- name: frigate
image: "ghcr.io/blakeblackshear/frigate:0.14.0"
imagePullPolicy: IfNotPresent
securityContext:
{}
ports:
- name: http
containerPort: 5000
protocol: TCP
- name: http-auth
containerPort: 8971
protocol: TCP
- name: rtmp
containerPort: 1935
protocol: TCP
- name: rtsp
containerPort: 8554
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
scheme: HTTP
initialDelaySeconds: 30
failureThreshold: 5
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /
port: http
scheme: HTTP
initialDelaySeconds: 30
failureThreshold: 5
timeoutSeconds: 10
env:
envFrom:
- secretRef:
name: frigate-rstp-credentials
- secretRef:
name: frigate-mqtt-credentials
volumeMounts:
- mountPath: /config/config.yml
subPath: config.yml
name: configmap
- mountPath: /config
name: config
- mountPath: /data
name: data
- mountPath: /media
name: media
- name: dshm
mountPath: /dev/shm
- name: tmp
mountPath: /tmp
resources:
{}
volumes:
- name: configmap
configMap:
name: frigate
- name: config
persistentVolumeClaim:
claimName: frigate-config
- name: data
emptyDir: {}
- name: media
persistentVolumeClaim:
claimName: frigate-storage
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 4Gi
- name: tmp
emptyDir:
medium: Memory
sizeLimit: 4Gi
---
# Source: frigate/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: frigate
labels:
app.kubernetes.io/name: frigate
helm.sh/chart: frigate-7.6.0
app.kubernetes.io/instance: frigate
app.kubernetes.io/version: "0.14.0"
app.kubernetes.io/managed-by: Helm
annotations:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: frigate-frigate@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- "*.k-space.ee"
secretName:
rules:
- host: "frigate.k-space.ee"
http:
paths:
- path: /
pathType: "ImplementationSpecific"
backend:
service:
name: frigate
port:
name: http

frigate/auth.yml Normal file

@ -0,0 +1,10 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: frigate
spec:
displayName: Frigate
uri: 'https://frigate.k-space.ee/'
allowedGroups:
- k-space:legalmember

frigate/rabbitmq.yml Normal file

@ -0,0 +1,12 @@
apiVersion: rabbitmq.com/v1beta1
kind: RabbitmqCluster
metadata:
name: frigate-mqtt
spec:
replicas: 3
persistence:
storageClassName: rabbitmq
storage: 10Gi
rabbitmq:
additionalPlugins:
- rabbitmq_mqtt

frigate/storage-class.yml Normal file

@ -0,0 +1,28 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: frigate-config
provisioner: csi.proxmox.sinextra.dev
parameters:
cache: none
csi.storage.k8s.io/fstype: xfs
ssd: 'true'
storage: ks-pvs
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: frigate-data
provisioner: csi.proxmox.sinextra.dev
parameters:
cache: none
csi.storage.k8s.io/fstype: xfs
shared: 'true'
ssd: 'false'
storage: ks-pvs-nas
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer

frigate/storage.yml Normal file

@ -0,0 +1,32 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: frigate-storage
spec:
persistentVolumeReclaimPolicy: Retain
capacity:
storage: 1Ti
accessModes:
- ReadWriteMany
storageClassName: ""
nfs:
server: 172.21.0.7
path: /nas/k6/frigate
mountOptions:
- vers=4
- minorversion=1
- noac
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: frigate-storage
spec:
volumeName: frigate-storage
storageClassName: ""
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Ti

frigate/transcode.yml Normal file

@ -0,0 +1,80 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: go2rtc
labels:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc
template:
metadata:
labels:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- frigate
topologyKey: "kubernetes.io/hostname"
# nodeSelector:
# dedicated: nvr
# tolerations:
# - key: dedicated
# operator: Equal
# value: nvr
# effect: NoSchedule
containers:
- name: go2rtc
image: alexxit/go2rtc
ports:
- name: rtsp
containerPort: 8554
protocol: TCP
- name: api
containerPort: 1984
protocol: TCP
volumeMounts:
- mountPath: /config/go2rtc.yaml
subPath: config.yml
name: config
resources:
{}
volumes:
- name: config
secret:
secretName: go2rtc-config
items:
- key: config.yml
path: config.yml
---
apiVersion: v1
kind: Service
metadata:
name: go2rtc
labels:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc
spec:
type: ClusterIP
ipFamilyPolicy: SingleStack
ports:
- name: rtsp
port: 8554
protocol: TCP
targetPort: rtsp
selector:
app.kubernetes.io/name: go2rtc
app.kubernetes.io/instance: go2rtc

frigate/values.yaml Normal file

@ -0,0 +1,178 @@
# Default values for frigate.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate
image:
# -- Docker registry/repository to pull the image from
repository: ghcr.io/blakeblackshear/frigate
# -- Overrides the default tag (appVersion) used in Chart.yaml ([Docker Hub](https://hub.docker.com/r/blakeblackshear/frigate/tags?page=1))
tag:
# -- Docker image pull policy
pullPolicy: IfNotPresent
# -- Docker image pull secrets
imagePullSecrets: []
# -- additional ENV variables to set. Prefix with FRIGATE_ to target Frigate configuration values
env: {}
# TZ: UTC
# -- set environment variables from Secret(s)
envFromSecrets:
# secrets are required before `helm install`
- frigate-rstp-credentials
- frigate-mqtt-credentials
coral:
# -- enables the use of a Coral device
enabled: false
# -- path on the host to which to mount the Coral device
hostPath: /dev/bus/usb
gpu:
nvidia:
# -- Enables NVIDIA GPU compatibility. Must also use the "amd64nvidia" tagged image
enabled: false
# -- Overrides the default runtimeClassName
runtimeClassName:
# -- amount of shared memory to use for caching
shmSize: 4Gi
# -- use memory for tmpfs (mounted to /tmp)
tmpfs:
enabled: true
sizeLimit: 4Gi
# -- frigate configuration - see [Docs](https://docs.frigate.video/configuration/index) for more info
config: |
mqtt:
host: frigate-mqtt
port: 1883
topic_prefix: frigate
client_id: frigate
user: '{FRIGATE_MQTT_USERNAME}'
password: '{FRIGATE_MQTT_PASSWORD}'
stats_interval: 60
detectors:
# coral:
# type: edgetpu
# device: usb
#cpu1:
#type: cpu
ov:
type: openvino
device: CPU
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:
enabled: True
retain:
days: 3
mode: motion
events:
retain:
default: 30
mode: motion
cameras:
server_room:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/server_room
roles:
- detect
- rtmp
- record
chaos:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/chaos
roles:
- detect
- rtmp
- record
cyber:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/cyber
roles:
- detect
- rtmp
- record
workshop:
ffmpeg:
inputs:
- path: rtsp://go2rtc:8554/workshop
roles:
- detect
- rtmp
- record
# Probes configuration
probes:
liveness:
enabled: true
initialDelaySeconds: 30
failureThreshold: 5
timeoutSeconds: 10
readiness:
enabled: true
initialDelaySeconds: 30
failureThreshold: 5
timeoutSeconds: 10
startup:
enabled: false
failureThreshold: 30
periodSeconds: 10
service:
type: ClusterIP
port: 5000
annotations: {}
labels: {}
loadBalancerIP:
ipFamilyPolicy: SingleStack
ipFamilies: []
ingress:
enabled: true
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: frigate-frigate@kubernetescrd
hosts:
- host: frigate.k-space.ee
paths:
- path: '/'
portName: http
tls:
- hosts:
- "*.k-space.ee"
persistence:
config:
enabled: true
storageClass: "frigate-config"
accessMode: ReadWriteOnce
size: 100Mi
skipuninstall: false
media:
enabled: true
existingClaim: "frigate-storage"
skipuninstall: true


@ -11,6 +11,7 @@ spec:
kind: ClusterIssuer
name: default
secretName: git-tls
revisionHistoryLimit: 1
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
@ -32,8 +33,8 @@ spec:
- key: secret
value: "%(plaintext)s"
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWClient
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: gitea
spec:
@ -52,7 +53,48 @@ spec:
availableScopes:
- openid
- profile
overrideIncomingScopes: true
pkce: false
secretRefreshPod:
apiVersion: v1
kind: Pod
metadata:
name: reset-oidc-config
spec:
volumes:
- name: tmp
emptyDir: {}
initContainers:
- name: jq
image: alpine/k8s:1.24.16@sha256:06f8942d87fa17b40795bb9a8eff029a9be3fc3c9bcc13d62071de4cc3324153
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
envFrom:
- secretRef:
name: oidc-client-gitea-owner-secrets
command:
- /bin/bash
- -c
- jq '{"strategyKey":"OpenID","config":{"Provider":"openidConnect","ClientID":$ENV.OIDC_CLIENT_ID,"ClientSecret":$ENV.OIDC_CLIENT_SECRET,"OpenIDConnectAutoDiscoveryURL":"https://auth.k-space.ee/.well-known/openid-configuration","CustomURLMapping":null,"IconURL":"","Scopes":null,"RequiredClaimName":"","RequiredClaimValue":"","GroupClaimName":"","AdminGroup":"","GroupTeamMap":"","GroupTeamMapRemoval":false,"RestrictedGroup":""}} | "UPDATE login_source SET cfg=\(.config|tostring|@sh) WHERE name=\(.strategyKey|tostring|@sh) LIMIT 1"' -n -r > /tmp/update.sql
containers:
- name: mysql
image: mysql
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
env:
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: gitea-secrets
key: GITEA__DATABASE__PASSWD
command:
- /bin/bash
- -c
- mysql -u kspace_git kspace_git -h 172.20.36.1 -p${MYSQL_PWD} < /tmp/update.sql
---
apiVersion: apps/v1
kind: StatefulSet
@ -80,7 +122,7 @@ spec:
runAsNonRoot: true
containers:
- name: gitea
image: gitea/gitea:1.21.5-rootless
image: gitea/gitea:1.22.2-rootless
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true


@ -1,6 +1,6 @@
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWClient
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: grafana
spec:
@ -18,10 +18,27 @@ spec:
availableScopes:
- openid
- profile
- groups
tokenEndpointAuthMethod: none
---
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-datasources
data:
prometheus.yaml: |
apiVersion: 1
prune: true
datasources:
- name: Prometheus
type: prometheus
orgId: 1
url: http://prometheus-operated.monitoring.svc.cluster.local:9090
version: 1
editable: false
---
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-config
data:
@ -31,14 +48,19 @@ data:
[server]
domain = grafana.k-space.ee
root_url = https://%(domain)s/
[auth]
oauth_allow_insecure_email_lookup=true
[auth.basic]
enabled = false
[auth.generic_oauth]
name = OAuth
icon = signin
enabled = true
empty_scopes = false
scopes = openid profile groups
allow_sign_up = true
use_pkce = true
role_attribute_path = contains(groups[*], 'github.com:codemowers') && 'Admin' || 'Viewer'
role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || contains(groups[*], 'k-space:floor') && 'Editor' || 'Viewer'
allow_assign_grafana_admin = true
[security]
disable_initial_admin_creation = true
---
@ -63,7 +85,7 @@ spec:
fsGroup: 472
containers:
- name: grafana
image: grafana/grafana:8.5.24
image: grafana/grafana:11.1.0
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
@ -73,7 +95,7 @@ spec:
valueFrom:
secretKeyRef:
name: oidc-client-grafana-owner-secrets
key: OIDC_GATEWAY_URI
key: OIDC_IDP_URI
- name: GF_AUTH_GENERIC_OAUTH_CLIENT_ID
valueFrom:
secretKeyRef:
@ -93,17 +115,32 @@ spec:
valueFrom:
secretKeyRef:
name: oidc-client-grafana-owner-secrets
key: OIDC_GATEWAY_AUTH_URI
key: OIDC_IDP_AUTH_URI
- name: GF_AUTH_GENERIC_OAUTH_TOKEN_URL
valueFrom:
secretKeyRef:
name: oidc-client-grafana-owner-secrets
key: OIDC_GATEWAY_TOKEN_URI
key: OIDC_IDP_TOKEN_URI
- name: GF_AUTH_GENERIC_OAUTH_API_URL
valueFrom:
secretKeyRef:
name: oidc-client-grafana-owner-secrets
key: OIDC_GATEWAY_USERINFO_URI
key: OIDC_IDP_USERINFO_URI
- name: GF_DATABASE_TYPE
value: mysql
- name: GF_DATABASE_HOST
value: 172.20.36.1:3306
- name: GF_DATABASE_SSL_MODE
value: disable
- name: GF_DATABASE_NAME
value: kspace_grafana
- name: GF_DATABASE_USER
value: kspace_grafana
- name: GF_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: grafana-database
key: password
ports:
- containerPort: 3000
name: http-grafana
@ -135,20 +172,18 @@ spec:
name: grafana-data
- mountPath: /etc/grafana
name: grafana-config
- mountPath: /etc/grafana/provisioning/datasources
name: grafana-datasources
volumes:
- name: grafana-config
configMap:
name: grafana-config
volumeClaimTemplates:
- metadata:
name: grafana-data
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
- name: grafana-datasources
configMap:
name: grafana-datasources
- name: grafana-data
emptyDir:
sizeLimit: 500Mi
---
apiVersion: v1
kind: Service

hackerspace/README.md Normal file

@ -0,0 +1,8 @@
## inventory.k-space.ee
Reads from and writes to MongoDB.
<!-- Referenced/linked by https://wiki.k-space.ee/en/hosting/doors -->
A component of inventory is 'doorboy' (https://wiki.k-space.ee/en/hosting/doors).
## k6.ee
Reads from MongoDB and issues an HTTP redirect to //inventory.k-space.ee/m/inventory/{uuid}/view.
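To spot-check the redirect (a sketch; `<uuid>` is a placeholder for a real inventory item ID):
```
curl -sI https://k6.ee/<uuid> | grep -i '^location:'
# expected: location: https://inventory.k-space.ee/m/inventory/<uuid>/view
```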


@ -1,11 +1,9 @@
# Referenced/linked and documented by https://wiki.k-space.ee/en/hosting/doors
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: doorboy-proxy
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
spec:
revisionHistoryLimit: 0
replicas: 3
@ -26,20 +24,30 @@ spec:
operator: In
values:
- doorboy-proxy
topologyKey: kubernetes.io/hostname
topologyKey: topology.kubernetes.io/zone
weight: 100
containers:
- name: doorboy-proxy
image: harbor.k-space.ee/k-space/doorboy-proxy:latest
envFrom:
- secretRef:
name: inventory-mongodb
- secretRef:
name: doorboy-api
env:
- name: MONGO_URI
- name: FLOOR_ACCESS_GROUP
value: 'k-space:floor'
- name: WORKSHOP_ACCESS_GROUP
value: 'k-space:workshop'
- name: CARD_URI
value: 'https://inventory.k-space.ee/cards'
- name: SWIPE_URI
value: 'https://inventory.k-space.ee/m/doorboy/swipe'
- name: INVENTORY_API_KEY
valueFrom:
secretKeyRef:
name: mongo-application-readwrite
key: connectionString.standard
name: inventory-api-key
key: INVENTORY_API_KEY
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true


@ -34,11 +34,13 @@ spec:
value: https://inventory.k-space.ee/m/inventory/add-slug/%s
- name: GOREDIRECT_FOUND
value: https://inventory.k-space.ee/m/inventory/%s/view
- name: GOREDIRECT_NOPATH
value: https://inventory.k-space.ee/m/inventory
- name: MONGO_URI
valueFrom:
secretKeyRef:
key: connectionString.standard
name: inventory-mongodb-application-readwrite
key: MONGO_URI
name: inventory-mongodb
name: goredirect
ports:
- containerPort: 8080
@ -55,3 +57,22 @@ spec:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
---
apiVersion: v1
kind: Service
metadata:
name: goredirect
annotations:
external-dns.alpha.kubernetes.io/hostname: k6.ee
metallb.universe.tf/address-pool: elisa
spec:
ports:
- name: http
protocol: TCP
port: 80
targetPort: 8080
nodePort: 32120
selector:
app.kubernetes.io/name: goredirect
type: LoadBalancer
externalTrafficPolicy: Local

Some files were not shown because too many files have changed in this diff.