Merge remote-tracking branch 'origin/master'

Commit ae00e766d7 by Erki Aas, 2023-08-28 20:11:47 +03:00
101 changed files with 16120 additions and 1161 deletions


@@ -36,9 +36,9 @@ patch /etc/kubernetes/manifests/kube-apiserver.yaml - << EOF
 - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
 - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
 - --etcd-servers=https://127.0.0.1:2379
-- --oidc-issuer-url=https://auth.k-space.ee
-- --oidc-client-id=kubelogin
-- --oidc-username-claim=preferred_username
-- --oidc-groups-claim=groups
+- --oidc-issuer-url=https://auth2.k-space.ee/
+- --oidc-client-id=kubelogin
+- --oidc-username-claim=sub
+- --oidc-groups-claim=groups
 - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
 - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
@@ -77,8 +77,8 @@ users:
 args:
 - oidc-login
 - get-token
-- --oidc-issuer-url=https://auth.k-space.ee
-- --oidc-client-id=kubelogin
+- --oidc-issuer-url=https://auth2.k-space.ee/
+- --oidc-client-id=oidc-gateway-kubelogin
 - --oidc-use-pkce
 - --oidc-extra-scope=profile,email,groups
 - --listen-address=127.0.0.1:27890
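For reference, the client side of the hunk above can be bootstrapped with the kubelogin plugin; a minimal sketch, assuming krew is installed (the flags mirror the kubeconfig args shown in the diff):

```bash
# Install the oidc-login plugin via krew and run its interactive setup
kubectl krew install oidc-login
kubectl oidc-login setup \
  --oidc-issuer-url=https://auth2.k-space.ee/ \
  --oidc-client-id=oidc-gateway-kubelogin \
  --oidc-use-pkce \
  --oidc-extra-scope=profile,email,groups
```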
@@ -160,30 +160,7 @@ Added some ARM64 workers by using Ubuntu 22.04 server on Raspberry Pi.
 After machines have booted up and you can reach them via SSH:
-```bash
-# Enable required kernel modules
-cat > /etc/modules << EOF
-overlay
-br_netfilter
-EOF
-cat /etc/modules | xargs -L 1 -t modprobe
-# Finetune sysctl:
-cat > /etc/sysctl.d/99-k8s.conf << EOF
-net.ipv4.conf.all.accept_redirects = 0
-net.bridge.bridge-nf-call-iptables = 1
-net.ipv4.ip_forward = 1
-net.bridge.bridge-nf-call-ip6tables = 1
-# Elasticsearch needs this
-vm.max_map_count = 524288
-# Bump inotify limits to make sure
-fs.inotify.max_user_instances=1280
-fs.inotify.max_user_watches=655360
-EOF
-sysctl --system
+```
 # Disable Ubuntu caching DNS resolver
 systemctl disable systemd-resolved.service
 systemctl stop systemd-resolved
@@ -206,39 +183,6 @@ apt-get install -yqq linux-image-generic
 apt-get remove -yq cloud-init linux-image-*-kvm
 ```
-Install packages:
-```bash
-OS=xUbuntu_22.04
-VERSION=1.25
-echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list
-rm -fv /etc/apt/trusted.gpg
-curl -s https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | gpg --dearmor > /etc/apt/trusted.gpg.d/libcontainers-archive-keyring.gpg
-curl -s https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/Release.key | gpg --dearmor > /etc/apt/trusted.gpg.d/libcontainers-crio-archive-keyring.gpg
-curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor > /etc/apt/trusted.gpg.d/packages-cloud-google.gpg
-echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
-apt-get update
-apt-get install -yqq --allow-change-held-packages apt-transport-https curl cri-o cri-o-runc kubelet=1.25.12-00 kubectl=1.25.12-00 kubeadm=1.25.12-00 cri-o=1.25.3~0
-apt-mark hold kubelet kubeadm kubectl cri-o
-cat << \EOF > /etc/containers/registries.conf
-unqualified-search-registries = ["docker.io"]
-# To pull Docker images from a mirror uncomment following
-#[[registry]]
-#prefix = "docker.io"
-#location = "mirror.gcr.io"
-EOF
-sudo systemctl restart crio
-sudo systemctl daemon-reload
-sudo systemctl enable crio --now
-```
 On master:
 ```

ansible-bind-primary.yml
@@ -0,0 +1,76 @@
- name: Setup primary nameserver
hosts: ns1.k-space.ee
tasks:
- name: Make sure bind9 is installed
ansible.builtin.apt:
name: bind9
state: present
- name: Configure Bind
register: bind
copy:
dest: /etc/bind/named.conf
content: |
# This file is managed by Ansible
# https://git.k-space.ee/k-space/kube/src/branch/master/ansible-bind-primary.yml
# Do NOT modify manually
include "/etc/bind/named.conf.local";
include "/etc/bind/readwrite.key";
include "/etc/bind/readonly.key";
options {
directory "/var/cache/bind";
version "";
listen-on { any; };
listen-on-v6 { any; };
pid-file "/var/run/named/named.pid";
notify explicit; also-notify { 172.20.53.1; 172.20.53.2; 172.20.53.3; };
allow-recursion { none; };
recursion no;
check-names master ignore;
dnssec-validation no;
auth-nxdomain no;
};
# https://kb.isc.org/docs/aa-00723
acl allowed {
172.20.3.0/24;
172.20.4.0/24;
};
acl rejected { !allowed; any; };
zone "." {
type hint;
file "/var/lib/bind/db.root";
};
zone "k-space.ee" {
type master;
file "/var/lib/bind/db.k-space.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
zone "k6.ee" {
type master;
file "/var/lib/bind/db.k6.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
zone "kspace.ee" {
type master;
file "/var/lib/bind/db.kspace.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
- name: Check Bind config
ansible.builtin.shell: "named-checkconf"
- name: Reload Bind config
service:
name: bind9
state: reloaded
when: bind.changed

ansible-doors.yml
@@ -0,0 +1,63 @@
# ansible doors -m shell -a "ctr image pull harbor.k-space.ee/k-space/mjpg-streamer:latest"
# journalctl -u mjpg_streamer@video0.service -f
- name: Setup doors
hosts: doors
tasks:
- name: Make sure containerd is installed
ansible.builtin.apt:
name: containerd
state: present
- name: Copy systemd service for Doorboy controller
copy:
dest: /etc/systemd/system/godoor.service
content: |
[Unit]
Description=Doorboy service
Documentation=https://git.k-space.ee/k-space/godoor
After=network.target
[Service]
Environment=IMAGE=harbor.k-space.ee/k-space/godoor:latest
ExecStartPre=-ctr task kill --signal=9 %N
ExecStartPre=-ctr task rm %N
ExecStartPre=-ctr c rm %N
ExecStartPre=-ctr image pull $IMAGE
ExecStart=ctr run --rm --pid-file=/run/%N.pid --privileged --read-only --env-file=/etc/godoor --env=KDOORPI_API_ALLOWED=https://doorboy-proxy.k-space.ee/allowed --env=KDOORPI_API_LONGPOLL=https://doorboy-proxy.k-space.ee/longpoll --env=KDOORPI_API_SWIPE=https://doorboy-proxy.k-space.ee/swipe --env=KDOORPI_DOOR=%H --net-host --cwd /app $IMAGE %N /godoor
ExecStopPost=ctr task rm %N
ExecStopPost=ctr c rm %N
Restart=always
[Install]
WantedBy=multi-user.target
- name: Enable Doorboy controller
ansible.builtin.systemd:
state: restarted
daemon_reload: yes
name: godoor.service
- name: Copy systemd service for mjpg-streamer
copy:
dest: /etc/systemd/system/mjpg_streamer@.service
content: |
[Unit]
Description=A server for streaming Motion-JPEG from a video capture device
After=network.target
ConditionPathExists=/dev/%I
[Service]
Environment=IMAGE=harbor.k-space.ee/k-space/mjpg-streamer:latest
StandardOutput=tty
Type=forking
ExecStartPre=-ctr task kill --signal=9 %p_%i
ExecStartPre=-ctr task rm %p_%i
ExecStartPre=-ctr c rm %p_%i
ExecStartPre=-ctr image pull $IMAGE
ExecStart=ctr run --tty -d --rm --pid-file=/run/%i.pid --privileged --read-only --net-host $IMAGE %p_%i /usr/local/bin/mjpg_streamer -i 'input_uvc.so -d /dev/%I -r 1280x720 -f 10' -o 'output_http.so -w /usr/share/mjpg_streamer/www'
ExecStopPost=ctr task rm %p_%i
ExecStopPost=ctr c rm %p_%i
PIDFile=/run/%i.pid
[Install]
WantedBy=multi-user.target
- name: Enable mjpg-streamer
ansible.builtin.systemd:
state: restarted
daemon_reload: yes
name: mjpg_streamer@video0.service

ansible-kubernetes.yml
@@ -0,0 +1,82 @@
---
- name: Reconfigure graceful shutdown for kubelet
hosts: kubernetes
tasks:
- name: Reconfigure shutdownGracePeriod
ansible.builtin.lineinfile:
path: /var/lib/kubelet/config.yaml
regexp: '^shutdownGracePeriod:'
line: 'shutdownGracePeriod: 5m'
- name: Reconfigure shutdownGracePeriodCriticalPods
ansible.builtin.lineinfile:
path: /var/lib/kubelet/config.yaml
regexp: '^shutdownGracePeriodCriticalPods:'
line: 'shutdownGracePeriodCriticalPods: 5m'
- name: Work around unattended-upgrades
ansible.builtin.lineinfile:
path: /lib/systemd/logind.conf.d/unattended-upgrades-logind-maxdelay.conf
regexp: '^InhibitDelayMaxSec='
line: 'InhibitDelayMaxSec=5m0s'
- name: Pin kube components
hosts: kubernetes
tasks:
- name: Pin packages
loop:
- kubeadm
- kubectl
- kubelet
ansible.builtin.copy:
dest: "/etc/apt/preferences.d/{{ item }}"
content: |
Package: {{ item }}
Pin: version 1.26.*
Pin-Priority: 1001
- name: Reset /etc/containers/registries.conf
hosts: kubernetes
tasks:
- name: Copy /etc/containers/registries.conf
ansible.builtin.copy:
content: "unqualified-search-registries = [\"docker.io\"]\n"
dest: /etc/containers/registries.conf
register: registries
- name: Restart CRI-O
service:
name: cri-o
state: restarted
when: registries.changed
- name: Reset /etc/modules
hosts: kubernetes
tasks:
- name: Copy /etc/modules
ansible.builtin.copy:
content: |
overlay
br_netfilter
dest: /etc/modules
register: kernel_modules
- name: Load kernel modules
ansible.builtin.shell: "cat /etc/modules | xargs -L 1 -t modprobe"
when: kernel_modules.changed
- name: Reset /etc/sysctl.d/99-k8s.conf
hosts: kubernetes
tasks:
- name: Copy /etc/sysctl.d/99-k8s.conf
ansible.builtin.copy:
content: |
net.ipv4.conf.all.accept_redirects = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.max_map_count = 524288
fs.inotify.max_user_instances = 1280
fs.inotify.max_user_watches = 655360
dest: /etc/sysctl.d/99-k8s.conf
register: sysctl
- name: Reload sysctl config
ansible.builtin.shell: "sysctl --system"
when: sysctl.changed

ansible.cfg
@@ -0,0 +1,12 @@
[defaults]
ansible_managed = This file is managed by Ansible, manual changes will be overwritten.
inventory = inventory.yml
nocows = 1
pipelining = True
pattern =
deprecation_warnings = False
fact_caching = jsonfile
fact_caching_connection = ~/.ansible/k-space-fact-cache
[ssh_connection]
ssh_args = -F ssh_config

asterisk/.gitignore
@@ -0,0 +1 @@
conf

asterisk/application.yml
@@ -0,0 +1,91 @@
---
apiVersion: v1
kind: Service
metadata:
name: asterisk
annotations:
external-dns.alpha.kubernetes.io/hostname: voip.k-space.ee
spec:
type: LoadBalancer
externalTrafficPolicy: Local
selector:
app: asterisk
ports:
- name: sip
protocol: UDP
port: 5060
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: asterisk
labels:
app: asterisk
spec:
selector:
matchLabels:
app: asterisk
replicas: 1
template:
metadata:
labels:
app: asterisk
spec:
containers:
- name: asterisk
image: harbor.k-space.ee/k-space/asterisk
command:
- /usr/sbin/asterisk
args:
- -TWBpvvvdddf
volumeMounts:
- name: config
mountPath: /etc/asterisk
ports:
- containerPort: 8088
name: metrics
volumes:
- name: config
secret:
secretName: asterisk-secrets
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: asterisk
spec:
selector:
matchLabels:
app: asterisk
podMetricsEndpoints:
- port: metrics
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: asterisk
spec:
groups:
- name: asterisk
rules:
- alert: AsteriskPhoneNotRegistered
expr: asterisk_endpoints_state{resource=~"1.*"} < 2
for: 5m
labels:
severity: critical
annotations:
summary: "{{ $labels.resource }} is not registered."
- alert: AsteriskOutboundNumberNotRegistered
expr: asterisk_pjsip_outbound_registration_status == 0
for: 5m
labels:
severity: critical
annotations:
summary: "{{ $labels.username }} is not registered with provider."
- alert: AsteriskCallsPerMinuteLimitExceed
expr: asterisk_channels_duration_seconds > 10*60
for: 20m
labels:
severity: warning
annotations:
summary: "Call at channel {{ $labels.name }} is taking longer than 10m."

bind/.gitignore
@@ -0,0 +1 @@
*.key

bind/README.md
@@ -0,0 +1,103 @@
# Bind setup
The Bind primary resides outside Kubernetes at `193.40.103.2`
and is internally reachable via `172.20.0.2`.
Bind secondaries are hosted inside Kubernetes and load balanced behind `62.65.250.2`.
Ingresses and DNSEndpoints referring to `k-space.ee`, `kspace.ee` and `k6.ee`
are picked up automatically by `external-dns` and updated on the primary.
The primary sends notification events to `172.20.53.{1..3}`,
the internally exposed IPs of the secondaries.
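To sanity-check the topology, the primary and the load-balanced secondaries should return the same answers; a quick sketch using `dig`:
```
dig +short k-space.ee SOA @193.40.103.2
dig +short k-space.ee SOA @62.65.250.2
```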
# Secrets
To configure TSIG secrets:
```
kubectl create secret generic -n bind bind-readonly-secret \
--from-file=readonly.key
kubectl create secret generic -n bind bind-readwrite-secret \
--from-file=readwrite.key
kubectl create secret generic -n bind external-dns
kubectl -n bind delete secret tsig-secret
kubectl -n bind create secret generic tsig-secret \
--from-literal=TSIG_SECRET=$(cat readwrite.key | grep secret | cut -d '"' -f 2)
kubectl -n cert-manager delete secret tsig-secret
kubectl -n cert-manager create secret generic tsig-secret \
--from-literal=TSIG_SECRET=$(cat readwrite.key | grep secret | cut -d '"' -f 2)
```
# Serving additional zones
## Bind primary configuration
To serve additional domains from this Bind setup, add the following
section to `named.conf.local` on the primary `ns1.k-space.ee`:
```
key "foobar" {
algorithm hmac-sha512;
secret "...";
};
zone "foobar.com" {
type master;
file "/var/lib/bind/db.foobar.com";
allow-update { !rejected; key foobar; };
allow-transfer { !rejected; key readonly; key foobar; };
notify explicit; also-notify { 172.20.53.1; 172.20.53.2; 172.20.53.3; };
};
```
Initialize an empty zone file at `/var/lib/bind/db.foobar.com` on the primary `ns1.k-space.ee`:
```
foobar.com IN SOA ns1.foobar.com. hostmaster.foobar.com. (1 300 300 2592000 300)
NS ns1.foobar.com.
NS ns2.foobar.com.
ns1.foobar.com. A 193.40.103.2
ns2.foobar.com. A 62.65.250.2
```
Reload Bind config:
```
named-checkconf
systemctl reload bind9
```
## Bind secondary config
Add a section to `bind-secondary-config-local` under the key `named.conf.local`:
```
zone "foobar.com" { type slave; masters { 172.20.0.2 key readonly; }; };
```
And restart secondaries:
```
kubectl rollout restart -n bind statefulset/bind-secondary
```
## Registrar config
At your DNS registrar, point the glue records to:
```
foobar.com. NS ns1.foobar.com.
foobar.com. NS ns2.foobar.com.
ns1.foobar.com. A 193.40.103.2
ns2.foobar.com. A 62.65.250.2
```
## Updating DNS records
With the configured TSIG key `foobar` you can now:
* Obtain Let's Encrypt certificates with the DNS-01 challenge.
Inside Kubernetes use `cert-manager` with the RFC2136 provider.
* Update DNS records.
Inside Kubernetes use `external-dns` with the RFC2136 provider; see the sketches below.
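Outside Kubernetes, records can be updated by hand over RFC2136 with `nsupdate`; a minimal sketch (the key file name and the record are illustrative):
```
nsupdate -k foobar.key << EOF
server 193.40.103.2
zone foobar.com
update add test.foobar.com. 300 A 192.0.2.1
send
EOF
```
For certificates, a cert-manager issuer using the RFC2136 solver would look roughly like this (the issuer name and email are assumptions, not taken from this repo):
```
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-rfc2136
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: hostmaster@k-space.ee
    privateKeySecretRef:
      name: letsencrypt-rfc2136-account-key
    solvers:
    - dns01:
        rfc2136:
          nameserver: 193.40.103.2
          tsigKeyName: foobar
          tsigAlgorithm: HMACSHA512
          tsigSecretSecretRef:
            name: tsig-secret
            key: TSIG_SECRET
```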

bind/bind-secondary.yaml
@@ -0,0 +1,178 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: bind-secondary-config-local
data:
named.conf.local: |
zone "codemowers.ee" { type slave; masters { 172.20.0.2 key readonly; }; };
zone "codemowers.eu" { type slave; masters { 172.20.0.2 key readonly; }; };
zone "codemowers.cloud" { type slave; masters { 172.20.0.2 key readonly; }; };
---
apiVersion: v1
kind: ConfigMap
metadata:
name: bind-secondary-config
data:
named.conf: |
include "/etc/bind/named.conf.local";
include "/etc/bind/readonly.key";
options {
recursion no;
pid-file "/var/bind/named.pid";
allow-query { 0.0.0.0/0; };
allow-notify { 172.20.0.2; };
allow-transfer { none; };
check-names slave ignore;
notify no;
};
zone "k-space.ee" { type slave; masters { 172.20.0.2 key readonly; }; };
zone "k6.ee" { type slave; masters { 172.20.0.2 key readonly; }; };
zone "kspace.ee" { type slave; masters { 172.20.0.2 key readonly; }; };
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: bind-secondary
namespace: bind
spec:
replicas: 3
selector:
matchLabels:
app: bind-secondary
template:
metadata:
labels:
app: bind-secondary
spec:
containers:
- name: bind-secondary
image: internetsystemsconsortium/bind9:9.19
workingDir: /var/bind
command:
- named
- -g
- -c
- /etc/bind/named.conf
volumeMounts:
- mountPath: /run/named
name: run
- name: bind-secondary-config
mountPath: /etc/bind
readOnly: true
- name: bind-data
mountPath: /var/bind
volumes:
- name: run
emptyDir: {}
- name: bind-secondary-config
projected:
sources:
- configMap:
name: bind-secondary-config
- configMap:
name: bind-secondary-config-local
optional: true
- secret:
name: bind-readonly-secret
- name: bind-data
emptyDir: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- bind-secondary
topologyKey: "kubernetes.io/hostname"
---
apiVersion: v1
kind: Service
metadata:
name: bind-secondary
namespace: bind
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 62.65.250.2
selector:
app: bind-secondary
ports:
- protocol: TCP
port: 53
name: dns-tcp
targetPort: 53
- protocol: UDP
port: 53
name: dns-udp
targetPort: 53
---
apiVersion: v1
kind: Service
metadata:
name: bind-secondary-0
namespace: bind
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.20.53.1
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-0
ports:
- protocol: TCP
port: 53
name: dns-tcp
targetPort: 53
- protocol: UDP
port: 53
name: dns-udp
targetPort: 53
---
apiVersion: v1
kind: Service
metadata:
name: bind-secondary-1
namespace: bind
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.20.53.2
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-1
ports:
- protocol: TCP
port: 53
name: dns-tcp
targetPort: 53
- protocol: UDP
port: 53
name: dns-udp
targetPort: 53
---
apiVersion: v1
kind: Service
metadata:
name: bind-secondary-2
namespace: bind
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.20.53.3
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-2
ports:
- protocol: TCP
port: 53
name: dns-tcp
targetPort: 53
- protocol: UDP
port: 53
name: dns-udp
targetPort: 53

@@ -0,0 +1,40 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-k-space
spec:
revisionHistoryLimit: 0
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: external-dns
domain: k-space.ee
template:
metadata:
labels: *selectorLabels
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.13.5
envFrom:
- secretRef:
name: tsig-secret
args:
- --events
- --registry=txt
- --txt-prefix=external-dns-
- --txt-owner-id=k8s
- --provider=rfc2136
- --source=ingress
- --source=service
- --source=crd
- --domain-filter=k-space.ee
- --rfc2136-tsig-axfr
- --rfc2136-host=172.20.0.2
- --rfc2136-port=53
- --rfc2136-zone=k-space.ee
- --rfc2136-tsig-keyname=readwrite
- --rfc2136-tsig-secret-alg=hmac-sha512
- --rfc2136-tsig-secret=$(TSIG_SECRET)
# https://github.com/kubernetes-sigs/external-dns/issues/2446

bind/external-dns-k6.yaml
@@ -0,0 +1,71 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-k6
spec:
revisionHistoryLimit: 0
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: external-dns
domain: k6.ee
template:
metadata:
labels: *selectorLabels
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.13.5
envFrom:
- secretRef:
name: tsig-secret
args:
- --log-level=debug
- --events
- --registry=noop
- --provider=rfc2136
- --source=service
- --source=crd
- --domain-filter=k6.ee
- --rfc2136-tsig-axfr
- --rfc2136-host=172.20.0.2
- --rfc2136-port=53
- --rfc2136-zone=k6.ee
- --rfc2136-tsig-keyname=readwrite
- --rfc2136-tsig-secret-alg=hmac-sha512
- --rfc2136-tsig-secret=$(TSIG_SECRET)
# https://github.com/kubernetes-sigs/external-dns/issues/2446
---
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: k6
spec:
endpoints:
- dnsName: k6.ee
recordTTL: 300
recordType: SOA
targets:
- "ns1.k-space.ee. hostmaster.k-space.ee. (1 300 300 300 300)"
- dnsName: k6.ee
recordTTL: 300
recordType: NS
targets:
- ns1.k-space.ee
- ns2.k-space.ee
- dnsName: ns1.k-space.ee
recordTTL: 300
recordType: A
targets:
- 193.40.103.2
- dnsName: ns2.k-space.ee
recordTTL: 300
recordType: A
targets:
- 62.65.250.2
- dnsName: k-space.ee
recordTTL: 300
recordType: MX
targets:
- 10 mail.k-space.ee

@@ -0,0 +1,66 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-kspace
spec:
revisionHistoryLimit: 0
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: external-dns
domain: kspace.ee
template:
metadata:
labels: *selectorLabels
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.13.5
envFrom:
- secretRef:
name: tsig-secret
args:
- --events
- --registry=noop
- --provider=rfc2136
- --source=ingress
- --source=service
- --source=crd
- --domain-filter=kspace.ee
- --rfc2136-tsig-axfr
- --rfc2136-host=172.20.0.2
- --rfc2136-port=53
- --rfc2136-zone=kspace.ee
- --rfc2136-tsig-keyname=readwrite
- --rfc2136-tsig-secret-alg=hmac-sha512
- --rfc2136-tsig-secret=$(TSIG_SECRET)
# https://github.com/kubernetes-sigs/external-dns/issues/2446
---
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: kspace
spec:
endpoints:
- dnsName: kspace.ee
recordTTL: 300
recordType: SOA
targets:
- "ns1.k-space.ee. hostmaster.k-space.ee. (1 300 300 300 300)"
- dnsName: kspace.ee
recordTTL: 300
recordType: NS
targets:
- ns1.k-space.ee
- ns2.k-space.ee
- dnsName: ns1.k-space.ee
recordTTL: 300
recordType: A
targets:
- 193.40.103.2
- dnsName: ns2.k-space.ee
recordTTL: 300
recordType: A
targets:
- 62.65.250.2

bind/external-dns.yaml
@@ -0,0 +1,58 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
- nodes
verbs:
- get
- watch
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints
verbs:
- get
- watch
- list
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints/status
verbs:
- update
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: bind

@@ -1,4 +1,12 @@
 ---
+apiVersion: codemowers.cloud/v1beta1
+kind: MinioBucketClaim
+metadata:
+name: camtiler
+spec:
+capacity: 1Ti
+class: external
+---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
@@ -105,10 +113,6 @@ spec:
 kind: Deployment
 metadata:
 name: camera-foobar
-# Make sure keel.sh pulls updates for this deployment
-annotations:
-keel.sh/policy: force
-keel.sh/trigger: poll
 spec:
 revisionHistoryLimit: 0
 replicas: 1
@@ -166,9 +170,25 @@ spec:
 - name: SOURCE_NAME
 value: foobar
 - name: S3_BUCKET_NAME
-value: application
+valueFrom:
+secretKeyRef:
+name: miniobucket-camtiler-owner-secrets
+key: BUCKET_NAME
 - name: S3_ENDPOINT_URL
-value: http://minio
+valueFrom:
+secretKeyRef:
+name: miniobucket-camtiler-owner-secrets
+key: AWS_S3_ENDPOINT_URL
+- name: AWS_SECRET_ACCESS_KEY
+valueFrom:
+secretKeyRef:
+name: miniobucket-camtiler-owner-secrets
+key: AWS_SECRET_ACCESS_KEY
+- name: AWS_ACCESS_KEY_ID
+valueFrom:
+secretKeyRef:
+name: miniobucket-camtiler-owner-secrets
+key: AWS_ACCESS_KEY_ID
 - name: BASIC_AUTH_PASSWORD
 valueFrom:
 secretKeyRef:
@@ -179,16 +199,6 @@ spec:
 secretKeyRef:
 name: mongodb-application-readwrite
 key: connectionString.standard
-- name: AWS_SECRET_ACCESS_KEY
-valueFrom:
-secretKeyRef:
-name: minio-secrets
-key: MINIO_ROOT_PASSWORD
-- name: AWS_ACCESS_KEY_ID
-valueFrom:
-secretKeyRef:
-name: minio-secrets
-key: MINIO_ROOT_USER
 # Make sure 2+ pods of same camera are scheduled on different hosts
 affinity:
@@ -274,7 +284,7 @@ metadata:
 spec:
 target: http://user@server-room.cam.k-space.ee:8080/?action=stream
 secretRef: camera-secrets
-replicas: 1
+replicas: 2
 ---
 apiVersion: k-space.ee/v1alpha1
 kind: Camera
@@ -317,7 +327,7 @@ kind: Camera
 metadata:
 name: back-door
 spec:
-target: http://user@back-door.cam.k-space.ee:8080/?action=stream
+target: http://user@100.102.3.3:8080/?action=stream
 secretRef: camera-secrets
 replicas: 1
@@ -326,6 +336,20 @@ kind: Camera
 metadata:
 name: ground-door
 spec:
-target: http://user@ground-door.cam.k-space.ee:8080/?action=stream
+target: http://user@100.102.3.1:8080/?action=stream
 secretRef: camera-secrets
 replicas: 1
+---
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+name: camera-motion-detect
+spec:
+selector:
+matchLabels:
+component: camera-motion-detect
+podMetricsEndpoints:
+- port: http
+podTargetLabels:
+- app.kubernetes.io/name
+- component

@@ -6,6 +6,9 @@ metadata:
 spec:
 displayName: Cameras
 uri: 'https://cams.k-space.ee/tiled'
+allowedGroups:
+- k-space:floor
+- k-space:friends
 ---
 apiVersion: networking.k8s.io/v1
 kind: Ingress

@@ -96,20 +96,23 @@ spec:
 ports:
 - containerPort: 3000
 env:
+- name: MINIO_BUCKET
+valueFrom:
+secretKeyRef:
+name: miniobucket-camtiler-owner-secrets
+key: BUCKET_NAME
 - name: AWS_SECRET_ACCESS_KEY
 valueFrom:
 secretKeyRef:
-name: minio-secrets
-key: MINIO_ROOT_PASSWORD
+name: miniobucket-camtiler-owner-secrets
+key: AWS_SECRET_ACCESS_KEY
 - name: AWS_ACCESS_KEY_ID
 valueFrom:
 secretKeyRef:
-name: minio-secrets
-key: MINIO_ROOT_USER
+name: miniobucket-camtiler-owner-secrets
+key: AWS_ACCESS_KEY_ID
-- name: MINIO_BUCKET
-value: 'application'
 - name: MINIO_HOSTNAME
-value: 'cams-s3.k-space.ee'
+value: 'external.minio-clusters.k-space.ee'
 - name: MINIO_PORT
 value: '443'
 - name: MINIO_SCHEMA

@@ -1,199 +0,0 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: minio
labels:
app.kubernetes.io/name: minio
spec:
selector:
matchLabels:
app.kubernetes.io/name: minio
serviceName: minio-svc
replicas: 4
podManagementPolicy: Parallel
template:
metadata:
labels:
app.kubernetes.io/name: minio
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- minio
topologyKey: topology.kubernetes.io/zone
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
containers:
- name: minio
env:
- name: MINIO_PROMETHEUS_AUTH_TYPE
value: public
envFrom:
- secretRef:
name: minio-secrets
image: minio/minio:RELEASE.2022-12-12T19-27-27Z
args:
- server
- http://minio-{0...3}.minio-svc.camtiler.svc.cluster.local/data
- --address
- 0.0.0.0:9000
- --console-address
- 0.0.0.0:9001
ports:
- containerPort: 9000
name: http
- containerPort: 9001
name: console
readinessProbe:
httpGet:
path: /minio/health/ready
port: 9000
initialDelaySeconds: 2
periodSeconds: 5
resources:
requests:
cpu: 300m
memory: 1Gi
limits:
cpu: 4000m
memory: 2Gi
volumeMounts:
- name: minio-data
mountPath: /data
volumeClaimTemplates:
- metadata:
name: minio-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: '30Gi'
storageClassName: minio
---
apiVersion: v1
kind: Service
metadata:
name: minio
spec:
sessionAffinity: ClientIP
type: ClusterIP
ports:
- port: 80
targetPort: 9000
protocol: TCP
name: http
selector:
app.kubernetes.io/name: minio
---
kind: Service
apiVersion: v1
metadata:
name: minio-svc
spec:
selector:
app.kubernetes.io/name: minio
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: http
port: 9000
- name: console
port: 9001
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: minio
spec:
selector:
matchLabels:
app.kubernetes.io/name: minio
podMetricsEndpoints:
- port: http
path: /minio/v2/metrics/node
podTargetLabels:
- app.kubernetes.io/name
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: minio
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
port: minio
path: /minio/v2/metrics/cluster
selector:
matchLabels:
app.kubernetes.io/name: minio
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: cams-s3.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: minio-svc
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: minio
spec:
groups:
- name: minio
rules:
- alert: MinioClusterDiskOffline
expr: minio_cluster_disk_offline_total > 0
for: 0m
labels:
severity: critical
annotations:
summary: Minio cluster disk offline (instance {{ $labels.instance }})
description: "Minio cluster disk is offline"
- alert: MinioNodeDiskOffline
expr: minio_cluster_nodes_offline_total > 0
for: 0m
labels:
severity: critical
annotations:
summary: Minio node disk offline (instance {{ $labels.instance }})
description: "Minio cluster node disk is offline"
- alert: MinioDiskSpaceUsage
expr: disk_storage_available / disk_storage_total * 100 < 10
for: 0m
labels:
severity: warning
annotations:
summary: Minio disk space usage (instance {{ $labels.instance }})
description: "Minio available free space is low (< 10%)"

@@ -4,6 +4,9 @@ kind: MongoDBCommunity
 metadata:
 name: mongodb
 spec:
+agent:
+logLevel: ERROR
+maxLogFileDurationHours: 1
 additionalMongodConfig:
 systemLog:
 quiet: true

@@ -19,7 +19,7 @@ spec:
 - from:
 - namespaceSelector:
 matchLabels:
-kubernetes.io/metadata.name: prometheus-operator
+kubernetes.io/metadata.name: monitoring
 podSelector:
 matchLabels:
 app.kubernetes.io/name: prometheus
@@ -64,7 +64,7 @@ spec:
 - from:
 - namespaceSelector:
 matchLabels:
-kubernetes.io/metadata.name: prometheus-operator
+kubernetes.io/metadata.name: monitoring
 podSelector:
 matchLabels:
 app.kubernetes.io/name: prometheus
@@ -186,7 +186,7 @@ spec:
 - from:
 - namespaceSelector:
 matchLabels:
-kubernetes.io/metadata.name: prometheus-operator
+kubernetes.io/metadata.name: monitoring
 podSelector:
 matchLabels:
 app.kubernetes.io/name: prometheus

cnpg-system/README.md
@@ -0,0 +1,8 @@
# CloudNativePG
To deploy:
```
wget https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.20/releases/cnpg-1.20.2.yaml -O application.yml
kubectl apply -f application.yml
```

cnpg-system/application.yml (new file, 13,215 lines; diff suppressed because it is too large)

@@ -1,14 +1,3 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-name: application-config
-data:
-DRONE_GITEA_SERVER: "https://git.k-space.ee"
-DRONE_GIT_ALWAYS_AUTH: "false"
-DRONE_PROMETHEUS_ANONYMOUS_ACCESS: "true"
-DRONE_SERVER_HOST: "drone.k-space.ee"
-DRONE_SERVER_PROTO: "https"
-DRONE_USER_CREATE: "username:lauri,admin:true"
 ---
 apiVersion: v1
 kind: Service
@@ -59,14 +48,27 @@ spec:
 httpGet:
 path: /
 port: http
+env:
+- name: DRONE_GITEA_SERVER
+value: https://git.k-space.ee
+- name: DRONE_GIT_ALWAYS_AUTH
+value: "false"
+- name: DRONE_SERVER_HOST
+value: drone.k-space.ee
+- name: DRONE_SERVER_PROTO
+value: https
+- name: DRONE_USER_CREATE
+value: username:lauri,admin:true
+- name: DRONE_DEBUG
+value: "true"
+- name: DRONE_TRACE
+value: "true"
 envFrom:
 - secretRef:
 name: application-secrets
-- configMapRef:
-name: application-config
 volumeMounts:
 - name: drone-data
 mountPath: /data
 volumeClaimTemplates:
 - metadata:
 name: drone-data
@@ -78,6 +80,16 @@ spec:
 requests:
 storage: 8Gi
 ---
+apiVersion: traefik.io/v1alpha1
+kind: Middleware
+metadata:
+name: redirect
+spec:
+redirectRegex:
+regex: ^https://(.*)/register$
+replacement: https://${1}/
+permanent: false
+---
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
@@ -87,6 +99,7 @@ metadata:
 kubernetes.io/ingress.class: traefik
 traefik.ingress.kubernetes.io/router.entrypoints: websecure
 traefik.ingress.kubernetes.io/router.tls: "true"
+traefik.ingress.kubernetes.io/router.middlewares: drone-redirect@kubernetescrd
 spec:
 tls:
 - hosts:

@@ -1,4 +1,12 @@
 ---
+apiVersion: codemowers.io/v1alpha1
+kind: OIDCGWMiddlewareClient
+metadata:
+name: sso
+spec:
+displayName: Etherpad
+uri: 'https://pad.k-space.ee/'
+---
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -32,6 +40,8 @@ spec:
 ports:
 - containerPort: 9001
 env:
+- name: MINIFY
+value: 'false'
 - name: DB_TYPE
 value: mysql
 - name: DB_HOST
@@ -82,6 +92,7 @@ metadata:
 traefik.ingress.kubernetes.io/router.entrypoints: websecure
 traefik.ingress.kubernetes.io/router.tls: "true"
 external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
+traefik.ingress.kubernetes.io/router.middlewares: etherpad-sso@kubernetescrd
 spec:
 rules:
 - host: pad.k-space.ee

@@ -1,15 +0,0 @@
Before applying replace the secret with the actual one.
For debugging add `- --log-level=debug`:
```
wget https://raw.githubusercontent.com/kubernetes-sigs/external-dns/master/docs/contributing/crd-source/crd-manifest.yaml -O crd.yml
kubectl apply -n external-dns -f application.yml -f crd.yml
```
Insert TSIG secret:
```
kubectl -n external-dns create secret generic tsig-secret \
--from-literal=TSIG_SECRET=<secret>
```

@@ -1,99 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns
namespace: external-dns
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
- nodes
verbs:
- get
- watch
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints
verbs:
- get
- watch
- list
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints/status
verbs:
- update
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
namespace: external-dns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: external-dns
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: external-dns
spec:
revisionHistoryLimit: 0
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: k8s.gcr.io/external-dns/external-dns:v0.13.1
envFrom:
- secretRef:
name: tsig-secret
args:
- --registry=txt
- --txt-prefix=external-dns-
- --txt-owner-id=k8s
- --provider=rfc2136
- --source=ingress
- --source=service
- --source=crd
- --domain-filter=k-space.ee
- --rfc2136-host=193.40.103.2
- --rfc2136-port=53
- --rfc2136-zone=k-space.ee
- --rfc2136-tsig-keyname=acme
- --rfc2136-tsig-secret-alg=hmac-sha512
- --rfc2136-tsig-secret=$(TSIG_SECRET)
# https://github.com/kubernetes-sigs/external-dns/issues/2446

@@ -124,6 +124,8 @@ spec:
 secretKeyRef:
 name: freescout-secrets
 key: DB_PASS
+- name: AWS_USE_PATH_STYLE_ENDPOINT
+value: "true"
 - name: AWS_BUCKET
 valueFrom:
 secretKeyRef:

@@ -0,0 +1,50 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: reset-oidc-config
spec:
template:
spec:
volumes:
- name: tmp
emptyDir: {}
initContainers:
- name: jq
image: alpine/k8s:1.24.16@sha256:06f8942d87fa17b40795bb9a8eff029a9be3fc3c9bcc13d62071de4cc3324153
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
envFrom:
- secretRef:
name: oidc-client-freescout-owner-secrets
command:
- /bin/bash
- -c
- rm -fv /tmp/update.sql;
jq '{"name":"oauth.client_id","value":$ENV.OIDC_CLIENT_ID} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.client_secret","value":$ENV.OIDC_CLIENT_SECRET} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.auth_url","value":$ENV.OIDC_GATEWAY_AUTH_URI} | "UPDATE options SET value=\(.value + "?scope=openid+profile" |tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.token_url","value":$ENV.OIDC_GATEWAY_TOKEN_URI} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.user_url","value":$ENV.OIDC_GATEWAY_USERINFO_URI} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
cat /tmp/update.sql
containers:
- name: mysql
image: mysql
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
env:
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: freescout-secrets
key: DB_PASS
command:
- /bin/bash
- -c
- mysql -u kspace_freescout kspace_freescout -h 172.20.36.1 -p${MYSQL_PWD} < /tmp/update.sql
restartPolicy: OnFailure
backoffLimit: 4

@@ -12,6 +12,26 @@ spec:
 name: default
 secretName: git-tls
 ---
+apiVersion: codemowers.cloud/v1beta1
+kind: SecretClaim
+metadata:
+name: gitea-security-secret-key
+spec:
+size: 32
+mapping:
+- key: secret
+value: "%(plaintext)s"
+---
+apiVersion: codemowers.cloud/v1beta1
+kind: SecretClaim
+metadata:
+name: gitea-security-internal-token
+spec:
+size: 32
+mapping:
+- key: secret
+value: "%(plaintext)s"
+---
 apiVersion: codemowers.io/v1alpha1
 kind: OIDCGWClient
 metadata:
@@ -51,10 +71,22 @@ spec:
 app.kubernetes.io/name: gitea
 spec:
 enableServiceLinks: false
+securityContext:
+fsGroup: 1000
+runAsUser: 1000
+runAsGroup: 1000
+runAsNonRoot: true
 containers:
 - name: gitea
-image: gitea/gitea:1.20.1
+image: gitea/gitea:1.20.2-rootless
+imagePullPolicy: IfNotPresent
+securityContext:
+readOnlyRootFilesystem: true
 env:
+- name: GITEA__REPOSITORY__DISABLED_REPO_UNITS
+value: repo.releases,repo.wiki
+- name: GITEA__ADMIN__DISABLE_REGULAR_ORG_CREATION
+value: "true"
 - name: GITEA__SERVER__SSH_SERVER_HOST_KEYS
 value: ssh/gitea.rsa,ssh/gitea.ecdsa,ssh/gitea.ed25519
 - name: GITEA__SERVER__START_SSH_SERVER
@@ -63,16 +95,18 @@ spec:
 value: "/cert/tls.crt"
 - name: GITEA__SERVER__KEY_FILE
 value: "/cert/tls.key"
-- name: GITEA__SERVER__SSH_LISTEN_PORT
-value: "2222"
+- name: GITEA__SERVER__SSH_PORT
+value: "22"
 - name: GITEA__SERVER__PROTOCOL
 value: https
-- name: GITEA__REDIRECT_OTHER_PORT
+- name: GITEA__SERVER__REDIRECT_OTHER_PORT
 value: "true"
-- name: GITEA__PORT_TO_REDIRECT
+- name: GITEA__SERVER__PORT_TO_REDIRECT
 value: "8080"
 - name: GITEA__SERVER__DOMAIN
 value: git.k-space.ee
+- name: GITEA__SERVER__SSH_DOMAIN
+value: git.k-space.ee
 - name: GITEA__SERVER__HTTP_ADDR
 value: 0.0.0.0
 - name: GITEA__SERVER__ROOT_URL
@@ -93,10 +127,6 @@ spec:
 value: "false"
 - name: GITEA__SECURITY__INSTALL_LOCK
 value: "true"
-- name: GITEA__SECURITY__SECRET_KEY
-value: t2RrFCn4Q22MFPc
-- name: GITEA__SECURITY__LOGIN_REMEMBER_DAYS
-value: "30"
 - name: GITEA__SERVICE__REGISTER_EMAIL_CONFIRM
 value: "true"
 - name: GITEA__SERVICE__DISABLE_REGISTRATION
@@ -121,10 +151,6 @@ spec:
 value: "false"
 - name: GITEA__CRON__ENABLED
 value: "true"
-- name: GITEA__I18N__LANGS
-value: en-US
-- name: GITEA__I18N__NAMES
-value: English
 - name: GITEA__DATABASE__PASSWD
 valueFrom:
 secretKeyRef:
@@ -143,8 +169,13 @@ spec:
 - name: GITEA__SECURITY__INTERNAL_TOKEN
 valueFrom:
 secretKeyRef:
-name: gitea-secrets
-key: GITEA__SECURITY__INTERNAL_TOKEN
+name: gitea-security-internal-token
+key: secret
+- name: GITEA__SECURITY__SECRET_KEY
+valueFrom:
+secretKeyRef:
+name: gitea-security-secret-key
+key: secret
 ports:
 - containerPort: 8080
 name: http
@@ -153,11 +184,19 @@ spec:
 - containerPort: 2222
 name: ssh
 volumeMounts:
+- mountPath: /tmp
+name: tmp
+- mountPath: /etc/gitea
+name: etc
 - mountPath: /cert
 name: cert
-- mountPath: /data
+- mountPath: /var/lib/gitea
 name: data
 volumes:
+- name: tmp
+emptyDir: {}
+- name: etc
+emptyDir: {}
 - name: cert
 secret:
 secretName: git-tls
@@ -195,4 +234,3 @@ spec:
 name: https
 targetPort: 3000
 sessionAffinity: ClientIP
-publishNotReadyAddresses: true

@@ -5,7 +5,7 @@ metadata:
 name: grafana
 spec:
 displayName: Grafana
-uri: https://grafana.k-space.ee
+uri: https://grafana.k-space.ee/login/generic_oauth
 redirectUris:
 - https://grafana.k-space.ee/login/generic_oauth
 allowedGroups:

inventory.yml
@@ -0,0 +1,38 @@
all:
children:
bind:
hosts:
ns1.k-space.ee:
kubernetes:
children:
masters:
hosts:
master1.kube.k-space.ee:
master2.kube.k-space.ee:
master3.kube.k-space.ee:
kubelets:
children:
mon:
hosts:
mon1.kube.k-space.ee:
mon2.kube.k-space.ee:
mon3.kube.k-space.ee:
storage:
hosts:
storage1.kube.k-space.ee:
storage2.kube.k-space.ee:
storage3.kube.k-space.ee:
storage4.kube.k-space.ee:
workers:
hosts:
worker1.kube.k-space.ee:
worker2.kube.k-space.ee:
worker3.kube.k-space.ee:
worker4.kube.k-space.ee:
worker9.kube.k-space.ee:
doors:
hosts:
100.102.3.1:
100.102.3.2:
100.102.3.3:
100.102.3.4:

inventory/README.md
@@ -0,0 +1,21 @@
To deploy components:
```
kubectl create namespace members-site
kubectl apply -n members-site -f doorboy.yml
```
# Doorboy
Set up Doorboy UID hashing salt:
```
kubectl create secret generic -n members-site doorboy-api \
--from-literal=DOORBOY_SECRET=hg2NmVlf6JcS3w237ZXn
kubectl create secret generic -n members-site doorboy-uid-hash-salt \
--from-literal=KDOORPI_UID_SALT=hkRXwLlQKmCJoy5qaahp
kubectl create secret generic -n members-site mongo-application-readwrite \
--from-literal=connectionString.standard=mongodb://kspace_accounting:dBDCS21pHlZAd5isyfBI@mongodb.infra.k-space.ee:27017/kspace_accounting?replicaSet=kspace-mongo-set
```

@@ -1,10 +0,0 @@
To generate secrets and to deploy:
```
kubectl create secret generic -n $(basename $(pwd)) application-secrets \
--from-literal=BASIC_AUTH_PASSWORD=$(cat /dev/urandom | base64 | head -c 30) \
--from-literal=MAIL_SMTP_PASS=... \
--from-literal=SLACK_TOKEN=...
kubectl apply -n keel -f application.yml
kubectl -n keel rollout restart deployment.apps/keel
```

@@ -1,176 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: keel
namespace: keel
labels:
app: keel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: keel
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- watch
- list
- apiGroups:
- ""
- extensions
- apps
- batch
resources:
- pods
- replicasets
- replicationcontrollers
- statefulsets
- deployments
- daemonsets
- jobs
- cronjobs
verbs:
- get
- delete # required to delete pods during force upgrade of the same tag
- watch
- list
- update
- apiGroups:
- ""
resources:
- configmaps
- pods/portforward
verbs:
- get
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: keel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: keel
subjects:
- kind: ServiceAccount
name: keel
namespace: keel
---
apiVersion: v1
kind: Service
metadata:
name: keel
namespace: keel
labels:
app: keel
spec:
type: ClusterIP
ports:
- port: 9300
targetPort: 9300
protocol: TCP
name: keel
selector:
app: keel
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: keel
labels:
app: keel
annotations:
keel.sh/policy: force
keel.sh/trigger: poll
keel.sh/pollSchedule: "@midnight"
spec:
replicas: 1
serviceName: keel
selector:
matchLabels:
app: keel
template:
metadata:
labels:
app: keel
spec:
serviceAccountName: keel
containers:
- name: keel
image: keelhq/keel:latest
imagePullPolicy: Always
command: ["/bin/keel"]
volumeMounts:
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POLL
value: "true"
- name: HELM_PROVIDER
value: "false"
- name: TILLER_NAMESPACE
value: "kube-system"
- name: TILLER_ADDRESS
value: "tiller-deploy:44134"
- name: NOTIFICATION_LEVEL
value: "info"
- name: BASIC_AUTH_USER
value: admin
- name: SLACK_CHANNELS
value: kube-prod
- name: SLACK_BOT_NAME
value: keel.k-space.ee
envFrom:
- secretRef:
name: application-secrets
ports:
- containerPort: 9300
livenessProbe:
httpGet:
path: /healthz
port: 9300
initialDelaySeconds: 30
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /healthz
port: 9300
initialDelaySeconds: 30
timeoutSeconds: 10
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 50m
memory: 64Mi
volumeMounts:
- name: keel-data
mountPath: /data
volumeClaimTemplates:
- metadata:
name: keel-data
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi

@@ -159,7 +159,7 @@ spec:
 spec:
 automountServiceAccountToken: true
 containers:
-- image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.7.0
+- image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.9.2
 args:
 - --metric-labels-allowlist=pods=[*]
 livenessProbe:

@@ -7,6 +7,7 @@ spec:
 displayName: Kubernetes pod log aggregator
 uri: 'https://log.k-space.ee'
 allowedGroups:
+- k-space:kubernetes:developers
 - k-space:kubernetes:admins
 headerMapping:
 email: Remote-Email
@@ -22,7 +23,7 @@ spec:
 size: 32
 mapping:
 - key: password
-value: "%(password)s"
+value: "%(plaintext)s"
 ---
 apiVersion: codemowers.cloud/v1beta1
 kind: SecretClaim
@@ -32,13 +33,16 @@ spec:
 size: 32
 mapping:
 - key: password
-value: "%(password)s"
+value: "%(plaintext)s"
 ---
 apiVersion: mongodbcommunity.mongodb.com/v1
 kind: MongoDBCommunity
 metadata:
 name: logmower-mongodb
 spec:
+agent:
+logLevel: ERROR
+maxLogFileDurationHours: 1
 additionalMongodConfig:
 systemLog:
 quiet: true
@@ -244,7 +248,7 @@ spec:
 - from:
 - namespaceSelector:
 matchLabels:
-kubernetes.io/metadata.name: prometheus-operator
+kubernetes.io/metadata.name: monitoring
 podSelector:
 matchLabels:
 app.kubernetes.io/name: prometheus

@@ -35,7 +35,9 @@ metadata:
 namespace: metallb-system
 spec:
 ipAddressPools:
 - zoo
+- bind-secondary-external
+- bind-secondary-internal
 ---
 # Slice of public EEnet subnet using MetalLB L3 method
 apiVersion: metallb.io/v1beta1
@@ -57,6 +59,24 @@ spec:
 addresses:
 - 62.65.250.36/30
 ---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+name: bind-secondary-internal
+namespace: metallb-system
+spec:
+addresses:
+- 172.20.53.0/24
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+name: bind-secondary-external
+namespace: metallb-system
+spec:
+addresses:
+- 62.65.250.2/32
+---
 apiVersion: metallb.io/v1beta2
 kind: BGPPeer
 metadata:

@@ -1,10 +1,11 @@
 # minio-clusters
-This namespace houses Minio clusters managed by
-[Codemowers' Operator Bundle](https://github.com/codemowers/operator-bundle)
+This namespace is for Minio clusters managed by Codemowers' sample
+[minio-bucket-operator](https://github.com/codemowers/operatorlib/tree/main/samples/minio-bucket-operator),
+which is deployed via [ArgoCD](https://argocd.k-space.ee/applications/argocd/minio-bucket-operator)
 To update, bump the `image` tag for the classes and issue:
 ```
-kubectl apply -n minio-clusters -f application.yaml
+kubectl apply -n minio-clusters -f dedicated.yaml -f external.yaml -f shared.yaml
 ```

@@ -1,181 +0,0 @@
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterBucketClass
metadata:
name: external
spec:
description: "Minio instance running on the ZFS storage box"
targetNamespace: minio-clusters
targetCluster: external
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterBucketClass
metadata:
name: shared
spec:
description: "Shared Minio cluster"
targetNamespace: minio-clusters
targetCluster: shared
storageClass: minio
replicas: 4
podSpec:
containers:
- name: minio
env:
- name: MINIO_PROMETHEUS_AUTH_TYPE
value: public
image: minio/minio:RELEASE.2023-02-17T17-52-43Z
args:
- server
- --address
- 0.0.0.0:9000
- --console-address
- 0.0.0.0:9001
ports:
- containerPort: 9000
name: http
- containerPort: 9001
name: console
readinessProbe:
httpGet:
path: /minio/health/ready
port: 9000
initialDelaySeconds: 2
periodSeconds: 5
resources:
requests:
cpu: 300m
memory: 1Gi
limits:
cpu: 4000m
memory: 2Gi
volumeMounts:
- name: data
mountPath: /data
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterBucketClass
metadata:
name: dedicated
spec:
targetNamespace: minio-clusters
description: "Dedicated Minio clusters"
replicas: 4
storageClass: minio
podSpec:
containers:
- name: minio
env:
- name: MINIO_PROMETHEUS_AUTH_TYPE
value: public
image: minio/minio:RELEASE.2023-02-17T17-52-43Z
args:
- server
- --address
- 0.0.0.0:9000
- --console-address
- 0.0.0.0:9001
ports:
- containerPort: 9000
name: http
- containerPort: 9001
name: console
readinessProbe:
httpGet:
path: /minio/health/ready
port: 9000
initialDelaySeconds: 2
periodSeconds: 5
resources:
requests:
cpu: 300m
memory: 1Gi
limits:
cpu: 4000m
memory: 2Gi
volumeMounts:
- name: data
mountPath: /data
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio-cluster-shared
namespace: minio-clusters
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: minio-cluster-shared.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: minio-cluster-shared
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: v1
kind: Service
metadata:
name: minio-cluster-shared-console
namespace: minio-clusters
spec:
ports:
- name: http
protocol: TCP
port: 80
targetPort: 9001
selector:
app.kubernetes.io/instance: shared
app.kubernetes.io/name: minio
clusterIP: None
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio-cluster-shared-console
namespace: minio-clusters
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
spec:
rules:
- host: minio-cluster-shared-console.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: minio-cluster-shared-console
port:
name: http
tls:
- hosts:
- "*.k-space.ee"

minio-clusters/cert.yaml
@@ -0,0 +1,13 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: wildcard-tls
namespace: minio-clusters
spec:
dnsNames:
- "*.minio-clusters.k-space.ee"
issuerRef:
kind: ClusterIssuer
name: default
secretName: wildcard-tls

@@ -0,0 +1,23 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClass
metadata:
name: dedicated
annotations:
kubernetes.io/description: "Dedicated Minio cluster"
spec:
reclaimPolicy: Retain
replicas: 4
storageClass: minio
podSpec:
containers:
- name: minio
image: docker.io/minio/minio:RELEASE.2023-07-18T17-49-40Z@sha256:8879cd0b0c4087817430e21be3ad909d60e9da62fac3e8d9368d9fde51279229
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage

@@ -0,0 +1,90 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClass
metadata:
name: external
annotations:
kubernetes.io/description: "External Minio cluster"
spec:
reclaimPolicy: Retain
shared: true
---
apiVersion: v1
kind: Service
metadata:
name: external
namespace: minio-clusters
spec:
externalName: 172.20.9.2
ports:
- name: http
protocol: TCP
port: 9000
type: ExternalName
---
apiVersion: v1
kind: Service
metadata:
name: external-console
namespace: minio-clusters
spec:
externalName: 172.20.9.2
ports:
- name: http
protocol: TCP
port: 9001
type: ExternalName
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: external
namespace: minio-clusters
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: external.minio-clusters.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: external
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
secretName: wildcard-tls
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: external-console
namespace: minio-clusters
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: external-console.minio-clusters.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: external-console
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
secretName: wildcard-tls


@ -0,0 +1,78 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClass
metadata:
name: shared
annotations:
kubernetes.io/description: "Shared Minio cluster"
spec:
reclaimPolicy: Retain
shared: true
replicas: 4
storageClass: minio
podSpec:
containers:
- name: minio
image: docker.io/minio/minio:RELEASE.2023-07-18T17-49-40Z@sha256:8879cd0b0c4087817430e21be3ad909d60e9da62fac3e8d9368d9fde51279229
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: shared
namespace: minio-clusters
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: shared.minio-clusters.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: shared
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
secretName: wildcard-tls
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: shared-console
namespace: minio-clusters
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: shared-console.minio-clusters.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: shared-console
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
secretName: wildcard-tls

monitoring/README.md Normal file

@ -0,0 +1,28 @@
## Monitoring
This namespace is managed by
[ArgoCD](https://argocd.k-space.ee/applications/argocd/monitoring)
To reconfigure SNMP targets etc:
```
kubectl delete -n monitoring configmap snmp-exporter
kubectl create -n monitoring configmap snmp-exporter --from-file=snmp.yml=snmp-configs.yaml
```
To set Slack secrets:
```
kubectl create -n monitoring secret generic slack-secrets \
--from-literal=webhook-url=https://hooks.slack.com/services/...
```
To set Mikrotik secrets:
```
kubectl create -n monitoring secret generic mikrotik-exporter \
--from-literal=MIKROTIK_PASSWORD='f7W!H*Pu' \
--from-literal=PROMETHEUS_BEARER_TOKEN=$(cat /dev/urandom | base64 | head -c 30)
```
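To verify the secrets landed in the namespace, standard kubectl works, e.g.:
```
kubectl get secret -n monitoring slack-secrets mikrotik-exporter
kubectl get secret -n monitoring mikrotik-exporter -o jsonpath='{.data.PROMETHEUS_BEARER_TOKEN}' | base64 -d
```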


@ -0,0 +1,62 @@
---
apiVersion: monitoring.coreos.com/v1alpha1
kind: AlertmanagerConfig
metadata:
name: alertmanager
labels:
app.kubernetes.io/name: alertmanager
spec:
route:
routes:
- continue: false
receiver: slack-notifications
matchers:
- matchType: "="
name: severity
value: critical
receiver: 'null'
receivers:
- name: 'null'
- name: 'slack-notifications'
slackConfigs:
- channel: '#kube-prod'
sendResolved: true
apiURL:
name: slack-secrets
key: webhook-url
---
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
name: alertmanager
spec:
alertmanagerConfigMatcherStrategy:
type: None
alertmanagerConfigNamespaceSelector: {}
alertmanagerConfigSelector: {}
alertmanagerConfiguration:
name: alertmanager
secrets:
- slack-secrets
nodeSelector:
dedicated: monitoring
tolerations:
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
replicas: 3
serviceAccountName: alertmanager
externalUrl: http://am.k-space.ee/
routePrefix: "/"
securityContext:
fsGroup: 2000
runAsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: alertmanager


@ -156,7 +156,7 @@ metadata:
name: blackbox-exporter
spec:
revisionHistoryLimit: 0
replicas: 3
replicas: 2
selector:
matchLabels:
app: blackbox-exporter


@ -26,28 +26,6 @@ spec:
targetLabel: target
---
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
name: nodes-misc
spec:
targets:
staticConfig:
static:
- sprucecone.infra.k-space.ee:9100
- cedarcone.infra.k-space.ee:9100
relabelingConfigs:
- sourceLabels: [__param_target]
targetLabel: instance
- sourceLabels: [__param_target]
targetLabel: __address__
prober:
url: localhost
path: /metrics
metricRelabelings:
- sourceLabels: [__address__]
targetLabel: target
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: node-exporter


@ -1,29 +1,4 @@
---
apiVersion: monitoring.coreos.com/v1alpha1
kind: AlertmanagerConfig
metadata:
name: alertmanager
labels:
app.kubernetes.io/name: alertmanager
spec:
route:
routes:
- continue: false
receiver: slack-notifications
matchers:
- matchType: "="
name: severity
value: critical
receiver: 'null'
receivers:
- name: 'slack-notifications'
slackConfigs:
- channel: '#kube-prod'
sendResolved: true
apiURL:
name: slack-secrets
key: webhook-url
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
@ -36,41 +11,6 @@ spec:
- port: metrics
---
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
name: alertmanager
spec:
alertmanagerConfigMatcherStrategy:
type: None
alertmanagerConfigNamespaceSelector: {}
alertmanagerConfigSelector: {}
alertmanagerConfiguration:
name: alertmanager
secrets:
- slack-secrets
nodeSelector:
dedicated: monitoring
tolerations:
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
replicas: 3
serviceAccountName: alertmanager
externalUrl: http://am.k-space.ee/
routePrefix: "/"
securityContext:
fsGroup: 2000
runAsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: alertmanager
---
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
@ -84,7 +24,7 @@ spec:
effect: NoSchedule
alerting:
alertmanagers:
- namespace: prometheus-operator
- namespace: monitoring
name: alertmanager-operated
port: web
externalUrl: "http://prom.k-space.ee/"
@ -156,7 +96,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: prometheus
namespace: prometheus-operator
namespace: monitoring
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
@ -171,7 +111,7 @@ spec:
description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n \ description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n \
\ LABELS = {{ $labels }}" \ LABELS = {{ $labels }}"
summary: Prometheus job missing (instance {{ $labels.instance }}) summary: Prometheus job missing (instance {{ $labels.instance }})
expr: absent(up{job="prometheus-operator/prometheus"}) expr: absent(up{job="monitoring/prometheus"})
for: 0m for: 0m
labels: labels:
severity: warning severity: warning
@ -220,7 +160,7 @@ spec:
\ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus AlertManager job missing (instance {{ $labels.instance
}})
expr: absent(up{job="prometheus-operator/alertmanager"})
expr: absent(up{job="monitoring/alertmanager"})
for: 0m
labels:
severity: warning
@ -412,7 +352,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: prometheus-operator-prometheus@kubernetescrd
traefik.ingress.kubernetes.io/router.middlewares: monitoring-prometheus@kubernetescrd
spec:
rules:
- host: prom.k-space.ee
@ -437,7 +377,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: prometheus-operator-alertmanager@kubernetescrd
traefik.ingress.kubernetes.io/router.middlewares: monitoring-alertmanager@kubernetescrd
spec:
rules:
- host: am.k-space.ee


@ -13,9 +13,9 @@ spec:
app: snmp-exporter
spec:
containers:
- image: prom/snmp-exporter:latest
- image: prom/snmp-exporter:v0.22.0
name: snmp-exporter
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
securityContext:
runAsNonRoot: true
runAsUser: 1000
@ -143,7 +143,6 @@ spec:
targets:
staticConfig:
static:
- mfp-cyber.pub.k-space.ee
- mfp-chaos.pub.k-space.ee
---
apiVersion: monitoring.coreos.com/v1


@ -1,6 +1,8 @@
# MySQL clusters
# MariaDB clusters
This is namespace for MySQL clusters managed by [operatorlib](https://github.com/codemowers/operatorlib/tree/main/samples/mysql-database-operator)
This is the namespace for MariaDB clusters managed by Codemowers' sample
[mysql-database-operator](https://github.com/codemowers/operatorlib/tree/main/samples/mysql-database-operator)
which is deployed via [ArgoCD](https://argocd.k-space.ee/applications/argocd/mysql-database-operator)
```
kubectl create namespace mysql-clusters


@ -0,0 +1,24 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MysqlDatabaseClass
metadata:
name: dedicated
annotations:
kubernetes.io/description: "Dedicated MySQL cluster"
spec:
reclaimPolicy: Retain
replicas: 3
routers: 2
storageClass: mysql
podSpec:
containers:
- name: mariadb
image: mariadb:10.9.7@sha256:198c7a5fea3d7285762042a628fe8f83f0a7ccef559605b4cc9502e65210880b
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
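
Databases are requested by creating a claim against one of these classes. A minimal sketch, assuming the operator follows the same claim pattern as the `RedisClaim` used in the Wildduck manifests; the `MysqlDatabaseClaim` kind and its fields are assumptions, not taken from these manifests:
```
---
apiVersion: codemowers.cloud/v1beta1
kind: MysqlDatabaseClaim
metadata:
  name: myapp
spec:
  class: dedicated
  capacity: 1Gi
```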


@ -0,0 +1,22 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MysqlDatabaseClass
metadata:
name: external
annotations:
kubernetes.io/description: "External MySQL cluster"
spec:
reclaimPolicy: Retain
shared: true
---
apiVersion: v1
kind: Service
metadata:
name: external
spec:
type: ExternalName
externalName: 172.20.36.1
ports:
- name: mysql
port: 3306
protocol: TCP


@ -1,63 +1,4 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MysqlDatabaseClass
metadata:
name: shared
annotations:
kubernetes.io/description: "Shared MySQL cluster"
spec:
reclaimPolicy: Retain
shared: true
replicas: 3
routers: 2
storageClass: mysql
podSpec:
containers:
- name: mariadb
image: mariadb:10.9.7@sha256:198c7a5fea3d7285762042a628fe8f83f0a7ccef559605b4cc9502e65210880b
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
apiVersion: codemowers.cloud/v1beta1
kind: MysqlDatabaseClass
metadata:
name: dedicated
annotations:
kubernetes.io/description: "Dedicated MySQL cluster"
spec:
reclaimPolicy: Retain
replicas: 3
routers: 2
storageClass: mysql
podSpec:
containers:
- name: mariadb
image: mariadb:10.9.7@sha256:198c7a5fea3d7285762042a628fe8f83f0a7ccef559605b4cc9502e65210880b
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
apiVersion: codemowers.cloud/v1beta1
kind: MysqlDatabaseClass
metadata:
name: external
annotations:
kubernetes.io/description: "External MySQL cluster"
spec:
reclaimPolicy: Retain
shared: true
---
apiVersion: v1
kind: ConfigMap
metadata:
@ -89,19 +30,15 @@ spec:
containers:
- name: phpmyadmin
image: phpmyadmin/phpmyadmin
volumeMounts:
- name: config
mountPath: /etc/phpmyadmin/config.user.inc.php
subPath: config.user.inc.php
ports:
- name: web
containerPort: 80
protocol: TCP
env:
- name: PMA_HOSTS
value: mysql-cluster-shared.mysql-clusters,mysql-cluster.authelia,mysql-cluster.etherpad,mariadb.authelia,mariadb.nextcloud,172.20.36.1
value: shared.mysql-clusters.svc.cluster.local,external.mysql-clusters.svc.cluster.local
- name: PMA_PORTS
value: 6446,6446,6446,3306,3306,3306
value: 3306,3306
- name: PMA_ABSOLUTE_URI
value: https://phpmyadmin.k-space.ee/
- name: UPLOAD_LIMIT


@ -0,0 +1,25 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: MysqlDatabaseClass
metadata:
name: shared
annotations:
kubernetes.io/description: "Shared MySQL cluster"
spec:
reclaimPolicy: Retain
shared: true
replicas: 3
routers: 2
storageClass: mysql
podSpec:
containers:
- name: mariadb
image: mariadb:10.9.7@sha256:198c7a5fea3d7285762042a628fe8f83f0a7ccef559605b4cc9502e65210880b
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage


@ -0,0 +1,11 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mysql
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
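
Because of `volumeBindingMode: WaitForFirstConsumer` a PVC created against this class stays Pending until a pod mounting it is scheduled, so the volume is carved out on the same `dedicated: storage` node as the consumer. A minimal claim (names illustrative):
```
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: mysql
  resources:
    requests:
      storage: 10Gi
```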


@ -1,3 +1,8 @@
# Oracle MySQL operator
We have switched to the MariaDB operator and this is not actually deployed in the cluster anymore
```
helm template mysql-operator mysql-operator/mysql-operator --namespace mysql-operator --include-crds > application.yml
kubectl apply -n mysql-operator -f application.yml -f application-extras.yml -f networkpolicy-base.yml
```


@ -72,7 +72,7 @@ spec:
serviceAccountName: oidc-gateway
containers:
- name: oidc-key-manager
image: codemowers/oidc-gateway
image: docker.io/codemowers/passmower
command: [ '/app/node_modules/.bin/key-manager', 'initialize', '-c', 'cluster' ]
restartPolicy: Never
---
@ -95,7 +95,7 @@ spec:
serviceAccountName: oidc-gateway
containers:
- name: oidc-gateway
image: docker.io/codemowers/oidc-gateway
image: docker.io/codemowers/passmower
ports:
- containerPort: 3000


@ -18,3 +18,4 @@ spec:
availableScopes:
- openid
- profile
tokenEndpointAuthMethod: none


@ -4,9 +4,127 @@ kind: ConfigMap
metadata:
name: oidc-gateway-tos-v1
data:
tos.txt: |
tos.md: |
Terms of Service
Rules
Also known as code of conduct, guidelines, member vows, values etc. Please keep 'em on one wiki page. The canonical URL shall be [k-space.ee/rules](http://k-space.ee/rules), which will redirect to the right wiki page.
These rules were approved on 30 October 2020. The current version contains minor corrections. For future amendments of the rules please put your ideas and thoughts into [pad.k-space.ee/p/rules](https://pad.k-space.ee/p/rules); they will be taken into account during the next General Assembly.
Upon becoming a member I solemnly swear that
1. **Who's who**
- I acknowledge the management of the hackerspace is governed by K-SPACE MTÜ (non-profit organization), its [charter](/pohikiri_80397632_716212.pdf) and the [Non-profit Associations Act](https://www.riigiteataja.ee/en/eli/528052020003/consolide)
- I acknowledge there are different membership forms listed under [membership#packages](/about/membership#packages) which can be largely broken down to affiliates (Regulars, Associates, Residents) who are at the hackerspace to make use of some services and members who are more involved in making the hackerspace happen.
- I acknowledge that in order to participate in the K-SPACE MTÜ management processes I need to obtain the [Member Tier](/about/membership#member-tier) status; note that this is different from the 'Member package', which was renamed to 'Associate tier' in October of 2020 in order to clarify what a member of a non-profit association means.
- From among the ['Member Tier'](https://members.k-space.ee/?filter=resident-member&filter=associate-member) people the board is elected, which represents K-SPACE MTÜ in legal matters such as getting bills paid, signing rental contracts and so forth. Currently the board includes only Erki Naumanis. Bearing the responsibility also means that the board has the final word in many cases.
- I acknowledge K-SPACE MTÜ is renting rooms from SA Tallinna Teaduspark Tehnopol. The rental agreement imposes numerous conditions and constraints due to safety reasons. Eg corridors must be kept accessible and free of flammable materials.
1. **Stay positive.**
- I will set up a recurring payment (püsikorraldus) or pay up front in bulk.
- I will not go in debt.
- I acknowledge failing to pay membership fees for 2 months results in door access cancellation.
- I will mark my payments clearly and consistently with my full name, so the payments can be automatically processed.
1. **Clarity not obscurity.**
- I will mark my equipment, server, locker, desk with my full name, phone number and e-mail.
- I acknowledge that my unlabelled stuff will be unplugged.
- Before asking questions I will familiarize myself with [how to ask questions](https://www.khanacademy.org/computing/computer-programming/programming/becoming-a-community-coder/a/ask-for-program-help) and provide enough details.
1. **Communication is vital**
- I will proactively communicate the need to cancel or pause my subscription, e.g. in case of being unable to pay.
- Upon leaving I will vacate the locker and return any keys given to me.
- I acknowledge Slack is the primary communications platform for K-SPACE MTÜ. This is mainly due to wanting to have a way to communicate even if all the self-hosted infra is down. I will request access to the channel from any of the onboarders. I will not complain about being left out of the loop about changes etc if I am not on the #members channel.
1. **Access controls**
- I acknowledge the primary method of accessing the floor is using one of my own NFC cards (eg green Ühiskaart) or keyfobs.
- I will get my token registered in the door system of the day. To be on the safe side I will register multiple cards and/or keyfobs.
- I acknowledge Slack bot for opening the doors supplements the primary one, eg I can get in if I have forgot my card at home.
- I acknowledge that when it comes to the security of doorcards it's nonexistent. I will not make copies of my card or distribute its unique identifier sequence.
1. **Treat as your own -- with care**
- I will do my best to take care of the equipment and the space.
- I acknowledge that I must turn off the lights and leave windows in microventilation mode when I am the last one to leave.
- In case of equipment breakage I will notify info@k-space.ee immediately for resolution options. Any replacements for K-SPACE MTÜ owned equipment must be purchased by K-SPACE MTÜ, see below under 'Regarding the toys' for why.
- Upon losing a (physical metal) key I will compensate K-SPACE MTÜ 10 EUR for it
1. **Contributions are welcome**
- I acknowledge that my immaterial contributions (e.g. building something for the hackerspace or helping others) don't compensate for my membership fees. The only discount option is the 'Student discount'. Flexing with your contributions on your CV is OK.
- Before bringing any items for donation I will consult with a board member. K-SPACE is not my free (e-)junk disposal station.
- Donations don't compensate for my membership fees. K-SPACE MTÜ still needs to pay rent and electricity bills.
1. **Underpromise, overdeliver**
- I acknowledge there are many fun activities I want to be part of.
- I acknowledge I have obligations elsewhere as well (eg school, work, family).
- I will do my best to fulfill what I have promised to other members.
- I will rather say no in first place than say yes and fail to fulfill the promise.
- I will let the relying party know as soon as possible if I am not able to handle something I promised to do.
1. **Regarding the toys**
- I acknowledge I can bring my own equipment to hackerspace for common use. I acknowledge many others already do that. Lasercutter, CNC cutter, and many power tools are not owned by K-SPACE MTÜ. I take extra care of other members' equipment and before making use of them request training from the machine owner.
- I agree not to utilise any tool or piece of equipment unless I am competent in its use (and have completed induction/training where necessary) or am under the direct supervision of a competent user.
- I agree that the space is a potentially dangerous environment and that I may be exposed to various risks as a result of work undertaken by me or others. Notwithstanding all reasonable attempts by K-Space to ensure the safety of me and others, I agree to the full extent permitted by law to assume complete responsibility for my own actions and their consequences.
- If I think some new equipment would be useful for the space to be purchased by K-SPACE MTÜ I will make a suggestion to K-SPACE MTÜ board via board@k-space.ee.
- If I want to build something for the hackerspace I will consult with a board member for the compensation of materials.
- I acknowledge that my membership fees contribute towards refreshing equipment in common use, that is soldering irons, switches in server room etc.
- Purchases related to the hackerspace must be made by K-SPACE MTÜ.
- This is the clearest option from accounting perspective.
- It's also important so that any warranty, receipts and other documents are correctly assigned to K-SPACE MTÜ and remain usable should you give up your membership at some point.
- Preferred option is to ask for proforma invoice for K-SPACE MTÜ, Akadeemia tee 21/1 and send it to info@k-space.ee - the payment will be executed by a board member.
- In case of Internet shops ask for the credit card from a board member.
1. **No borrowing or lending equipment**
- I acknowledge that equipment provided by K-SPACE MTÜ or its members is for on-prem use only
- I will not take any of the equipment with me
- Only exception to this is workshops happening outside (eg visiting Robotex, Lapikud, IT College etc)
- I will notify info@k-space.ee about what I am going to take with me and why
- I will return equipment immediately after the event
- I acknowledge that this rule is due to numerous occasions people forgetting to return borrowed stuff
1. **Sharing is caring**
- I acknowledge that flexidesks, also known as hot desks (8 desks in the middle of the big room with the projector), are to be cleaned by the end of the day.
- Sometimes impromptu events are scheduled in the room - I will not leave my stuff on flexidesks overnight. If I come in often I can leave my screen and keyboard on one of the desks near the window.
- I acknowledge that for a more persistent setup I should really switch to the resident tier
1. **Being Mindful**
- While being active in the hackerspace I will make sure that my activities are not disturbing others and are not causing excess financial drain on K-SPACE MTÜ.
- Wearing earphones is generally sign of working on something and not wanting to be interrupted.
- I will act frugally. For example running additional servers without paying or running a Bitcoin miner at the expense of K-SPACE MTÜ is NOT okay.
- Causing interruptions in the server room operation during working hours is NOT okay. Designated timeslot for disruptive changes is Thursday 19:00 until Friday 6:00. I recognize that occasionally hardware fails and non-disruptive changes might turn into disruptive outside that timeslot.
- I will not smoke inside the hackerspace rooms. I will exit the building for a cigarette.
1. **Striving for improvement**
- I acknowledge that hackerspace is a vague term ranging from anarchocommunist potsmoking to a <a href="https://www.fablab.berlin/">full-blown commercial operation</a>.
- I will do my best to educate myself about the <a href="https://github.com/0x20/hackerspace-blueprint/releases/latest/download/hackerspace-blueprint.pdf">hackerspace and makerspace</a> scene in general and I will not indulge in flamewars.
1. **Lock and stock**
- I acknowledge one locker is available for me for no charge with supporter and member tiers.
- I acknowledge that if I switched to the resident tier I would not be eligible for a free locker, as I would already have a whole desk for my stuff
- I acknowledge additional lockers are available for 10EUR per month.
1. **Run the jewels**
- I will not make use of a blade server without a legit reason, eg running a hypervisor host. For a single web application obtain a VM from our Proxmox cluster.
- I will shut down machines not in use and I will make sure they stay shut down. It's a recurring issue that servers that have been shut down are discovered powered on again; eg in BIOS make sure it says 'last state' not 'always on', or even better disconnect power.
1. **Community FTW!**
- I acknowledge that people who take time to deal with me are not paid, they do it voluntarily in their free time for sh\*ts and giggles.
- They still pay membership fees.
- I will treat them and their time with respect.
- I will not become a burden for them.
- I acknowledge rules above are not exhaustive.
- I will do my best to act in good faith and not try to work around the rules.
- I accept that there will always be a certain amount of chaos at the hackerspace.
**Disclaimer**: Lauri (K-SPACE founder) believes that:
* a) we can have a hackerspace that is not utter chaos
* b) that is financially in order
* c) offers friendly/flexible pricing for students
* d) keeps doors open for companies and startups
* e) allows reasonable buffer for experimentation.
Lauri's favourite example hackerspaces include [AFRA](https://wiki.hackerspaces.org/AFRA) and [Raumfahrtagentur](https://wiki.hackerspaces.org/Raumfahrtagentur)
---
apiVersion: v1
kind: ConfigMap
@ -15,11 +133,11 @@ metadata:
data:
approval.txt: |
Dear User,
Thank you for your interest in accessing the K-Space MTÜ infrastructure. To become a member, please contact us at info@k-space.ee
Also see https://www.k-space.ee/
Best regards, K-Space MTÜ
---
apiVersion: v1
@ -29,11 +147,11 @@ metadata:
data:
tos.txt: |
Hi, <%= name %>!
You agreed with the Terms of Service at <%= timestamp %>
Content SHA256 hash: <%= hash %>
Best regards,
K-Space MTÜ
tos.ejs: |


@ -199,8 +199,8 @@ spec:
type: DirectoryOrCreate
containers:
- name: csi-driver
image: "harbor.k-space.ee/k-space/rawfile-localpv:latest"
image: "harbor.k-space.ee/k-space/rawfile-localpv@sha256:03452efdac5bccb9bfc0d4b9c371d17588c24b075d7d34fa01f63d7e262eecd8"
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
env:
@ -211,7 +211,7 @@ spec:
- name: IMAGE_REPOSITORY
value: "harbor.k-space.ee/k-space/rawfile-localpv"
- name: IMAGE_TAG
value: "latest"
value: "latest@sha256:03452efdac5bccb9bfc0d4b9c371d17588c24b075d7d34fa01f63d7e262eecd8"
- name: NODE_ID
valueFrom:
fieldRef:
@ -329,8 +329,8 @@ spec:
emptyDir: {}
containers:
- name: csi-driver
image: "harbor.k-space.ee/k-space/rawfile-localpv"
image: "harbor.k-space.ee/k-space/rawfile-localpv@sha256:03452efdac5bccb9bfc0d4b9c371d17588c24b075d7d34fa01f63d7e262eecd8"
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
args:
- csi-driver
- --disable-metrics
@ -342,7 +342,7 @@ spec:
- name: IMAGE_REPOSITORY
value: "harbor.k-space.ee/k-space/rawfile-localpv"
- name: IMAGE_TAG
value: "latest"
value: "latest@sha256:03452efdac5bccb9bfc0d4b9c371d17588c24b075d7d34fa01f63d7e262eecd8"
volumeMounts:
- name: socket-dir
mountPath: /csi


@ -1,6 +1,8 @@
# Postgres clusters
This is namespace for Postgres clusters managed by operator-bundle
This is the namespace for Postgres clusters managed by Codemowers' sample
[postgres-database-operator](https://github.com/codemowers/operatorlib/tree/main/samples/postgres-database-operator)
which is deployed via [ArgoCD](https://argocd.k-space.ee/applications/argocd/postgres-database-operator)
```
kubectl create namespace postgres-clusters


@ -0,0 +1,23 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: PostgresDatabaseClass
metadata:
name: dedicated
annotations:
kubernetes.io/description: "Dedicated Postgres cluster"
spec:
reclaimPolicy: Retain
replicas: 3
storageClass: postgres
podSpec:
containers:
- name: postgres
image: postgres:13.11@sha256:0f18de936266e03891e186db616e530e0e4365ef5fb300d4bb27318538b80604
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
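
As with the MySQL classes, a database would be requested via a claim; a minimal sketch, with the `PostgresDatabaseClaim` kind and its fields assumed by analogy with the `RedisClaim` used in the Wildduck manifests:
```
---
apiVersion: codemowers.cloud/v1beta1
kind: PostgresDatabaseClaim
metadata:
  name: myapp
spec:
  class: dedicated
  capacity: 5Gi
```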


@ -1,43 +1,4 @@
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterPostgresDatabaseClass
metadata:
name: dedicated
spec:
targetNamespace: postgres-clusters
description: "Dedicated Postgres cluster"
replicas: 3
routers: 2
storageClass: postgres
podSpec:
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterPostgresDatabaseClass
metadata:
name: shared
spec:
targetCluster: shared
targetNamespace: postgres-clusters
description: "Shared Postgres cluster"
replicas: 3
routers: 2
storageClass: postgres
podSpec:
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
@ -74,6 +35,16 @@ spec:
port: 8081
targetPort: 8081
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWMiddlewareClient
metadata:
name: pgweb
spec:
displayName: pgweb
uri: 'https://pgweb.k-space.ee'
allowedGroups:
- k-space:floor
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
@ -84,7 +55,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.middlewares: postgres-clusters-pgweb@kubernetescrd
spec:
rules:
- host: pgweb.k-space.ee


@ -0,0 +1,24 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: PostgresDatabaseClass
metadata:
name: shared
annotations:
kubernetes.io/description: "Shared Postgres cluster"
spec:
reclaimPolicy: Retain
shared: true
replicas: 3
storageClass: postgres
podSpec:
containers:
- name: postgres
image: postgres:13.11@sha256:0f18de936266e03891e186db616e530e0e4365ef5fb300d4bb27318538b80604
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage


@ -0,0 +1,11 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: postgres
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"


@ -1,28 +1,11 @@
# Prometheus operator
To deploy Prometheus operator:
```
curl -L https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.61.1/bundle.yaml | sed -e 's/namespace: default/namespace: prometheus-operator/g' > bundle.yml
curl -L https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.67.1/bundle.yaml | sed -e 's/namespace: default/namespace: prometheus-operator/g' > bundle.yml
kubectl create namespace prometheus-operator
kubectl apply --server-side -n prometheus-operator -f bundle.yml
kubectl delete -n prometheus-operator configmap snmp-exporter
kubectl create -n prometheus-operator configmap snmp-exporter --from-file=snmp.yml
kubectl apply -n prometheus-operator -f application.yml -f node-exporter.yml -f blackbox-exporter.yml -f snmp-exporter.yml -f mikrotik-exporter.yml
```
# Slack
```
kubectl create -n prometheus-operator secret generic slack-secrets \
--from-literal=webhook-url=https://hooks.slack.com/services/...
```
# Mikrotik exporter
```
kubectl create -n prometheus-operator secret generic mikrotik-exporter \
--from-literal=MIKROTIK_PASSWORD='f7W!H*Pu' \
--from-literal=PROMETHEUS_BEARER_TOKEN=$(cat /dev/urandom | base64 | head -c 30)
```
Note: Do not put any Prometheus instances or exporters in this namespace; instead have them in the `monitoring` namespace


@ -1,7 +1,8 @@
# Redis clusters
This is namespace for Redis clusters managed by
[Codemowers' Operator Bundle](https://github.com/codemowers/operator-bundle)
This is the namespace for Redis clusters managed by Codemowers' sample
[redis-operator](https://github.com/codemowers/operatorlib/tree/main/samples/redis-operator)
which is deployed via [ArgoCD](https://argocd.k-space.ee/applications/argocd/redis-operator)
```
kubectl create namespace redis-clusters


@ -1,43 +0,0 @@
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterRedisClass
metadata:
name: persistent
spec:
targetNamespace: redis-clusters
description: "Persistent Redis instance"
replicas: 3
storageClass: redis
podSpec:
imagePullPolicy: Never
containers:
- name: redis
image: codemowers/keydb:6.3.2
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterRedisClass
metadata:
name: ephemeral
spec:
targetNamespace: redis-clusters
description: "Ephemeral Redis instance"
replicas: 3
podSpec:
imagePullPolicy: Never
containers:
- name: redis
image: codemowers/keydb:6.3.2
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage

redis-clusters/cache.yaml Normal file

@ -0,0 +1,18 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: RedisClass
metadata:
name: cache
annotations:
kubernetes.io/description: "Caching Redis instance"
spec:
reclaimPolicy: Delete
replicas: 1
podSpec:
containers:
- name: redis
image: docker.io/library/redis:7.0.11@sha256:1008c73f08e9f913868e2fa2e843212b62ea5bf3c66435d87bc7a6207bc0f1b4
imagePullPolicy: IfNotPresent
args:
- --maxmemory-policy
- volatile-lfu
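
Applications request an instance by creating a claim against one of these classes; the syntax below mirrors the `RedisClaim` in wildduck/wildduck.yaml further down, only the name is illustrative:
```
---
apiVersion: codemowers.cloud/v1beta1
kind: RedisClaim
metadata:
  name: myapp-cache
spec:
  class: cache
  capacity: 100Mi
```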


@ -0,0 +1,22 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: RedisClass
metadata:
name: ephemeral
annotations:
kubernetes.io/description: "Ephemeral Redis cluster"
spec:
reclaimPolicy: Delete
replicas: 3
podSpec:
containers:
- name: redis
image: docker.io/library/redis:7.0.11@sha256:1008c73f08e9f913868e2fa2e843212b62ea5bf3c66435d87bc7a6207bc0f1b4
imagePullPolicy: IfNotPresent
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage


@ -0,0 +1,25 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: RedisClass
metadata:
name: persistent
annotations:
kubernetes.io/description: "Persistent Redis instance"
spec:
reclaimPolicy: Retain
replicas: 3
storageClass: redis
podSpec:
containers:
- name: redis
image: docker.io/library/redis:7.0.11@sha256:1008c73f08e9f913868e2fa2e843212b62ea5bf3c66435d87bc7a6207bc0f1b4
imagePullPolicy: IfNotPresent
args:
- --activedefrag yes
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage



@ -0,0 +1,11 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: redis
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"


@ -23,7 +23,7 @@ data:
done
if [[ `git status --porcelain` ]]; then
echo "Attempting Git check in"
git commit -m "Update $(git ls-files -m) file(s)"
git commit -m "$(git diff --cached --shortstat)"
git push
else
echo "No changes to commit"
@ -98,15 +98,14 @@ spec:
- Egress
egress:
- to:
- ipBlock:
cidr: 193.40.103.0/24
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: gitea
- ipBlock:
cidr: 172.23.0.0/24
- ipBlock:
cidr: 100.102.1.0/24
ports:
- protocol: TCP
port: 22
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule

ssh_config Normal file

@ -0,0 +1,8 @@
Host *
User root
ControlPersist 8h
ControlMaster auto
ControlPath ~/.ssh/cm-%r@%h:%p
Host ns1.k-space.ee
Hostname 172.20.0.2
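
With ControlMaster/ControlPersist the first connection to a host opens a shared control socket and subsequent ssh/scp invocations reuse it, skipping reauthentication for up to 8 hours, e.g.:
```
ssh ns1.k-space.ee uptime   # opens the master connection (resolves to 172.20.0.2)
ssh ns1.k-space.ee w        # reuses the existing control socket
```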


@ -12,17 +12,6 @@ parameters:
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: minio
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: prometheus
provisioner: rawfile.csi.openebs.io
@ -34,28 +23,6 @@ parameters:
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: postgres
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mysql
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: woodpecker
provisioner: driver.longhorn.io


@ -10,6 +10,8 @@ websecure:
providers:
kubernetesCRD:
enabled: true
allowEmptyServices: true
allowExternalNameServices: true
kubernetesIngress:
allowEmptyServices: true

wiki/application.yml Normal file

@ -0,0 +1,99 @@
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWClient
metadata:
name: wiki
spec:
displayName: Wiki
uri: https://wiki.k-space.ee
redirectUris:
- https://wiki.k-space.ee/login/a4cdccdc-c879-4387-a64a-6584a02a85e9/callback
allowedGroups:
- k-space:floor
grantTypes:
- authorization_code
- refresh_token
responseTypes:
- code
availableScopes:
- openid
- profile
tokenEndpointAuthMethod: client_secret_post
pkce: false
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: wikijs
labels:
app.kubernetes.io/name: wikijs
spec:
serviceName: wikijs
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: wikijs
template:
metadata:
labels:
app.kubernetes.io/name: wikijs
spec:
containers:
- name: wikijs
image: requarks/wiki:2
env:
- name: DB_TYPE
value: mysql
- name: DB_HOST
value: mariadb.infra.k-space.ee
- name: DB_PORT
value: "3306"
- name: DB_USER
value: kspace_wiki
- name: DB_NAME
value: kspace_wiki
- name: DB_PASS
valueFrom:
secretKeyRef:
name: wikijs-secrets
key: DB_PASS
ports:
- containerPort: 3000
name: http
---
apiVersion: v1
kind: Service
metadata:
name: wikijs
spec:
selector:
app.kubernetes.io/name: wikijs
ports:
- port: 80
name: http
targetPort: http
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: wikijs
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: wiki.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: wikijs
port:
name: http
tls:
- hosts:
- "*.k-space.ee"


@ -0,0 +1,44 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: reset-oidc-config
spec:
template:
spec:
volumes:
- name: tmp
emptyDir: {}
initContainers:
- name: jq
image: alpine/k8s:1.24.16@sha256:06f8942d87fa17b40795bb9a8eff029a9be3fc3c9bcc13d62071de4cc3324153
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
envFrom:
- secretRef:
name: oidc-client-wiki-owner-secrets
command:
- /bin/bash
- -c
- jq '{"strategyKey":"oidc","config":{"clientId":$ENV.OIDC_CLIENT_ID,"clientSecret":$ENV.OIDC_CLIENT_SECRET,"authorizationURL":$ENV.OIDC_GATEWAY_AUTH_URI,"tokenURL":$ENV.OIDC_GATEWAY_TOKEN_URI,"userInfoURL":$ENV.OIDC_GATEWAY_USERINFO_URI,"skipUserProfile":false,"issuer":$ENV.OIDC_GATEWAY_URI,"emailClaim":"email","displayNameClaim":"name","mapGroups":false,"groupsClaim":"groups","logoutURL":$ENV.OIDC_GATEWAY_URI,"acrValues":""}} | "UPDATE authentication SET config=\(.config|tostring|@sh) WHERE strategyKey=\(.strategyKey|tostring|@sh) LIMIT 1"' -n -r > /tmp/update.sql
containers:
- name: mysql
image: mysql
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
env:
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: wikijs-secrets
key: DB_PASS
command:
- /bin/bash
- -c
- mysql -u kspace_wiki kspace_wiki -h 172.20.36.1 -p${MYSQL_PWD} < /tmp/update.sql
restartPolicy: OnFailure
backoffLimit: 4

wildduck/.gitignore vendored

@ -1 +1,2 @@
dhparams.pem
secret.yml


@ -1,13 +1,31 @@
To deploy replace the API secret in configmaps/webmail.yml and:
# Wildduck stack
This application is managed by
[ArgoCD](https://argocd.k-space.ee/applications/argocd/wildduck)
The mail stack consists of several moving parts:
* Haraka with several plugins
* Wildduck plugin to handle incoming mail on port 25 of mail.k-space.ee
* Fight spam with Rspamd
* Fight viruses with ClamAV
* Wildduck
* Provide API for interfacing with other systems such as
Wildduck webmail,
Wildflock e-mail alias generator and
Wildduck Kubernetes operator which deals with account provisioning
* Provide IMAP endpoint for accessing mailbox with traditional
MUA (mail user agents): Android, iPhone, Thunderbird, Evolution etc
* ZoneMTA for handling outbound mail
* Including mail submission from MUA with the help of Wildduck plugin
Outside Kubernetes there is a NAT rule on the Mikrotik router
which rewrites the source IP of any TCP port 25 headed traffic to
originate from the IP address of the mail exchange.
TODO: Figure out how to automate DH parameters generation:
``` ```
kubectl -n wildduck apply -f application.yml
kubectl -n wildduck rollout restart deployment/webmail
openssl dhparam -out dhparams.pem 2048
kubectl create secret generic -n wildduck dhparams --from-file=dhparams.pem
```
To generate secrets:
```
kubectl create secret generic -n wildduck wildduck \
--from-literal=WILDDUCK_API_TOKEN=$(cat /dev/urandom | base64 | head -c 30)
``` ```

wildduck/certificate.yaml Normal file

@ -0,0 +1,12 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: mail
spec:
dnsNames:
- mail.k-space.ee
issuerRef:
kind: ClusterIssuer
name: default
secretName: wildduck-tls

wildduck/clamav.yaml Normal file

@ -0,0 +1,45 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: clamav
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: clamav
template:
metadata:
labels:
app.kubernetes.io/name: clamav
spec:
containers:
# TODO: Figure out how to trigger update
- name: clamd
image: docker.io/clamav/clamav:1.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3310
name: api
volumeMounts:
- mountPath: /var/lib/clamav
name: avdata
- mountPath: /tmp
name: tmp
volumes:
- name: avdata
emptyDir: {}
- name: tmp
emptyDir:
medium: Memory
---
apiVersion: v1
kind: Service
metadata:
name: clamav
spec:
selector:
app.kubernetes.io/name: clamav
ports:
- port: 3310
name: clamav

wildduck/dns.yaml Normal file

@ -0,0 +1,17 @@
---
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: wildduck-mx
spec:
endpoints:
- dnsName: k-space.ee
recordTTL: 300
recordType: MX
targets:
- "10 mail.k-space.ee"
- dnsName: k-space.ee
recordTTL: 300
recordType: TXT
targets:
- "v=spf1 mx include:servers.mcsv.net -all"

wildduck/haraka.yaml Normal file

@ -0,0 +1,168 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: haraka
data:
loglevel: info
plugin_timeout: "180"
queue_dir: /var/lib/haraka/queue
plugins: |-
spf
clamd
rspamd
dkim_verify
wildduck
tls
rspamd.ini: |-
host = rspamd
port = 11333
add_headers = always
timeout = 30
[dkim]
enabled = true
[header]
bar = X-Rspamd-Bar
report = X-Rspamd-Report
score = X-Rspamd-Score
spam = X-Rspamd-Spam
[check]
authenticated = true
private_ip = true
[reject]
spam = false
[soft_reject]
enabled = true
[rmilter_headers]
enabled = true
[spambar]
positive = +
negative = -
neutral = /
clamd.ini: |-
clamd_socket = clamav:3310
[reject]
virus=true
error=false
smtp.ini: |-
listen=0.0.0.0:2525
nodes=1
tls.ini: |-
key=/cert/tls.key
cert=/cert/tls.crt
wildduck.js: |-
module.exports = {
"redis": process.env.REDIS_URI,
"mongo": {
"url": process.env.MONGO_URI,
"sender": "zone-mta",
},
"sender": {
"enabled": true,
"zone": "default",
"gfs": "mail",
"collection": "zone-queue"
},
"srs": {
"secret": process.env.SRS_SECRET
},
"attachments": {
"type": "gridstore",
"bucket": "attachments",
"decodeBase64": true
},
"log": {
"authlogExpireDays": 30
},
"limits": {
"windowSize": 3600,
"rcptIp": 100,
"rcptWindowSize": 60,
"rcpt": 60
},
"gelf": {
"enabled": false
},
"rspamd": {
"forwardSkip": 10,
"blacklist": [
"DMARC_POLICY_REJECT"
],
"softlist": [
"RBL_ZONE"
],
"responses": {
"DMARC_POLICY_REJECT": "Unauthenticated email from {host} is not accepted due to domain's DMARC policy",
"RBL_ZONE": "[{host}] was found from Zone RBL"
}
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: haraka
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: wildduck
app.kubernetes.io/component: haraka
template:
metadata:
labels:
app.kubernetes.io/name: wildduck
app.kubernetes.io/component: haraka
spec:
containers:
- name: haraka
image: docker.io/codemowers/wildduck-haraka-inbound:latest@sha256:a130cc6a60ab2a47cb5971355ed2474136254613b4b8bd30aeabc6e123695ea3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 2525
name: haraka-mta
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65534
volumeMounts:
- name: wildduck-haraka-config
mountPath: /etc/haraka
readOnly: true
- name: wildduck-haraka-config
mountPath: /etc/haraka/config
readOnly: true
- name: var-lib-haraka
mountPath: /var/lib/haraka
- mountPath: /cert
name: cert
env:
- name: SRS_SECRET
valueFrom:
secretKeyRef:
name: srs
key: secret
- name: REDIS_URI
valueFrom:
secretKeyRef:
name: redis-wildduck-owner-secrets
key: REDIS_MASTER_0_URI
- name: MONGO_URI
valueFrom:
secretKeyRef:
name: wildduck
key: MONGO_URI
volumes:
- name: cert
secret:
secretName: wildduck-tls
- name: wildduck-haraka-config
projected:
sources:
- secret:
name: dhparams
- configMap:
name: haraka
- name: var-lib-haraka
emptyDir:
sizeLimit: 500Mi


@ -0,0 +1,24 @@
---
apiVersion: v1
kind: Service
metadata:
name: wildduck
annotations:
external-dns.alpha.kubernetes.io/hostname: mail.k-space.ee
metallb.universe.tf/address-pool: wildduck
spec:
loadBalancerIP: 193.40.103.25
type: LoadBalancer
externalTrafficPolicy: Local
selector:
app.kubernetes.io/name: wildduck
ports:
- port: 993
name: wildduck-mda
targetPort: wildduck-mda
- port: 465
name: zonemta-msa
targetPort: zonemta-msa
- port: 25
name: haraka-mta
targetPort: haraka-mta
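
The exposed ports can be smoke-tested from outside with standard openssl clients, assuming the DNS record and the Mikrotik NAT rule described in the README are in place:
```
openssl s_client -connect mail.k-space.ee:993 -quiet          # IMAPS served by Wildduck
openssl s_client -connect mail.k-space.ee:465 -quiet          # submission served by ZoneMTA
openssl s_client -starttls smtp -connect mail.k-space.ee:25   # MX served by Haraka
```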

wildduck/mongo.yaml Normal file

@ -0,0 +1,83 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: wildduck-readwrite-password
spec:
mapping:
- key: password
value: "%(plaintext)s"
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: wildduck-mongodb
spec:
additionalMongodConfig:
systemLog:
quiet: true
members: 2
arbiters: 1
type: ReplicaSet
version: "6.0.3"
security:
authentication:
modes: ["SCRAM"]
users:
- name: readwrite
db: wildduck
passwordSecretRef:
name: wildduck-readwrite-password
roles:
- name: readWrite
db: application
scramCredentialsSecretName: wildduck-readwrite
statefulSet:
spec:
logLevel: WARN
template:
spec:
containers:
- name: mongod
resources:
requests:
cpu: 100m
memory: 1Gi
limits:
cpu: 4000m
memory: 1Gi
- name: mongodb-agent
resources:
requests:
cpu: 1m
memory: 100Mi
limits: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- wildduck-mongodb-svc
topologyKey: topology.kubernetes.io/zone
volumeClaimTemplates:
- metadata:
name: logs-volume
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 512Mi
- metadata:
name: data-volume
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi


@ -0,0 +1 @@
../mongodb-operator/mongodb-support.yml

wildduck/rspamd.yaml Normal file

@ -0,0 +1,79 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: rspamd
data:
logging.inc: |
type = console;
level = "notice";
worker-normal.inc: |
bind_socket = "*:11333";
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: rspamd
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: rspamd
template:
metadata:
labels:
app.kubernetes.io/name: rspamd
spec:
securityContext:
runAsUser: 1000
runAsNonRoot: true
fsGroup: 1000
containers:
- name: rspamd
image: docker.io/codemowers/rspamd
ports:
- containerPort: 11333
name: rspamd
securityContext:
readOnlyRootFilesystem: true
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 10m
memory: 400Mi
volumeMounts:
- name: var-lib-rspamd
mountPath: /var/lib/rspamd
- name: var-run
mountPath: /run/rspamd
- name: rspamd-config
mountPath: /etc/rspamd/local.d
readOnly: true
volumes:
- name: var-lib-rspamd
emptyDir:
sizeLimit: 1Gi
- name: var-run
emptyDir:
medium: Memory
- name: var-lib-nginx-tmp
emptyDir:
medium: Memory
- name: rspamd-config
projected:
sources:
- configMap:
name: rspamd
---
apiVersion: v1
kind: Service
metadata:
name: rspamd
spec:
selector:
app.kubernetes.io/name: rspamd
ports:
- port: 11333
name: rspamd

wildduck/srs.yaml Normal file

@ -0,0 +1,10 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: srs
spec:
size: 32
mapping:
- key: secret
value: "%(plaintext)s"


@ -14,6 +14,9 @@ metadata:
spec:
displayName: Wildduck Webmail
uri: 'https://webmail.k-space.ee'
allowedGroups:
- k-space:floor
- k-space:friends
headerMapping:
user: Remote-Username
---
@ -24,14 +27,12 @@ metadata:
namespace: wildduck
data:
www.toml: |-
name="Wild Duck Mail"
title="wildduck-www"
[service]
domain="k-space.ee"
identities=1
allowIdentityEdit=false
allowJoin=false
domains=[]
domains=["k-space.ee"]
allowSendFromOtherDomains=false
[service.sso.http]
enabled = true
header = "Remote-Username"
@ -53,7 +54,7 @@ data:
secure=true
port=465
[api]
url="https://mail.k-space.ee"
url="http://wildduck-api:8080"
---
apiVersion: apps/v1
kind: Deployment
@ -61,6 +62,7 @@ metadata:
name: webmail
namespace: wildduck
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels:
@ -72,7 +74,7 @@ spec:
spec:
containers:
- name: webmail
image: harbor.k-space.ee/k-space/wildduck-webmail:latest
image: nodemailer/wildduck-webmail:latest
command:
- node
- server.js
@ -94,8 +96,8 @@ spec:
- name: APPCONF_dbs_redis
valueFrom:
secretKeyRef:
name: redis-webmail-owner-secrets
key: REDIS_MASTER_0_URI
name: redis-wildduck-owner-secrets
key: REDIS_MASTER_1_URI
volumes:
- name: webmail-config
projected:
@ -114,7 +116,7 @@ spec:
ports:
- protocol: TCP
port: 80
targetPort: 5000
targetPort: 3000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
@ -124,7 +126,7 @@ metadata:
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: wildduck-webmail@kubernetescrd
traefik.ingress.kubernetes.io/router.middlewares: wildduck-webmail@kubernetescrd,wildduck-webmail-redirect@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
@ -142,3 +144,13 @@ spec:
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: webmail-redirect
spec:
redirectRegex:
regex: ^https://webmail.k-space.ee/$
replacement: https://webmail.k-space.ee/webmail/
permanent: false


@ -0,0 +1,69 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
  name: wildduck-exporter
  namespace: wildduck
spec:
  size: 32
  mapping:
    - key: PROMETHEUS_BEARER_TOKEN
      value: "%(plaintext)s"
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: wildduck-exporter
  namespace: wildduck
spec:
  podMetricsEndpoints:
    - bearerTokenSecret:
        key: PROMETHEUS_BEARER_TOKEN
        name: wildduck-exporter
      targetPort: 3001
  selector:
    matchLabels:
      app: wildduck-exporter
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wildduck-exporter
  namespace: wildduck
spec:
  revisionHistoryLimit: 0
  replicas: 1
  selector:
    matchLabels:
      app: wildduck-exporter
  template:
    metadata:
      labels:
        app: wildduck-exporter
    spec:
      containers:
        - name: webmail
          image: harbor.k-space.ee/k-space/wildduck-exporter
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 65534
          command:
            - /wildduck_exporter.py
          args:
            - info
            - accounting
          ports:
            - containerPort: 3001
              name: metrics
          env:
            - name: MONGODB_HOST
              valueFrom:
                secretKeyRef:
                  name: wildduck
                  key: MONGO_URI
            - name: PROMETHEUS_BEARER_TOKEN
              valueFrom:
                secretKeyRef:
                  name: wildduck-exporter
                  key: PROMETHEUS_BEARER_TOKEN
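The PodMonitor scrapes with the generated bearer token, so a token-bearing request against port 3001 should return metrics. A sketch of that check (the `/metrics` path is an assumption based on the exporter convention):

```bash
# Fetch the generated token and scrape the exporter through a port-forward.
TOKEN=$(kubectl -n wildduck get secret wildduck-exporter \
  -o jsonpath='{.data.PROMETHEUS_BEARER_TOKEN}' | base64 -d)
kubectl -n wildduck port-forward deploy/wildduck-exporter 3001:3001 &
sleep 2
curl -s -H "Authorization: Bearer $TOKEN" http://127.0.0.1:3001/metrics | head
kill %1
```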

@@ -0,0 +1,40 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: codemowers-io-wildduck-operator
rules:
  - apiGroups:
      - codemowers.io
    resources:
      - oidcgatewayusers
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - codemowers.io
    resources:
      - oidcgatewayusers/status
    verbs:
      - patch
      - update
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: codemowers-io-wildduck-operator
  namespace: wildduck
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: codemowers-io-wildduck-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: codemowers-io-wildduck-operator
subjects:
  - kind: ServiceAccount
    name: codemowers-io-wildduck-operator
    namespace: wildduck
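Whether the binding actually grants what the operator needs can be verified with impersonation; `--subresource` needs a reasonably recent kubectl (sketch):

```bash
# Should both print "yes" once the ClusterRoleBinding is applied.
kubectl auth can-i list oidcgatewayusers.codemowers.io \
  --as=system:serviceaccount:wildduck:codemowers-io-wildduck-operator
kubectl auth can-i patch oidcgatewayusers.codemowers.io --subresource=status \
  --as=system:serviceaccount:wildduck:codemowers-io-wildduck-operator
```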

@@ -0,0 +1,64 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: wildduck-operator
  namespace: wildduck
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wildduck-operator
  serviceName: wildduck-operator
  template:
    metadata:
      labels:
        app: wildduck-operator
    spec:
      containers:
        - image: docker.io/codemowers/wildduck-operator:latest
          name: wildduck-operator
          env:
            - name: MANAGED_DOMAIN
              value: k-space.ee
            - name: ALLOWED_GROUPS
              value: k-space:friends,k-space:floor
            - name: WILDDUCK_API_URL
              value: http://wildduck-api:8080
            - name: WILDDUCK_API_TOKEN
              valueFrom:
                secretKeyRef:
                  name: wildduck
                  key: WILDDUCK_API_TOKEN
          ports:
            - containerPort: 8000
              name: metrics
      enableServiceLinks: false
      serviceAccountName: codemowers-io-wildduck-operator
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: wildduck-operator
  name: wildduck-operator
  namespace: wildduck
spec:
  ports:
    - name: metrics
      port: 8000
      protocol: TCP
  selector:
    app: wildduck-operator
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: wildduck-operator
  namespace: wildduck
spec:
  endpoints:
    - port: metrics
  selector:
    matchLabels:
      app: wildduck-operator
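With the Service and ServiceMonitor in place, Prometheus should discover the operator's metrics endpoint on port 8000; the same endpoint can be eyeballed manually (sketch):

```bash
# Pull a sample of the operator's metrics through a port-forward.
kubectl -n wildduck port-forward svc/wildduck-operator 8000:8000 &
sleep 2
curl -s http://127.0.0.1:8000/metrics | head
kill %1
```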

wildduck/wildduck.yaml Normal file
@@ -0,0 +1,93 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: RedisClaim
metadata:
  name: wildduck
spec:
  class: ephemeral
  capacity: 100Mi
---
apiVersion: v1
kind: Service
metadata:
  name: wildduck-api
spec:
  selector:
    app.kubernetes.io/name: wildduck
    app.kubernetes.io/component: wildduck
  ports:
    - port: 8080
      name: wildduck-api
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wildduck
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: wildduck
      app.kubernetes.io/component: wildduck
  template:
    metadata:
      labels:
        app.kubernetes.io/name: wildduck
        app.kubernetes.io/component: wildduck
    spec:
      containers:
        - name: wildduck
          image: docker.io/nodemailer/wildduck
          ports:
            - containerPort: 8080
              name: wildduck-api
            - containerPort: 9993
              name: wildduck-mda
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          resources:
            limits:
              cpu: 500m
              memory: 200Mi
            requests:
              cpu: 10m
              memory: 100Mi
          env:
            - name: APPCONF_emailDomain
              value: k-space.ee
            - name: APPCONF_log_level
              value: info
            - name: APPCONF_maxForwards
              value: "2000"
            - name: APPCONF_hostname
              value: mail.k-space.ee
            - name: APPCONF_tls_key
              value: /cert/tls.key
            - name: APPCONF_tls_cert
              value: /cert/tls.crt
            - name: APPCONF_api_host
              value: "0.0.0.0"
            - name: APPCONF_api_accessToken
              valueFrom:
                secretKeyRef:
                  name: wildduck
                  key: WILDDUCK_API_TOKEN
            - name: APPCONF_dbs_mongo
              valueFrom:
                secretKeyRef:
                  name: wildduck
                  key: MONGO_URI
            - name: APPCONF_dbs_redis
              valueFrom:
                secretKeyRef:
                  name: redis-wildduck-owner-secrets
                  key: REDIS_MASTER_0_URI
          volumeMounts:
            - mountPath: /cert
              name: cert
      volumes:
        - name: cert
          secret:
            secretName: wildduck-tls
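The `APPCONF_` variables override nested keys in Wildduck's TOML configuration, with underscores standing in for section nesting (e.g. `APPCONF_api_accessToken` sets `accessToken` under `[api]`). Assuming that convention, a rough picture of the resulting config plus a reachability test against the API Service (one-off pod; `WILDDUCK_API_TOKEN` must be set in your shell):

```bash
# Illustrative mapping of the overrides above onto Wildduck's TOML config:
#   APPCONF_emailDomain     -> emailDomain = "k-space.ee"
#   APPCONF_api_host        -> [api] host = "0.0.0.0"
#   APPCONF_api_accessToken -> [api] accessToken = <WILDDUCK_API_TOKEN>
#   APPCONF_dbs_mongo       -> [dbs] mongo = <MONGO_URI>
# Quick in-cluster check that the API answers (header name per Wildduck's API):
kubectl -n wildduck run apicheck --rm -it --restart=Never \
  --image=curlimages/curl -- \
  -s -H "X-Access-Token: $WILDDUCK_API_TOKEN" http://wildduck-api:8080/users
```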

@@ -1,9 +1,22 @@
 ---
+apiVersion: codemowers.cloud/v1beta1
+kind: SecretClaim
+metadata:
+  name: wildflock
+spec:
+  size: 32
+  mapping:
+    - key: SESSION_SECRET
+      value: "%(plaintext)s"
+---
 apiVersion: codemowers.io/v1alpha1
 kind: OIDCGWClient
 metadata:
   name: wildflock
 spec:
+  allowedGroups:
+    - k-space:floor
+    - k-space:friends
   displayName: Wildduck disposable alias generator
   uri: "https://wildflock.k-space.ee/auth-oidc"
   redirectUris:
@@ -64,10 +77,11 @@ metadata:
   labels:
     app: wildflock
 spec:
+  revisionHistoryLimit: 0
   selector:
     matchLabels:
       app: wildflock
-  replicas: 1
+  replicas: 2
   template:
     metadata:
       labels:
@@ -75,10 +89,15 @@ spec:
     spec:
       containers:
         - name: wildflock
-          image: harbor.k-space.ee/k-space/walias:latest
+          image: harbor.k-space.ee/k-space/wildflock:latest
           ports:
             - containerPort: 3030
           env:
+            - name: REDIS_URL
+              valueFrom:
+                secretKeyRef:
+                  name: redis-webmail-owner-secrets
+                  key: REDIS_MASTER_1_URI
             - name: CLIENT_URL
               value: https://wildflock.k-space.ee
             - name: WILDDUCK_DOMAIN
@@ -86,12 +105,17 @@ spec:
             - name: NODE_ENV
               value: prod
             - name: WILDDUCK_URL
-              value: https://mail.k-space.ee
+              value: http://wildduck-api:8080
             - name: WILDDUCK_TOKEN
               valueFrom:
                 secretKeyRef:
                   name: wildduck
                   key: WILDDUCK_API_TOKEN
+            - name: SESSION_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: wildflock
+                  key: SESSION_SECRET
           envFrom:
             - secretRef:
                 name: oidc-client-wildflock-owner-secrets
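The deployment now pulls three secrets from different sources: the OIDC client credentials via `envFrom`, the API token from the `wildduck` Secret, and the freshly claimed `SESSION_SECRET`. A quick pre-rollout check that all three exist and carry the expected keys (sketch, requires jq):

```bash
for s in oidc-client-wildflock-owner-secrets wildduck wildflock; do
  kubectl -n wildduck get secret "$s" -o json \
    | jq -r '.metadata.name + ": " + (.data | keys | join(", "))'
done
```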

wildduck/zonemta.yaml Normal file
@@ -0,0 +1,115 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: zonemta
  namespace: wildduck
data:
  pools.toml: |-
    [[default]]
    address="0.0.0.0"
    name="mail.k-space.ee"
  plugin-wildduck.toml: |-
    [wildduck]
    enabled=["receiver", "sender"]
    interfaces=["feeder"]
    hostname="mail.k-space.ee"
    authlogExpireDays=30
    [wildduck.srs]
    enabled=true
    rewriteDomain="k-space.ee"
  zonemta.toml: |-
    [log]
    level="info"
    [smtpInterfaces.feeder]
    key="/cert/tls.key"
    cert="/cert/tls.crt"
    port=9465
    host="0.0.0.0"
    secure=true
    processes=1
    authentication = true
    maxRecipients=100
    starttls=false
    [plugins]
    # @include "plugin-wildduck.toml"
    [pools]
    # @include "pools.toml"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zonemta
spec:
  replicas: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: wildduck
      app.kubernetes.io/component: zonemta
  template:
    metadata:
      labels:
        app.kubernetes.io/name: wildduck
        app.kubernetes.io/component: zonemta
    spec:
      containers:
        - name: zonemta
          image: docker.io/codemowers/wildduck-zonemta-outbound:latest@sha256:0878c803164e636820398f11a3811f3d92b7771c6202cfe229f97449d0009119
          imagePullPolicy: IfNotPresent
          command:
            - /sbin/tini
            - --
            - node
            - index.js
            - --config
            - /etc/zonemta/zonemta.toml
          ports:
            - containerPort: 9465
              name: zonemta-msa
            - containerPort: 10280
              name: api
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          resources:
            limits:
              cpu: 500m
              memory: 1000Mi
            requests:
              cpu: 10m
              memory: 500Mi
          env:
            - name: APPCONF_plugins_wildduck_srs_secret
              valueFrom:
                secretKeyRef:
                  name: srs
                  key: secret
            - name: APPCONF_dbs_sender
              value: zone-mta
            - name: APPCONF_dbs_mongo
              valueFrom:
                secretKeyRef:
                  name: wildduck
                  key: MONGO_URI
            - name: APPCONF_dbs_redis
              valueFrom:
                secretKeyRef:
                  name: redis-wildduck-owner-secrets
                  key: REDIS_MASTER_0_URI
          volumeMounts:
            - name: cert
              mountPath: /cert
            - name: zonemta-config
              mountPath: /etc/zonemta
              readOnly: true
      volumes:
        - name: zonemta-config
          projected:
            sources:
              - configMap:
                  name: zonemta
        - name: cert
          secret:
            secretName: wildduck-tls
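The feeder interface is configured with `secure=true` (implicit TLS rather than STARTTLS), so a plain TLS handshake against port 9465 should succeed. A sketch of that check, assuming a `zonemta` Service exposes the port in-cluster:

```bash
# One-off pod; the alpine/openssl image's entrypoint is openssl.
kubectl -n wildduck run tlscheck --rm -it --restart=Never \
  --image=alpine/openssl -- \
  s_client -connect zonemta:9465 -brief
```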

Some files were not shown because too many files have changed in this diff.