Commit 5e04a1bd43: provision new worker nodes with ansible (2024-08-09 12:07:03 +03:00)
112 changed files with 18209 additions and 2461 deletions


@@ -1,4 +0,0 @@
extends: default
ignore-from-file: .gitignore
rules:
line-length: disable


@@ -24,12 +24,6 @@ Tip: Search the repo for `kind: xyz` for examples.
[^nonginx]: No nginx annotations! Use `kind: Ingress` instead. `IngressRoute` is not used as it doesn't support [`external-dns`](bind/README.md) out of the box.
[^authz]: Applications should use OpenID Connect (`kind: OIDCClient`) for authentication, wherever possible. If that is not possible, use `kind: OIDCMiddlewareClient`, which provides authentication via a Traefik middleware (`traefik.ingress.kubernetes.io/router.middlewares: passmower-proxmox@kubernetescrd`). Sometimes you might use both for extra security.
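A minimal sketch of the middleware variant (the name and URI are placeholders, not a deployment that exists in this repo):
```
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
  name: example # placeholder
spec:
  displayName: Example app
  uri: 'https://example.k-space.ee' # placeholder
```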
### Network
All nodes are in Infra VLAN 21. Routing is implemented with BGP: all nodes and the router form a full mesh. Both Service LB IPs and Pod IPs are advertised to the router. The router does NAT for outbound pod traffic.
See the [Calico installation](tigera-operator/application.yml) for the Kube side, and Routing / BGP in the router.
Static routes for 193.40.103.36/30 have been added on the PVE nodes to make their communication with Passmower via Traefik more stable: otherwise packets coming back to the PVE are routed by the worker nodes directly via VLAN 21 internal IPs, breaking TCP.
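A sketch of the equivalent ad-hoc Ansible task (the next-hop address is an assumed placeholder; the real routes live in the PVE network configuration, not in this repo):
```
- name: Pin the return route for the Traefik service range (sketch)
  hosts: proxmox
  tasks:
    - name: Route 193.40.103.36/30 via the router instead of VLAN 21 peers
      ansible.builtin.command: ip route replace 193.40.103.36/30 via 172.21.20.254 # placeholder next-hop
```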
<!-- Linked to by https://wiki.k-space.ee/e/en/hosting/storage -->
### Databases / -stores:
- KeyDB: `kind: KeydbClaim` (replaces Redis[^redisdead])

ansible/README.md Normal file

@@ -0,0 +1,5 @@
#TODO:
- inventory
- running playbooks NB! about PWD
- ssh_config; updating
Include ssh_config (with known_hosts) to access all machines listed.

ansible/ansible.cfg Normal file

@@ -0,0 +1,15 @@
[defaults]
inventory = inventory.yml
nocows = 1
pattern =
deprecation_warnings = False
fact_caching = jsonfile
fact_caching_connection = ~/.ansible/k-space-fact-cache
fact_caching_timeout = 7200
remote_user = root
[ssh_connection]
control_path = ~/.ssh/cm-%%r@%%h:%%p
ssh_args = -o ControlMaster=auto -o ControlPersist=8h -F ssh_config
pipelining = True

ansible/bind-primary.yml Normal file

@@ -0,0 +1,76 @@
- name: Setup primary nameserver
hosts: ns1.k-space.ee
tasks:
- name: Make sure bind9 is installed
ansible.builtin.apt:
name: bind9
state: present
- name: Configure Bind
register: bind
copy:
dest: /etc/bind/named.conf
content: |
# This file is managed by Ansible
# https://git.k-space.ee/k-space/kube/src/branch/master/ansible-bind-primary.yml
# Do NOT modify manually
include "/etc/bind/named.conf.local";
include "/etc/bind/readwrite.key";
include "/etc/bind/readonly.key";
options {
directory "/var/cache/bind";
version "";
listen-on { any; };
listen-on-v6 { any; };
pid-file "/var/run/named/named.pid";
notify explicit; also-notify { 172.20.53.1; 172.20.53.2; 172.20.53.3; };
allow-recursion { none; };
recursion no;
check-names master ignore;
dnssec-validation no;
auth-nxdomain no;
};
# https://kb.isc.org/docs/aa-00723
acl allowed {
172.20.3.0/24;
172.20.4.0/24;
};
acl rejected { !allowed; any; };
zone "." {
type hint;
file "/var/lib/bind/db.root";
};
zone "k-space.ee" {
type master;
file "/var/lib/bind/db.k-space.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
zone "k6.ee" {
type master;
file "/var/lib/bind/db.k6.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
zone "kspace.ee" {
type master;
file "/var/lib/bind/db.kspace.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
- name: Check Bind config
ansible.builtin.shell: "named-checkconf"
- name: Reload Bind config
service:
name: bind9
state: reloaded
when: bind.changed

ansible/doors.yml Normal file

@@ -0,0 +1,65 @@
# ansible doors -m shell -a "ctr image pull harbor.k-space.ee/k-space/mjpg-streamer:latest"
# journalctl -u mjpg_streamer@video0.service -f
# Referenced/linked and documented by https://wiki.k-space.ee/en/hosting/doors
- name: Setup doors
hosts: doors
tasks:
- name: Make sure containerd is installed
ansible.builtin.apt:
name: containerd
state: present
- name: Copy systemd service for Doorboy controller # https://git.k-space.ee/k-space/godoor
copy:
dest: /etc/systemd/system/godoor.service
content: |
[Unit]
Description=Doorboy service
Documentation=https://git.k-space.ee/k-space/godoor
After=network.target
[Service]
Environment=IMAGE=harbor.k-space.ee/k-space/godoor:latest
ExecStartPre=-ctr task kill --signal=9 %N
ExecStartPre=-ctr task rm %N
ExecStartPre=-ctr c rm %N
ExecStartPre=-ctr image pull $IMAGE
          ExecStart=ctr run --rm --pid-file=/run/%N.pid --privileged --read-only --env-file=/etc/godoor --env=KDOORPI_API_ALLOWED=https://doorboy-proxy.k-space.ee/allowed --env=KDOORPI_API_LONGPOLL=https://doorboy-proxy.k-space.ee/longpoll --env=KDOORPI_API_SWIPE=https://doorboy-proxy.k-space.ee/swipe --env=KDOORPI_DOOR=%H --net-host --cwd /app $IMAGE %N /godoor
ExecStopPost=ctr task rm %N
ExecStopPost=ctr c rm %N
Restart=always
[Install]
WantedBy=multi-user.target
- name: Enable Doorboy controller
ansible.builtin.systemd:
state: restarted
daemon_reload: yes
name: godoor.service
    - name: Copy systemd service for mjpg-streamer # https://git.k-space.ee/k-space/mjpg-streamer
copy:
dest: /etc/systemd/system/mjpg_streamer@.service
content: |
[Unit]
Description=A server for streaming Motion-JPEG from a video capture device
After=network.target
ConditionPathExists=/dev/%I
[Service]
Environment=IMAGE=harbor.k-space.ee/k-space/mjpg-streamer:latest
StandardOutput=tty
Type=forking
ExecStartPre=-ctr task kill --signal=9 %p_%i
ExecStartPre=-ctr task rm %p_%i
ExecStartPre=-ctr c rm %p_%i
ExecStartPre=-ctr image pull $IMAGE
ExecStart=ctr run --tty -d --rm --pid-file=/run/%i.pid --privileged --read-only --net-host $IMAGE %p_%i /usr/local/bin/mjpg_streamer -i 'input_uvc.so -d /dev/%I -r 1280x720 -f 10' -o 'output_http.so -w /usr/share/mjpg_streamer/www'
ExecStopPost=ctr task rm %p_%i
ExecStopPost=ctr c rm %p_%i
PIDFile=/run/%i.pid
[Install]
WantedBy=multi-user.target
- name: Enable mjpg-streamer
ansible.builtin.systemd:
state: restarted
daemon_reload: yes
name: mjpg_streamer@video0.service

ansible/inventory.yml Normal file

@@ -0,0 +1,83 @@
# This file is linked from /README.md as 'all infra'.
##### Not otherwise linked:
# Homepage: https://git.k-space.ee/k-space/homepage (on GitLab)
# Slack: https://k-space-ee.slack.com
# Routers/Switches: https://git.k-space.ee/k-space/rosdump
all:
vars:
admins:
- lauri
- eaas
extra_admins: []
children:
# https://wiki.k-space.ee/en/hosting/storage
nasgroup:
hosts:
nas.k-space.ee: { ansible_host: 172.23.0.7 }
offsite:
ansible_host: 78.28.64.17
ansible_port: 10648
vars:
offsite_dataset: offsite/backup_zrepl
misc:
children:
nasgroup:
hosts:
# https://git.k-space.ee/k-space/kube: bind/README.md (primary DNS, PVE VM)
ns1.k-space.ee: { ansible_host: 172.20.0.2 }
# https://wiki.k-space.ee/hosting/proxmox (depends on nas.k-space.ee)
proxmox: # aka PVE, Proxmox Virtualization Environment
vars:
extra_admins:
- rasmus
hosts:
pve1: { ansible_host: 172.21.20.1 }
pve2: { ansible_host: 172.21.20.2 }
pve8: { ansible_host: 172.21.20.8 }
pve9: { ansible_host: 172.21.20.9 }
# https://git.k-space.ee/k-space/kube: README.md
# CLUSTER.md (PVE VMs + external nas.k-space.ee)
kubernetes:
children:
masters:
hosts:
master1.kube.k-space.ee: { ansible_host: 172.21.3.51 }
master2.kube.k-space.ee: { ansible_host: 172.21.3.52 }
master3.kube.k-space.ee: { ansible_host: 172.21.3.53 }
kubelets:
children:
            mon: # they sit in a privileged VLAN
hosts:
mon1.kube.k-space.ee: { ansible_host: 172.21.3.61 }
mon2.kube.k-space.ee: { ansible_host: 172.21.3.62 }
mon3.kube.k-space.ee: { ansible_host: 172.21.3.63 }
storage: # longhorn, to be replaced with a more direct CSI
hosts:
storage1.kube.k-space.ee: { ansible_host: 172.21.3.71 }
storage2.kube.k-space.ee: { ansible_host: 172.21.3.72 }
storage3.kube.k-space.ee: { ansible_host: 172.21.3.73 }
storage4.kube.k-space.ee: { ansible_host: 172.21.3.74 }
workers:
hosts:
worker1.kube.k-space.ee: { ansible_host: 172.20.3.81 }
worker2.kube.k-space.ee: { ansible_host: 172.20.3.82 }
worker3.kube.k-space.ee: { ansible_host: 172.20.3.83 }
worker4.kube.k-space.ee: { ansible_host: 172.20.3.84 }
worker9.kube.k-space.ee: { ansible_host: 172.21.3.89 } # Nvidia Tegra Jetson-AGX
# https://wiki.k-space.ee/en/hosting/doors
# See also: https://git.k-space.ee/k-space/kube: camtiler/README.md
doors:
vars:
extra_admins:
- arti
hosts:
grounddoor: { ansible_host: 100.102.3.1 }
frontdoor: { ansible_host: 100.102.3.2 }
backdoor: { ansible_host: 100.102.3.3 }
workshopdoor: { ansible_host: 100.102.3.4 }

ansible/known_hosts Normal file

@@ -0,0 +1,27 @@
# Use `ansible-playbook update-ssh-config.yml` to update this file
100.102.3.3 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBN4SifLddYAz8CasmFwX5TQbiM8atAYMFuDQRchclHM0sq9Pi8wRxSZK8SHON4Y7YFsIY+cXnQ2Wx4FpzKmfJYE= # backdoor
100.102.3.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE8/E7PDqTrTdU+MFurHkIPzTBTGcSJqXuv5n0Ugd/IlvOr2v+eYi3ma91pSBmF5Hjy9foWypCLZfH+vWMkV0gs= # frontdoor
100.102.3.1 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFcH8D2AhnESw3uu2f4EHBhT9rORQQJJ3TlbwN+kro5tRZsZk4p3MKabBiuCSZw2KWjfu0MY4yHSCrUUQrggJDM= # grounddoor
172.21.3.51 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMYy07yLlOiFvXzmVDIULS9VDCMz7T+qOq4M+x8Lo3KEKamI6ZD737mvimPTW6K1FRBzzq67Mq495UnoFKVnQWE= # master1.kube.k-space.ee
172.21.3.52 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKRFfYDaTH58FUw+9stBVsyCviaPCGEbe9Y1a9WKvj98S7m+qU03YvtfPkRfEH/3iXHDvngEDVpJrTWW4y6e6MI= # master2.kube.k-space.ee
172.21.3.53 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIqIepuMkMo/KO3bb4X6lgb6YViAifPmgHXVrbtHwbOZLll5Qqr4pXdLDxkuZsmiE7iZBw2gSzZLcNMGdDEnWrY= # master3.kube.k-space.ee
172.21.3.61 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCJ9XgDz2NEzvjw/nDmRIKUJAmNqzsaXMJn4WFiWfTz1x2HrRcXgY3UXKWUxUvJO1jJ7hIvyE+V/8UtwYRDP1uY= # mon1.kube.k-space.ee
172.21.3.62 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLveng7H/2Gek+HYDYRWFD0Dy+4l/zjrbF2mnnkBI5CFOtqK0zwBh41IlizkpmmI5fqEIXwhLFHZEWXbUvev5oo= # mon2.kube.k-space.ee
172.21.3.63 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMMgOIL43dgCYlwAI2O269iHxo7ymweG7NoXjnk2F529G5mP+mp5We4lDZEJVyLYtemvhQ2hEHI/WVPWy3SNiuM= # mon3.kube.k-space.ee
172.23.0.7 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC15tWIbuBqd4UZLaRbpb6oTlwniS4cg2IYZYe5ys352azj2kzOnvtCGiPo0fynFadwfDHtge9JjK6Efwl87Wgc= # nas.k-space.ee
172.20.0.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO77ffkJi903aA6cM7HnFfSyYbPP4jkydI/+/tIGeMv+c9BYOE27n+ylNERaEhYkyddIx93MB4M6GYRyQOjLWSc= # ns1.k-space.ee
[78.28.64.17]:10648 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE7J61p3YzsbRAYtXIrhQUeqc47LuVw1I38egHzi/kLG+CFPsyB9krd29yJMyLRjyM+m5qUjoxNiWK/x0g3jKOI= # offsite
172.21.20.1 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHLHc3T/J5G1CIf33XeniJk5+D0cpaXe0OkHmpCQ3DoZC3KkFBpA+/U1mlo+qb8xf/GrMj6BMMMLXKSUxbEVGaU= # pve1
172.21.20.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFGSRetFdHExRT69pHJAcuhqzAu+Xx4K2AEmWJhUZ2JYF7aa0JbltiYQs58Bpx9s9NA793tiHLZXABy56dI+D9Q= # pve2
172.21.20.8 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMzNvX3ga56EELcI9gV7moyFdKllSwb81V2tCWIjhFVSFTo3QKH/gX/MBnjcs+RxeVV3GF7zIIv8492bCvgiO9s= # pve8
172.21.20.9 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNU4YzKSzzUSnAgh4L1DF3dlC1VEaKVaIeTgsL5VJ0UMqjPr+8QMjIvo28cSLfIQYtfoQbt7ASVsm0uDQvKOldM= # pve9
172.21.3.71 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI2jy8EsMo7Voor4URCMdgiEzc0nmYDowV4gB2rZ6hnH7bcKGdaODsCyBH6nvbitgnESCC8136RmdxCnO9/TuJ0= # storage1.kube.k-space.ee
172.21.3.72 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKxa2PbOj7bV0AUkBZuPkQZ/3ZMeh1mUCD+rwB4+sXbvTc+ca+xgcPGdAozbY/cUA4GdaKelhjI9DEC46MeFymY= # storage2.kube.k-space.ee
172.21.3.73 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGYqNHAxwwoZqne/uv5syRb+tEwpbaGeK8oct4IjIHcmPdU32JlMiSqLX7d58t/b8tqE1z2rM4gCc4bpzvNrHMQ= # storage3.kube.k-space.ee
172.21.3.74 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI+FRuwbrUpMDg9gKf6AqcfovEkt8r5SgB4JXEuMD+I6pp+2PfbxMwrXQ8Xg3oHW+poG413KWw4FZOWv2gH4CEQ= # storage4.kube.k-space.ee
172.20.3.81 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPnmGiEWtWnNNcF872fhYKCD07QwOb75BDEwN3fC4QYmBAbiN0iX/UH96r02V5f7uga3a07/xxt5P0cfEOdtQwQ= # worker1.kube.k-space.ee
172.20.3.82 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBkSNAYeugxGvNmV3biY1s0BWPCEw3g3H0VWLomu/vPbg+GN10/A1pfgt62DHFCYDB6QZwkZM6HIFy8y0xhRl9g= # worker2.kube.k-space.ee
172.20.3.83 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBe+A9Bg54UwUvlPguKDyNAsX7mYbnfMOxhK2UP2YofPlzJ0KDUuH5mbmw76XWz0L6jhT6I7hyc0QsFBdO3ug68= # worker3.kube.k-space.ee
172.20.3.84 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKoNIL+kEYphi/yCdhIytxqRaucm2aTzFrmNN4gEjCrn4TK8A46fyqAuwmgyLQFm7RD5qcEKPWP57Cl0DhTU1T4= # worker4.kube.k-space.ee
172.21.3.89 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCoepYYnNMXkZ9dn4RSSMhFFsppPVkzmjkG3z9vK84454XkI4wizmhUlZ0p+Ovx2YbrjbKibfrrtk8RgWUMi0rY= # worker9.kube.k-space.ee
100.102.3.4 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMpkSqEOyYrKXChxl6PAV+q0KypOPnKsXoXWO1JSZSIOwAs5YTzt8Q1Ryb+nQnAOlGj1AY1H7sRllTzdv0cA/EM= # workshopdoor

ansible/kubernetes.yml Normal file

@@ -0,0 +1,239 @@
---
# ansible-galaxy install -r requirements.yaml
- name: Install cri-o
hosts:
- worker9.kube.k-space.ee
vars:
CRIO_VERSION: "v1.30"
tasks:
- name: ensure curl is installed
ansible.builtin.apt:
name: curl
state: present
- name: Ensure /etc/apt/keyrings exists
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
# TODO: fix
# - name: add k8s repo apt key
# ansible.builtin.shell: "curl -fsSL https://pkgs.k8s.io/addons:/cri-o:/stable:/{{ CRIO_VERSION }}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/cri-o-apt-keyring.gpg"
- name: add k8s repo
ansible.builtin.apt_repository:
repo: "deb [signed-by=/etc/apt/keyrings/cri-o-apt-keyring.gpg] https://pkgs.k8s.io/addons:/cri-o:/stable:/{{ CRIO_VERSION }}/deb/ /"
state: present
filename: cri-o
- name: check current crictl version
command: "/usr/bin/crictl --version"
failed_when: false
changed_when: false
register: crictl_version_check
- name: download crictl
unarchive:
src: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ CRIO_VERSION }}/crictl-{{ CRIO_VERSION }}-linux-{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}.tar.gz"
dest: /tmp
remote_src: true
when: >
crictl_version_check.stdout is not defined or CRIO_VERSION not in crictl_version_check.stdout
register: crictl_download_check
    - name: move crictl binary into place
      copy:
        src: /tmp/crictl
        dest: "/usr/bin/crictl"
        remote_src: true # the archive was unpacked on the target host, not the controller
        mode: '0755'
      when: >
        crictl_download_check is changed
- name: ensure crio is installed
ansible.builtin.apt:
name: cri-o
state: present
- name: Reconfigure Kubernetes worker nodes
hosts:
- storage
- workers
tasks:
- name: Configure grub defaults
copy:
dest: "/etc/default/grub"
content: |
GRUB_DEFAULT=0
GRUB_TIMEOUT_STYLE=countdown
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash memhp_default_state=online"
GRUB_CMDLINE_LINUX="memhp_default_state=online rootflags=pquota"
register: grub_defaults
when: ansible_architecture == 'x86_64'
- name: Load grub defaults
ansible.builtin.shell: update-grub
when: grub_defaults.changed
- name: Ensure nfs-common is installed
ansible.builtin.apt:
name: nfs-common
state: present
- name: Reconfigure Kubernetes nodes
hosts: kubernetes
vars:
KUBERNETES_VERSION: v1.30.3
IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
tasks:
- name: Remove APT packages
ansible.builtin.apt:
name: "{{ item }}"
state: absent
loop:
- kubelet
- kubeadm
- kubectl
- name: Download kubectl, kubeadm, kubelet
ansible.builtin.get_url:
url: "https://cdn.dl.k8s.io/release/{{ KUBERNETES_VERSION }}/bin/linux/{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}/{{ item }}"
dest: "/usr/bin/{{ item }}-{{ KUBERNETES_VERSION }}"
mode: '0755'
loop:
- kubelet
- kubectl
- kubeadm
- name: Create /etc/systemd/system/kubelet.service
ansible.builtin.copy:
content: |
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/home/
Wants=network-online.target
After=network-online.target
[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
dest: /etc/systemd/system/kubelet.service
register: kubelet_service
- name: Create symlinks for kubectl, kubeadm, kubelet
ansible.builtin.file:
src: "/usr/bin/{{ item }}-{{ KUBERNETES_VERSION }}"
dest: "/usr/bin/{{ item }}"
state: link
loop:
- kubelet
- kubectl
- kubeadm
register: kubelet
- name: Restart Kubelet
service:
name: kubelet
enabled: true
state: restarted
daemon_reload: true
when: kubelet.changed or kubelet_service.changed
- name: Ensure /var/lib/kubelet exists
ansible.builtin.file:
path: /var/lib/kubelet
state: directory
- name: Configure kubelet
ansible.builtin.template:
src: kubelet.j2
dest: /var/lib/kubelet/config.yaml
mode: 644
- name: Ensure /etc/systemd/system/kubelet.service.d/ exists
ansible.builtin.file:
path: /etc/systemd/system/kubelet.service.d
state: directory
- name: Configure kubelet service
ansible.builtin.template:
src: 10-kubeadm.j2
dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
mode: 644
# TODO: register new node if needed
    - name: Disable unnecessary services
ignore_errors: true
loop:
- gdm3
- snapd
- bluetooth
- multipathd
- zram
service:
name: "{{item}}"
state: stopped
enabled: no
- name: Ensure /etc/containers exists
ansible.builtin.file:
path: /etc/containers
state: directory
- name: Reset /etc/containers/registries.conf
ansible.builtin.copy:
content: "unqualified-search-registries = [\"docker.io\"]\n"
dest: /etc/containers/registries.conf
register: registries
- name: Restart CRI-O
service:
name: cri-o
state: restarted
when: registries.changed
- name: Reset /etc/modules
ansible.builtin.copy:
content: |
overlay
br_netfilter
dest: /etc/modules
register: kernel_modules
- name: Load kernel modules
ansible.builtin.shell: "cat /etc/modules | xargs -L 1 -t modprobe"
when: kernel_modules.changed
- name: Reset /etc/sysctl.d/99-k8s.conf
ansible.builtin.copy:
content: |
net.ipv4.conf.all.accept_redirects = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.max_map_count = 524288
fs.inotify.max_user_instances = 1280
fs.inotify.max_user_watches = 655360
dest: /etc/sysctl.d/99-k8s.conf
register: sysctl
- name: Reload sysctl config
ansible.builtin.shell: "sysctl --system"
when: sysctl.changed
- name: Reconfigure kube-apiserver to use Passmower OIDC endpoint
ansible.builtin.template:
src: kube-apiserver.j2
dest: /etc/kubernetes/manifests/kube-apiserver.yaml
mode: 600
register: apiserver
when:
- inventory_hostname in groups["masters"]
- name: Restart kube-apiserver
ansible.builtin.shell: "killall kube-apiserver"
when: apiserver.changed

ansible/ssh_config Normal file

@@ -0,0 +1,211 @@
# Use `ansible-playbook update-ssh-config.yml` to update this file
# Use `ssh -F ssh_config ...` to connect to target machine or
# Add `Include ~/path/to/this/kube/ssh_config` in your ~/.ssh/config
Host backdoor 100.102.3.3
User root
Hostname 100.102.3.3
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host frontdoor 100.102.3.2
User root
Hostname 100.102.3.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host grounddoor 100.102.3.1
User root
Hostname 100.102.3.1
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master1.kube.k-space.ee 172.21.3.51
User root
Hostname 172.21.3.51
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master2.kube.k-space.ee 172.21.3.52
User root
Hostname 172.21.3.52
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master3.kube.k-space.ee 172.21.3.53
User root
Hostname 172.21.3.53
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon1.kube.k-space.ee 172.21.3.61
User root
Hostname 172.21.3.61
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon2.kube.k-space.ee 172.21.3.62
User root
Hostname 172.21.3.62
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon3.kube.k-space.ee 172.21.3.63
User root
Hostname 172.21.3.63
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host nas.k-space.ee 172.23.0.7
User root
Hostname 172.23.0.7
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host ns1.k-space.ee 172.20.0.2
User root
Hostname 172.20.0.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host offsite 78.28.64.17
User root
Hostname 78.28.64.17
Port 10648
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve1 172.21.20.1
User root
Hostname 172.21.20.1
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve2 172.21.20.2
User root
Hostname 172.21.20.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve8 172.21.20.8
User root
Hostname 172.21.20.8
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve9 172.21.20.9
User root
Hostname 172.21.20.9
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage1.kube.k-space.ee 172.21.3.71
User root
Hostname 172.21.3.71
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage2.kube.k-space.ee 172.21.3.72
User root
Hostname 172.21.3.72
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage3.kube.k-space.ee 172.21.3.73
User root
Hostname 172.21.3.73
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage4.kube.k-space.ee 172.21.3.74
User root
Hostname 172.21.3.74
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker1.kube.k-space.ee 172.20.3.81
User root
Hostname 172.20.3.81
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker2.kube.k-space.ee 172.20.3.82
User root
Hostname 172.20.3.82
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker3.kube.k-space.ee 172.20.3.83
User root
Hostname 172.20.3.83
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker4.kube.k-space.ee 172.20.3.84
User root
Hostname 172.20.3.84
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker9.kube.k-space.ee 172.21.3.89
User root
Hostname 172.21.3.89
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host workshopdoor 100.102.3.4
User root
Hostname 100.102.3.4
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h


@@ -0,0 +1,12 @@
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
StandardOutput=null


@@ -0,0 +1,132 @@
apiVersion: v1
kind: Pod
metadata:
annotations:
kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: {{ IP }}:6443
creationTimestamp: null
labels:
component: kube-apiserver
tier: control-plane
name: kube-apiserver
namespace: kube-system
spec:
containers:
- command:
- kube-apiserver
- --advertise-address={{ IP }}
- --allow-privileged=true
- --authorization-mode=Node,RBAC
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --enable-admission-plugins=NodeRestriction
- --enable-bootstrap-token-auth=true
- --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
- --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
- --etcd-servers=https://127.0.0.1:2379
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --oidc-client-id=passmower.kubelogin
- --oidc-groups-claim=groups
- --oidc-issuer-url=https://auth.k-space.ee/
- --oidc-username-claim=sub
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --requestheader-allowed-names=front-proxy-client
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group
- --requestheader-username-headers=X-Remote-User
- --secure-port=6443
- --service-account-issuer=https://kubernetes.default.svc.cluster.local
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
image: registry.k8s.io/kube-apiserver:{{ KUBERNETES_VERSION }}
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 8
httpGet:
host: {{ IP }}
path: /livez
port: 6443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
name: kube-apiserver
readinessProbe:
failureThreshold: 3
httpGet:
host: {{ IP }}
path: /readyz
port: 6443
scheme: HTTPS
periodSeconds: 1
timeoutSeconds: 15
resources:
requests:
cpu: 250m
startupProbe:
failureThreshold: 24
httpGet:
host: {{ IP }}
path: /livez
port: 6443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
volumeMounts:
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
- mountPath: /etc/ca-certificates
name: etc-ca-certificates
readOnly: true
- mountPath: /etc/pki
name: etc-pki
readOnly: true
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /usr/local/share/ca-certificates
name: usr-local-share-ca-certificates
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usr-share-ca-certificates
readOnly: true
hostNetwork: true
priority: 2000001000
priorityClassName: system-node-critical
securityContext:
seccompProfile:
type: RuntimeDefault
volumes:
- hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
name: ca-certs
- hostPath:
path: /etc/ca-certificates
type: DirectoryOrCreate
name: etc-ca-certificates
- hostPath:
path: /etc/pki
type: DirectoryOrCreate
name: etc-pki
- hostPath:
path: /etc/kubernetes/pki
type: DirectoryOrCreate
name: k8s-certs
- hostPath:
path: /usr/local/share/ca-certificates
type: DirectoryOrCreate
name: usr-local-share-ca-certificates
- hostPath:
path: /usr/share/ca-certificates
type: DirectoryOrCreate
name: usr-share-ca-certificates
status: {}


@@ -0,0 +1,43 @@
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 0s
cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
flushFrequency: 0
options:
json:
infoBufferSize: "0"
verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 5m
shutdownGracePeriodCriticalPods: 5m
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s


@@ -0,0 +1,72 @@
---
- name: Collect servers SSH public keys to known_hosts
hosts: localhost
connection: local
vars:
targets: "{{ hostvars[groups['all']] }}"
tasks:
- name: Generate ssh_config
ansible.builtin.copy:
dest: ssh_config
content: |
# Use `ansible-playbook update-ssh-config.yml` to update this file
# Use `ssh -F ssh_config ...` to connect to target machine or
# Add `Include ~/path/to/this/kube/ssh_config` in your ~/.ssh/config
{% for host in groups['all'] | sort %}
Host {{ [host, hostvars[host].get('ansible_host', host)] | unique | join(' ') }}
User root
Hostname {{ hostvars[host].get('ansible_host', host) }}
Port {{ hostvars[host].get('ansible_port', 22) }}
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
{% endfor %}
- name: Generate known_hosts
ansible.builtin.copy:
dest: known_hosts
content: |
# Use `ansible-playbook update-ssh-config.yml` to update this file
{% for host in groups['all'] | sort %}
{{ lookup('ansible.builtin.pipe', 'ssh-keyscan -p %d -t ecdsa %s' % (
hostvars[host].get('ansible_port', 22),
hostvars[host].get('ansible_host', host))) }} # {{ host }}
{% endfor %}
- name: Pull authorized keys from Gitea
hosts: localhost
connection: local
vars:
targets: "{{ hostvars[groups['all']] }}"
tasks:
- name: Download https://git.k-space.ee/user.keys
loop:
- arti
- eaas
- lauri
- rasmus
ansible.builtin.get_url:
url: https://git.k-space.ee/{{ item }}.keys
dest: "./{{ item }}.keys"
- name: Push authorized keys to targets
hosts:
- misc
- kubernetes
- doors
tasks:
- name: Generate /root/.ssh/authorized_keys
ansible.builtin.copy:
dest: "/root/.ssh/authorized_keys"
owner: root
group: root
mode: '0644'
content: |
# Use `ansible-playbook update-ssh-config.yml` from https://git.k-space.ee/k-space/kube/ to update this file
          {% for user in (admins + extra_admins) | unique | sort %}
{% for line in lookup("ansible.builtin.file", user + ".keys").split("\n") %}
{% if line.startswith("sk-") %}
{{ line }} # {{ user }}
{% endif %}
{% endfor %}
{% endfor %}


@@ -0,0 +1,49 @@
# Referenced/linked and documented by https://wiki.k-space.ee/en/hosting/storage#zrepl
- name: zrepl
hosts: nasgroup
tasks:
- name: 'apt: zrepl gpg'
ansible.builtin.get_url:
url: 'https://zrepl.cschwarz.com/apt/apt-key.asc'
dest: /usr/share/keyrings/zrepl.asc
- name: 'apt: zrepl repo'
apt_repository:
repo: 'deb [arch=amd64 signed-by=/usr/share/keyrings/zrepl.asc] https://zrepl.cschwarz.com/apt/debian bookworm main'
- name: 'apt: ensure packages'
apt:
state: latest
pkg: zrepl
- name: 'zrepl: ensure config'
ansible.builtin.template:
src: "zrepl_{{ansible_hostname}}.yml.j2"
dest: /etc/zrepl/zrepl.yml
mode: 600
register: zreplconf
- name: 'zrepl: restart service after config change'
when: zreplconf.changed
service:
state: restarted
enabled: true
name: zrepl
- name: 'zrepl: ensure service'
when: not zreplconf.changed
service:
state: started
enabled: true
name: zrepl
# avoid accidental conflicts of changes on recv (would err 'will not overwrite without force')
- name: 'zfs: ensure recv mountpoint=off'
hosts: offsite
tasks:
- name: 'zfs: get mountpoint'
shell: zfs get mountpoint -H -o value {{offsite_dataset}}
register: result
changed_when: false
- when: result.stdout != "none"
name: 'zfs: ensure mountpoint=off'
changed_when: true
shell: zfs set mountpoint=none {{offsite_dataset}}
register: result

ansible/zrepl/prom.yaml Normal file

@@ -0,0 +1,23 @@
---
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
name: zrepl
spec:
scrapeTimeout: 30s
targets:
staticConfig:
static:
- nas.mgmt.k-space.ee:9811
# - offsite.k-space.ee:9811 # TODO: unreachable
relabelingConfigs:
- sourceLabels: [__param_target]
targetLabel: instance
- sourceLabels: [__param_target]
targetLabel: __address__
prober:
url: localhost
path: /metrics
metricRelabelings:
- sourceLabels: [__address__]
targetLabel: target


@@ -0,0 +1,47 @@
global:
logging:
- type: syslog
format: logfmt
level: warn
monitoring:
- type: prometheus
listen: ':9811'
jobs:
- name: k6zrepl
type: snap
# "<" aka recursive, https://zrepl.github.io/configuration/filter_syntax.html
filesystems:
'nas/k6<': true
snapshotting:
type: periodic
prefix: zrepl_
interval: 1h
pruning:
keep:
# Keep non-zrepl snapshots
- type: regex
negate: true
regex: '^zrepl_'
- type: last_n
regex: "^zrepl_.*"
count: 4
- type: grid
regex: "^zrepl_.*"
grid: 4x1h | 6x4h | 3x1d | 2x7d
- name: k6zrepl_offsite_src
type: source
send:
      encrypted: true # zfs-native encryption is already in use; filesystems that are not encrypted will log at error level
serve:
type: tcp
listen: "{{ansible_host}}:35566" # NAT-ed to 193.40.103.250
clients: {
"78.28.64.17": "offsite.k-space.ee",
}
filesystems:
'nas/k6': true
    snapshotting: # handled by the job above; separated for security (isolation of domains)
type: manual


@@ -0,0 +1,41 @@
global:
logging:
- type: syslog
format: logfmt
level: warn
monitoring:
- type: prometheus
listen: ':9811'
jobs:
- name: k6zrepl_offsite_dest
type: pull
recv:
placeholder:
encryption: off # https://zrepl.github.io/configuration/sendrecvoptions.html#placeholders
# bandwidth_limit:
# max: 9 MiB # 75.5 Mbps
connect:
type: tcp
address: '193.40.103.250:35566' # firewall whitelisted to offsite
root_fs: {{offsite_dataset}}
interval: 10m # start interval, does nothing when no snapshots to recv
replication:
concurrency:
steps: 2
pruning:
keep_sender: # offsite does not dictate nas snapshot policy
- type: regex
regex: '.*'
keep_receiver:
# Keep non-zrepl snapshots
- negate: true
type: regex
regex: "^zrepl_"
- type: last_n
regex: "^zrepl_"
count: 4
- type: grid
regex: "^zrepl_"
grid: 4x1h | 6x4h | 3x1d | 2x7d


@@ -41,14 +41,9 @@ kubectl -n argocd create secret generic gitea-kube-members \
--from-literal=type=git \
--from-literal=url=git@git.k-space.ee:k-space/kube-members \
--from-file=sshPrivateKey=id_ecdsa
kubectl label -n argocd secret gitea-kube argocd.argoproj.io/secret-type=repository
kubectl label -n argocd secret gitea-kube-staging argocd.argoproj.io/secret-type=repository
kubectl label -n argocd secret gitea-kube-members argocd.argoproj.io/secret-type=repository
rm -fv id_ecdsa
```


@@ -1,20 +0,0 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: members
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/members.git'
path: members
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: passmower
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true


@@ -1,50 +0,0 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: argocd-redis
namespace: argocd
spec:
size: 32
mapping:
- key: redis-password
value: "%(plaintext)s"
- key: REDIS_URI
value: "redis://:%(plaintext)s@argocd-redis"
---
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
name: argocd-redis
namespace: argocd
spec:
authentication:
passwordFromSecret:
key: redis-password
name: argocd-redis
replicas: 3
resources:
limits:
cpu: 1000m
memory: 1Gi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: argocd-redis
app.kubernetes.io/part-of: dragonfly
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: argocd-redis
namespace: argocd
spec:
selector:
matchLabels:
app: argocd-redis
app.kubernetes.io/part-of: dragonfly
podMetricsEndpoints:
- port: admin


@@ -5,13 +5,9 @@ global:
dex:
enabled: false
redis:
enabled: false
# Maybe one day switch to Redis HA?
redis-ha:
enabled: false
externalRedis:
host: argocd-redis
existingSecret: argocd-redis
server:
# HTTPS is implemented by Traefik
@@ -27,6 +23,20 @@ server:
- hosts:
- "*.k-space.ee"
configfucked:
resource.customizations: |
# https://github.com/argoproj/argo-cd/issues/1704
networking.k8s.io/Ingress:
health.lua: |
hs = {}
hs.status = "Healthy"
return hs
apiextensions.k8s.io/CustomResourceDefinition:
ignoreDifferences: |
jsonPointers:
- "x-kubernetes-validations"
metrics:
enabled: true
@@ -71,17 +81,7 @@ configs:
p, role:developers, applications, update, default/camtiler, allow
cm:
admin.enabled: "false"
resource.customizations: |
# https://github.com/argoproj/argo-cd/issues/1704
networking.k8s.io/Ingress:
health.lua: |
hs = {}
hs.status = "Healthy"
return hs
apiextensions.k8s.io/CustomResourceDefinition:
ignoreDifferences: |
jsonPointers:
- "x-kubernetes-validations"
oidc.config: |
name: OpenID Connect
issuer: https://auth.k-space.ee/


@@ -1,35 +1,29 @@
# Bind namespace
#TODO:
The Bind secondary servers and `external-dns` service pods are running in this namespace.
The `external-dns` pods are used to declaratively update DNS records on the
[Bind primary](https://git.k-space.ee/k-space/ansible/src/branch/main/authoritative-nameserver.yaml).
- cert-manager talks to the master to add domain names and do DNS-01 TLS challenges through ns1.k-space.ee
^ both-side link to cert-manager
bind-services (zone transfer to HA replicas from ns1.k-space.ee)
### ns1.k-space.ee
Primary authoritative nameserver replica. Other replicas live on Kube nodes.
Idea to move it to Zone.
dns.yaml files add DNS records
# Bind setup
The Bind primary resides outside Kubernetes at `193.40.103.2` and
it's internally reachable via `172.20.0.2`.
Bind secondaries perform AXFR (zone transfer) from `ns1.k-space.ee` using
shared-secret authentication.
The primary triggers notification events to `172.20.53.{1..3}`
which are the internally exposed IPs of the secondaries.
Bind secondaries are hosted inside Kubernetes, load balanced behind `62.65.250.2` and
under normal circumstances managed by [ArgoCD](https://argocd.k-space.ee/applications/argocd/bind).
Note that [cert-manager](https://git.k-space.ee/k-space/kube/src/branch/master/cert-manager/) also performs DNS updates on the Bind primary.
# For user
`Ingresses` and `DNSEndpoint` resources under `k-space.ee`, `kspace.ee`, `k6.ee`
domains are picked up automatically by `external-dns` and updated on the Bind primary.
To find usage examples in this repository use
`grep -r -A25 "^kind: Ingress" .` and
`grep -R -r -A100 "^kind: DNSEndpoint" .`
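A minimal sketch of such a resource (the name, hostname and address are placeholders):
```
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
  name: example # placeholder
spec:
  endpoints:
    - dnsName: example.k-space.ee # placeholder
      recordTTL: 300
      recordType: A
      targets:
        - 192.0.2.1 # placeholder address
```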
# For administrator
Ingresses and DNSEndpoints referring to `k-space.ee`, `kspace.ee`, `k6.ee`
are picked up automatically by `external-dns` and updated on the primary.
The primary triggers notification events to `172.20.53.{1..3}`
which are the internally exposed IPs of the secondaries.
# Secrets
@@ -68,7 +62,7 @@ zone "foobar.com" {
file "/var/lib/bind/db.foobar.com";
allow-update { !rejected; key foobar; };
allow-transfer { !rejected; key readonly; key foobar; };
notify explicit; also-notify { 172.21.53.1; 172.21.53.2; 172.21.53.3; };
notify explicit; also-notify { 172.20.53.1; 172.20.53.2; 172.20.53.3; };
};
```


@@ -3,7 +3,6 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: bind-secondary-config-local
namespace: bind
data:
named.conf.local: |
zone "codemowers.ee" { type slave; masters { 172.20.0.2 key readonly; }; };
@@ -14,7 +13,6 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: bind-secondary-config
namespace: bind
data:
named.conf: |
include "/etc/bind/named.conf.local";
@@ -38,7 +36,6 @@ metadata:
name: bind-secondary
namespace: bind
spec:
revisionHistoryLimit: 0
replicas: 3
selector:
matchLabels:
@@ -48,16 +45,15 @@ spec:
labels:
app: bind-secondary
spec:
volumes:
- name: run
emptyDir: {}
containers:
- name: bind-secondary
image: internetsystemsconsortium/bind9:9.20
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 1m
memory: 35Mi
volumeMounts:
- mountPath: /run/named
name: run
workingDir: /var/bind
command:
- named
@@ -83,13 +79,16 @@ spec:
name: bind-readonly-secret
- name: bind-data
emptyDir: {}
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: bind-secondary
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- bind-secondary
topologyKey: "kubernetes.io/hostname"
---
apiVersion: v1
kind: Service
@@ -120,7 +119,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.53.1
loadBalancerIP: 172.20.53.1
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-0
@@ -142,7 +141,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.53.2
loadBalancerIP: 172.20.53.2
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-1
@@ -164,7 +163,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.53.3
loadBalancerIP: 172.20.53.3
selector:
app: bind-secondary
statefulset.kubernetes.io/pod-name: bind-secondary-2


@@ -3,7 +3,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-k-space
namespace: bind
spec:
revisionHistoryLimit: 0
selector:
@@ -18,13 +17,6 @@ spec:
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 2m
memory: 35Mi
envFrom:
- secretRef:
name: tsig-secret


@@ -3,7 +3,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-k6
namespace: bind
spec:
revisionHistoryLimit: 0
selector:
@@ -18,13 +17,6 @@ spec:
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 2m
memory: 35Mi
envFrom:
- secretRef:
name: tsig-secret
@@ -49,32 +41,31 @@ apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: k6
namespace: bind
spec:
endpoints:
- dnsName: k6.ee
recordTTL: 300
recordType: SOA
targets:
- "ns1.k-space.ee. hostmaster.k-space.ee. (1 300 300 300 300)"
- dnsName: k6.ee
recordTTL: 300
recordType: NS
targets:
- ns1.k-space.ee
- ns2.k-space.ee
- dnsName: ns1.k-space.ee
recordTTL: 300
recordType: A
targets:
- 193.40.103.2
- dnsName: ns2.k-space.ee
recordTTL: 300
recordType: A
targets:
- 62.65.250.2
- dnsName: k-space.ee
recordTTL: 300
recordType: MX
targets:
- 10 mail.k-space.ee


@@ -3,7 +3,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-kspace
namespace: bind
spec:
revisionHistoryLimit: 0
selector:
@@ -18,16 +17,9 @@ spec:
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 2m
memory: 35Mi
envFrom:
- secretRef:
name: tsig-secret
args:
- --events
- --registry=noop
@@ -49,27 +41,26 @@ apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: kspace
namespace: bind
spec:
endpoints:
- dnsName: kspace.ee
recordTTL: 300
recordType: SOA
targets:
- "ns1.k-space.ee. hostmaster.k-space.ee. (1 300 300 300 300)"
- dnsName: kspace.ee
recordTTL: 300
recordType: NS
targets:
- ns1.k-space.ee
- ns2.k-space.ee
- dnsName: ns1.k-space.ee
recordTTL: 300
recordType: A
targets:
- 193.40.103.2
- dnsName: ns2.k-space.ee
recordTTL: 300
recordType: A
targets:
- 62.65.250.2


@@ -4,57 +4,55 @@ kind: ClusterRole
metadata:
name: external-dns
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
- nodes
verbs:
- get
- watch
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints
verbs:
- get
- watch
- list
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints/status
verbs:
- update
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: bind
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
namespace: bind
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: bind


@@ -1,33 +1,18 @@
# cert-manager
`cert-manager` is used to obtain TLS certificates from Let's Encrypt.
It uses the DNS-01 challenge in conjunction with the Bind primary
at `ns1.k-space.ee`.
Refer to the [Bind primary Ansible playbook](https://git.k-space.ee/k-space/ansible/src/branch/main/authoritative-nameserver.yaml) and
[Bind namespace on Kubernetes cluster](https://git.k-space.ee/k-space/kube/src/branch/master/bind)
for more details.
# For user
Use the `Certificate` CRD of cert-manager; refer to the
[official documentation](https://cert-manager.io/docs/usage/certificate/).
To find usage examples in this repository use
`grep -r -A10 "^kind: Certificate" .`
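A minimal sketch (the hostname and names are placeholders; the `default` ClusterIssuer is defined in issuer.yml):
```
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-tls # placeholder
spec:
  secretName: example-tls # placeholder
  dnsNames:
    - example.k-space.ee # placeholder
  issuerRef:
    kind: ClusterIssuer
    name: default
```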
# For administrator
Added manifest with:
```
curl -L https://github.com/jetstack/cert-manager/releases/download/v1.15.1/cert-manager.yaml -O
```
To update the certificate issuer:
```
kubectl apply -f cert-manager.yaml
```
To update the issuer configuration or TSIG secret:
```
kubectl apply -f issuer.yml
kubectl -n cert-manager create secret generic tsig-secret \
--from-literal=TSIG_SECRET=<secret>
```

File diff suppressed because it is too large


@@ -1,21 +0,0 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: default
namespace: cert-manager
spec:
acme:
email: info@k-space.ee
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: example-issuer-account-key
solvers:
- dns01:
rfc2136:
nameserver: 193.40.103.2
tsigKeyName: readwrite.
tsigAlgorithm: HMACSHA512
tsigSecretSecretRef:
name: tsig-secret
key: TSIG_SECRET

cert-manager/issuer.yml Normal file

@@ -0,0 +1,19 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: default
spec:
acme:
email: info@k-space.ee
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: example-issuer-account-key
solvers:
- dns01:
rfc2136:
nameserver: 193.40.103.2
tsigKeyName: acme.
tsigAlgorithm: HMACSHA512
tsigSecretSecretRef:
name: tsig-secret
key: TSIG_SECRET


@@ -1,44 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: netshoot
spec:
replicas: 1
selector:
matchLabels:
app: netshoot
template:
metadata:
creationTimestamp: null
labels:
app: netshoot
spec:
containers:
- name: netshoot
image: nicolaka/netshoot
command:
- /bin/bash
args:
- '-c'
- while true; do ping localhost; sleep 60;done
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600


@@ -1,38 +1,5 @@
# Dragonfly Operator
Dragonfly operator is the preferred way to add Redis support to your application,
as it is a modern Go rewrite and it supports high availability.
The following alternatives were considered, but are discouraged:
* Vanilla Redis without replication is unusable during a pod reschedule or Kubernetes worker outage
* Vanilla Redis' replication is clunky and there is no reliable Kubernetes operator
for vanilla Redis
* KeyDB Cluster was unable to guarantee strong consistency
Note that vanilla Redis
[has changed its licensing policy](https://redis.io/blog/redis-adopts-dual-source-available-licensing/)
# For users
Refer to [official documentation on usage](https://www.dragonflydb.io/docs/getting-started/kubernetes-operator#create-a-dragonfly-instance-with-replicas)
For example deployment see
[here](https://git.k-space.ee/k-space/kube/src/branch/master/passmower/dragonfly.yaml).
To find other instances in this repository use `grep -r "kind: Dragonfly"`
Use storage class `redis` for persistent instances.
To achieve high availability use 2+ replicas with correctly configured
`topologySpreadConstraints`.
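A minimal sketch (the name and replica count are placeholders; see the `argocd-redis` instance in this repo for a real one):
```
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
  name: example-redis # placeholder
spec:
  replicas: 2
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          app: example-redis
          app.kubernetes.io/part-of: dragonfly
```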
# For administrators
The operator was deployed with following snippet:
```
kubectl apply -f https://raw.githubusercontent.com/dragonflydb/dragonfly-operator/v1.1.6/manifests/dragonfly-operator.yaml
```
To upgrade refer to
[github.com/dragonflydb/dragonfly-operator](https://github.com/dragonflydb/dragonfly-operator/releases),
bump version and reapply


@@ -150,7 +150,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.51.4
loadBalancerIP: 172.20.51.4
ports:
- name: filebeat-syslog
port: 514
@@ -169,7 +169,7 @@ metadata:
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 172.21.51.4
loadBalancerIP: 172.20.51.4
ports:
- name: filebeat-syslog
port: 514


@@ -1,15 +1,4 @@
# Etherpad namespace
# For users
Etherpad is a simple publicly available application for taking notes
running at [pad.k-space.ee](https://pad.k-space.ee/).
# For administrators
This application is managed by [ArgoCD](https://argocd.k-space.ee/applications/argocd/etherpad)
In case ArgoCD is broken you can manually apply changes with:
```
kubectl apply -n etherpad -f application.yml


@@ -3,7 +3,6 @@ apiVersion: codemowers.io/v1alpha1
kind: OIDCGWMiddlewareClient
metadata:
name: sso
namespace: etherpad
spec:
displayName: Etherpad
uri: 'https://pad.k-space.ee/'
@@ -16,7 +15,6 @@ metadata:
spec:
# Etherpad does NOT support running multiple replicas due to
# in-application caching https://github.com/ether/etherpad-lite/issues/3680
revisionHistoryLimit: 0
replicas: 1
serviceName: etherpad
selector:
@@ -77,8 +75,8 @@ spec:
selector:
app: etherpad
ports:
- protocol: TCP
port: 9001
---
apiVersion: networking.k8s.io/v1
kind: Ingress
@@ -92,19 +90,19 @@ metadata:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: pad.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: etherpad
port:
number: 9001
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy


@@ -1,20 +1,5 @@
# Freescout
# For user
Freescout scrapes the `info@k-space.ee` and `accounting@k-space.ee` mailboxes
from Wildduck and builds an issue tracker on top of them.
The Freescout user interface is accessible at
[freescout.k-space.ee](https://freescout.k-space.ee/)
Note that Freescout notifications are sent to `@k-space.ee` mailboxes.
Forwarding to a personal mailbox, e.g. `@gmail.com`, can be configured via
[Wildduck webmail](https://webmail.k-space.ee/account/profile)
# For administrator
This application is managed by [ArgoCD](https://argocd.k-space.ee/applications/argocd/freescout)
Should ArgoCD be down, the manifests here can be applied with:
@@ -22,9 +7,3 @@ Should ArgoCD be down manifests here can be applied with:
```
kubectl apply -n freescout -f application.yaml
```
If the Kubernetes cronjob for picking up mail has not been working for more than
3 days, the mails will not get synced by default. To manually synchronize
Freescout, head to the [Freescout system tools](https://freescout.k-space.ee/system/tools)
page, increase `Days` to an appropriate number and hit the `Fetch Emails` button.
Select `All` if some mails have been opened via Wildduck Webmail during the debugging process.


@@ -3,7 +3,6 @@ apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: freescout
namespace: freescout
spec:
displayName: Freescout Middleware
uri: 'https://freescout.k-space.ee'
@@ -19,7 +18,6 @@ apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
name: freescout
namespace: freescout
spec:
displayName: Freescout
uri: https://freescout.k-space.ee
@@ -107,7 +105,6 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: freescout
namespace: freescout
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
@@ -134,7 +131,6 @@ apiVersion: v1
kind: Service
metadata:
name: freescout
namespace: freescout
spec:
type: ClusterIP
selector:
@@ -148,11 +144,9 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: freescout
namespace: freescout
labels:
app: freescout
spec:
revisionHistoryLimit: 0
selector:
matchLabels:
app: freescout
@@ -215,12 +209,11 @@ apiVersion: batch/v1
kind: CronJob
metadata:
name: freescout-cron
namespace: freescout
spec:
schedule: "0,30 * * * *" # Should be every minute in theory, keeps hanging
jobTemplate:
spec:
activeDeadlineSeconds: 1800 # this is unholy https://github.com/freescout-helpdesk/freescout/blob/dist/app/Console/Kernel.php
ttlSecondsAfterFinished: 100
template:
spec:
@@ -281,7 +274,6 @@ apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim
metadata:
name: attachments
namespace: freescout
spec:
capacity: 10Gi
class: external
@@ -290,15 +282,14 @@ apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: freescout
namespace: freescout
spec:
groups:
- name: freescout
rules:
@@ -0,0 +1,50 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: reset-oidc-config
spec:
template:
spec:
volumes:
- name: tmp
emptyDir: {}
initContainers:
- name: jq
image: alpine/k8s:1.24.16@sha256:06f8942d87fa17b40795bb9a8eff029a9be3fc3c9bcc13d62071de4cc3324153
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
envFrom:
- secretRef:
name: oidc-client-freescout-owner-secrets
command:
- /bin/bash
- -c
- rm -fv /tmp/update.sql;
jq '{"name":"oauth.client_id","value":$ENV.OIDC_CLIENT_ID} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.client_secret","value":$ENV.OIDC_CLIENT_SECRET} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.auth_url","value":$ENV.OIDC_GATEWAY_AUTH_URI} | "UPDATE options SET value=\(.value + "?scope=openid+profile" |tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.token_url","value":$ENV.OIDC_GATEWAY_TOKEN_URI} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
jq '{"name":"oauth.user_url","value":$ENV.OIDC_GATEWAY_USERINFO_URI} | "UPDATE options SET value=\(.value|tostring|@sh) WHERE name=\(.name|tostring|@sh) LIMIT 1;"' -n -r >> /tmp/update.sql;
cat /tmp/update.sql
containers:
- name: mysql
image: mysql
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp
env:
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: freescout-secrets
key: DB_PASS
command:
- /bin/bash
- -c
- mysql -u kspace_freescout kspace_freescout -h 172.20.36.1 -p${MYSQL_PWD} < /tmp/update.sql
restartPolicy: OnFailure
backoffLimit: 4
@@ -11,7 +11,6 @@ spec:
kind: ClusterIssuer
name: default
secretName: git-tls
revisionHistoryLimit: 1
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
@@ -22,22 +22,6 @@ spec:
---
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-datasources
data:
prometheus.yaml: |
apiVersion: 1
prune: true
datasources:
- name: Prometheus
type: prometheus
orgId: 1
url: http://prometheus-operated.monitoring.svc.cluster.local:9090
version: 1
editable: false
---
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-config
data:
@@ -168,18 +152,20 @@ spec:
name: grafana-data
- mountPath: /etc/grafana
name: grafana-config
- mountPath: /etc/grafana/provisioning/datasources
name: grafana-datasources
volumes:
- name: grafana-config
configMap:
name: grafana-config
- name: grafana-datasources
configMap:
name: grafana-datasources
- name: grafana-data
emptyDir:
sizeLimit: 500Mi
volumeClaimTemplates:
- metadata:
name: grafana-data
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
@@ -55,22 +55,3 @@ spec:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
---
apiVersion: v1
kind: Service
metadata:
name: goredirect
annotations:
external-dns.alpha.kubernetes.io/hostname: k6.ee
metallb.universe.tf/address-pool: elisa
spec:
ports:
- name: http
protocol: TCP
port: 80
targetPort: 8080
nodePort: 32120
selector:
app.kubernetes.io/name: goredirect
type: LoadBalancer
externalTrafficPolicy: Local
@@ -8,15 +8,6 @@ spec:
replacement: https://inventory.k-space.ee/${1}
permanent: false
---
# Creates a dummy/stub in auth.k-space.ee user-facing service listing (otherwise only inventory.k-space.ee is listed).
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: doorboy
spec:
displayName: Doorboy
uri: 'https://inventory.k-space.ee/m/doorboy'
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
@@ -32,12 +23,3 @@ spec:
services:
- kind: TraefikService
name: api@internal
---
apiVersion: codemowers.cloud/v1beta1
kind: MinioBucketClaim
metadata:
name: inventory-external
namespace: hackerspace
spec:
capacity: 1Gi
class: external
@@ -24,7 +24,7 @@ spec:
- name: PYTHONUNBUFFERED
value: "1"
- name: INVENTORY_ASSETS_BASE_URL
value: https://external.minio-clusters.k-space.ee/hackerspace-701d9303-0f27-4829-a2be-b1084021ad91/
value: https://minio-cluster-shared.k-space.ee/inventory-5b342be1-60a1-4290-8061-e0b8fc17d40d/
- name: OIDC_USERS_NAMESPACE
value: passmower
- name: SECRET_KEY
@@ -49,7 +49,7 @@ spec:
name: slack-secrets
envFrom:
- secretRef:
name: miniobucket-inventory-external-owner-secrets
name: miniobucket-inventory-owner-secrets
- secretRef:
name: oidc-client-inventory-app-owner-secrets
- secretRef:
@@ -138,7 +138,6 @@ spec:
availableScopes:
- 'openid'
- 'profile'
- 'groups'
tokenEndpointAuthMethod: 'client_secret_basic'
pkce: false
---
@@ -180,180 +180,6 @@ data:
# the max time for execution in running state without new task created
max_dangling_hours: 168
---
# Source: harbor/templates/nginx/configmap-https.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-nginx
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
data:
nginx.conf: |+
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 3096;
use epoll;
multi_accept on;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
proxy_http_version 1.1;
upstream core {
server "harbor-core:80";
}
upstream portal {
server "harbor-portal:80";
}
log_format timed_combined '[$time_local]:$remote_addr - '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'$request_time $upstream_response_time $pipe';
access_log /dev/stdout timed_combined;
map $http_x_forwarded_proto $x_forwarded_proto {
default $http_x_forwarded_proto;
"" $scheme;
}
server {
listen 8443 ssl;
listen [::]:8443 ssl;
# server_name harbordomain.com;
server_tokens off;
# SSL
ssl_certificate /etc/nginx/cert/tls.crt;
ssl_certificate_key /etc/nginx/cert/tls.key;
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
# Add extra headers
add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload";
add_header X-Frame-Options DENY;
add_header Content-Security-Policy "frame-ancestors 'none'";
location / {
proxy_pass http://portal/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; HttpOnly; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /api/ {
proxy_pass http://core/api/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /chartrepo/ {
proxy_pass http://core/chartrepo/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /c/ {
proxy_pass http://core/c/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /v1/ {
return 404;
}
location /v2/ {
proxy_pass http://core/v2/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_buffering off;
proxy_request_buffering off;
}
location /service/ {
proxy_pass http://core/service/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /service/notifications {
return 404;
}
}
server {
listen 8080;
listen [::]:8080;
#server_name harbordomain.com;
return 301 https://$host$request_uri;
}
}
---
# Source: harbor/templates/portal/configmap.yaml
apiVersion: v1
kind: ConfigMap
@@ -443,7 +269,7 @@ data:
delete:
enabled: true
redirect:
disable: true
disable: false
redis:
addr: dragonfly:6379
db: 2
@@ -603,39 +429,6 @@ spec:
app: "harbor"
component: jobservice
---
# Source: harbor/templates/nginx/service.yaml
apiVersion: v1
kind: Service
metadata:
name: harbor
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/hostname: harbor.k-space.ee
metallb.universe.tf/address-pool: elisa
spec:
type: LoadBalancer
ports:
- name: http
port: 80
targetPort: 8080
- name: https
port: 443
targetPort: 8443
selector:
release: harbor
app: "harbor"
component: nginx
---
# Source: harbor/templates/portal/service.yaml
apiVersion: v1
kind: Service
@@ -730,8 +523,8 @@ spec:
app.kubernetes.io/component: core
annotations:
checksum/configmap: 9ea7f1881e4fe5b908355ee28e246b67c8c498d2f719dd74a5536a51ee2d9865
checksum/secret: 7827f00e118d39ccc4caad6df2df2125a0cef6b6ad9583cb30a6b17e62e1b934
checksum/secret-jobservice: f6fcc2a7c9a0224eefa0b4ed2deed3fb22335c417d5645067efdc1341de26bc7
checksum/secret: af720060dbb42f2109b7fd0811a83c48c55313f95c3ba2e6e68010be0a2b2cd4
checksum/secret-jobservice: fdcf96de5337fccbcdac406929acbb799cb61e43c21be4f6affce7b2d7eaef3f
spec:
securityContext:
runAsUser: 10000
@@ -828,15 +621,9 @@ spec:
secretName: harbor-core
- name: ca-download
secret:
secretName: "harbor-ingress"
- name: psc
emptyDir: {}
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/exporter/exporter-dpl.yaml
apiVersion: apps/v1
@@ -974,8 +761,8 @@ spec:
annotations:
checksum/configmap: 3a35bef831e58536bf86670117b43e2913a4c1a60d0e74d948559d7a7d564684
checksum/configmap-env: 80e8b81abf755707210d6112ad65167a7d53088b209f63c603d308ef68c4cfad
checksum/secret: 35297960a512675e6dcdff9d387587916f748c2c2ca2b5b8e5cbe5853488971b
checksum/secret-core: 72ed9c9917dd47ba68b05273e113792198afa5e09a696689e1063fbaffc80034
checksum/secret: 6902f5ee11437ee5149ff54e363487163c43e21ddce1b120ea5528f3def513c6
checksum/secret-core: ed0bce05c92f40e7b854d7206e08d4c1581aac476956839e42075ab9cdd61e45
spec:
securityContext:
runAsUser: 10000
@@ -1036,110 +823,6 @@ spec:
- name: job-logs
persistentVolumeClaim:
claimName: harbor-jobservice
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/nginx/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-nginx
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
component: nginx
app.kubernetes.io/component: nginx
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: nginx
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
component: nginx
app.kubernetes.io/component: nginx
annotations:
checksum/configmap: 7114a5d89af834358c44d0e87c66e2c69da2e3dd545c02472a416c8a7857b983
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: nginx
image: "goharbor/nginx-photon:v2.11.0"
imagePullPolicy: "IfNotPresent"
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 1
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8080
- containerPort: 8443
volumeMounts:
- name: config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
- name: certificate
mountPath: /etc/nginx/cert
volumes:
- name: config
configMap:
name: harbor-nginx
- name: certificate
secret:
secretName: harbor-ingress
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/portal/deployment.yaml
apiVersion: apps/v1
@@ -1224,13 +907,6 @@ spec:
- name: portal-config
configMap:
name: "harbor-portal"
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/registry/registry-dpl.yaml
apiVersion: apps/v1
@@ -1274,10 +950,10 @@ spec:
component: registry
app.kubernetes.io/component: registry
annotations:
checksum/configmap: b6973055b0a56022c00f9460283665c292d00f4ec15c0b36ae334781fd72ff93
checksum/secret: b246f895959725e4164cb10bc8c1c5d4d50618736c48129c8ee233b126164339
checksum/secret-jobservice: 37d8a246aaaed2ca66ea98c8e6b0fd875de5cb0cf2660abd7bda98fa6d630ccb
checksum/secret-core: a3deaec6a79903eb0619162ab91a87581ae2da37bc3f894792a2f48912a2b7c8
checksum/configmap: b11f146e734a9ac7c3df9f83562e7ac5fea9e2b10b89118f19207c9b95104496
checksum/secret: dca1f41d66de90e85f5979631e3653bd898df32609307e2e794a72004dec22f9
checksum/secret-jobservice: 1728caf6daf5c1b1770da4133efe152d0a10260cb6e5271b7545696ff3b8a1f4
checksum/secret-core: 7c8aefdcb5f56e17ceb9dc21105e5b98d5a9294b70e1bea13ef83cc40fb595e2
spec:
securityContext:
runAsUser: 10000
@@ -1403,13 +1079,83 @@ spec:
name: "harbor-registry"
- name: registry-data
emptyDir: {}
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/ingress/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: "harbor-ingress"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.11.0"
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
ingress.kubernetes.io/proxy-body-size: "0"
ingress.kubernetes.io/ssl-redirect: "true"
kubernetes.io/ingress.class: traefik
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- secretName: harbor-ingress
hosts:
- harbor.k-space.ee
rules:
- http:
paths:
- path: /api/
pathType: Prefix
backend:
service:
name: harbor-core
port:
number: 80
- path: /service/
pathType: Prefix
backend:
service:
name: harbor-core
port:
number: 80
- path: /v2/
pathType: Prefix
backend:
service:
name: harbor-core
port:
number: 80
- path: /chartrepo/
pathType: Prefix
backend:
service:
name: harbor-core
port:
number: 80
- path: /c/
pathType: Prefix
backend:
service:
name: harbor-core
port:
number: 80
- path: /
pathType: Prefix
backend:
service:
name: harbor-portal
port:
number: 80
host: harbor.k-space.ee
---
# Source: harbor/templates/metrics/metrics-svcmon.yaml
apiVersion: monitoring.coreos.com/v1
@@ -1,21 +1,17 @@
expose:
type: loadBalancer
type: ingress
tls:
enabled: true
certSource: secret
secret:
secretName: "harbor-ingress"
loadBalancer:
name: harbor
ports:
httpPort: 80
httpsPort: 443
ingress:
hosts:
core: harbor.k-space.ee
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/hostname: harbor.k-space.ee
metallb.universe.tf/address-pool: elisa
cert-manager.io/cluster-issuer: default
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
labels: {}
sourceRanges: []
externalURL: https://harbor.k-space.ee
@@ -48,7 +44,7 @@ persistence:
# Refer to
# https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect
# for the detail.
disableredirect: true
disableredirect: false
type: s3
s3:
# Set an existing secret for S3 accesskey and secretkey
@@ -143,49 +139,3 @@ redis:
addr: "dragonfly:6379"
username: ""
password: "MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge"
nginx:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
portal:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
core:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
jobservice:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
registry:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
@@ -15,6 +15,136 @@ spec:
name: Remote-Name
user: Remote-Username
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: logmower-readwrite-password
spec:
size: 32
mapping:
- key: password
value: "%(plaintext)s"
---
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: logmower-readonly-password
spec:
size: 32
mapping:
- key: password
value: "%(plaintext)s"
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: logmower-mongodb
spec:
agent:
logLevel: ERROR
maxLogFileDurationHours: 1
additionalMongodConfig:
systemLog:
quiet: true
members: 2
arbiters: 1
type: ReplicaSet
version: "6.0.3"
security:
authentication:
modes: ["SCRAM"]
users:
- name: readwrite
db: application
passwordSecretRef:
name: logmower-readwrite-password
roles:
- name: readWrite
db: application
scramCredentialsSecretName: logmower-readwrite
- name: readonly
db: application
passwordSecretRef:
name: logmower-readonly-password
roles:
- name: read
db: application
scramCredentialsSecretName: logmower-readonly
statefulSet:
spec:
logLevel: WARN
template:
spec:
containers:
- name: mongod
resources:
requests:
cpu: 100m
memory: 1Gi
limits:
cpu: 4000m
memory: 1Gi
volumeMounts:
- name: journal-volume
mountPath: /data/journal
- name: mongodb-agent
resources:
requests:
cpu: 1m
memory: 100Mi
limits: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- logmower-mongodb-svc
topologyKey: kubernetes.io/hostname
nodeSelector:
dedicated: monitoring
tolerations:
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
volumeClaimTemplates:
- metadata:
name: logs-volume
labels:
usecase: logs
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
- metadata:
name: journal-volume
labels:
usecase: journal
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 512Mi
- metadata:
name: data-volume
labels:
usecase: data
spec:
storageClassName: mongo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
@@ -0,0 +1 @@
../mongodb-operator/mongodb-support.yml
logmower/mongoexpress.yml Normal file
@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-mongoexpress
spec:
revisionHistoryLimit: 0
replicas: 1
selector:
matchLabels:
app: logmower-mongoexpress
template:
metadata:
labels:
app: logmower-mongoexpress
spec:
containers:
- name: mongoexpress
image: mongo-express
ports:
- name: mongoexpress
containerPort: 8081
env:
- name: ME_CONFIG_MONGODB_URL
valueFrom:
secretKeyRef:
name: logmower-mongodb-application-readonly
key: connectionString.standard
- name: ME_CONFIG_MONGODB_ENABLE_ADMIN
value: "true"
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-mongoexpress
spec:
podSelector:
matchLabels:
app: logmower-mongoexpress
policyTypes:
- Egress
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ports:
- port: 27017
@@ -1 +0,0 @@
longhorn.yaml
@@ -1,41 +1,19 @@
# Longhorn distributed block storage system
## For users
You should really avoid using Longhorn as it has over time
[proven to be an unreliable system](https://www.reddit.com/r/kubernetes/comments/1cbggo8/longhorn_is_unreliable/).
Prefer using remote databases in your application via
the Kubernetes operator pattern.
Use Longhorn for applications that need persistent storage, but are unable
to provide replication in the application layer:
* Applications that insist on writing to the filesystem
* Applications that serve Git repositories (eg Gitea)
* Applications that check out Git repositories (eg Woodpecker, Drone and CI systems)
* Applications that need to use SQLite
Instead of using the built-in `longhorn` storage class, please add a new storage class
with suitable replication, data locality parameters and reclaim policy
[here](https://git.k-space.ee/k-space/kube/src/branch/master/storage-class.yaml)
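A minimal sketch of such a class, assuming two replicas and best-effort data locality are wanted; the class name here is illustrative, the real ones live in the linked `storage-class.yaml`:
```
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: longhorn-retain  # illustrative name
provisioner: driver.longhorn.io
reclaimPolicy: Retain
allowVolumeExpansion: true
parameters:
  numberOfReplicas: "2"  # replication handled by Longhorn itself
  dataLocality: "best-effort"  # try to keep one replica on the node running the workload
```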
Longhorn backups are made once per day and are configured to be uploaded to
the Minio S3 bucket hosted at nas.k-space.ee
## For administrators
Longhorn was last upgraded with the following snippet:
Pull the manifest and apply changes
```
wget https://raw.githubusercontent.com/longhorn/longhorn/v1.6.2/deploy/longhorn.yaml
wget https://raw.githubusercontent.com/longhorn/longhorn/v1.5.1/deploy/longhorn.yaml -O application.yml
patch -p0 < changes.diff
kubectl -n longhorn-system apply -f longhorn.yml -f application-extras.yml -f backup.yaml
```
After initial deployment `dedicated=storage:NoSchedule` was specified
To upgrade use following:
```
kubectl -n longhorn-system apply -f application.yml -f application-extras.yml
```
After deploying specify `dedicated=storage:NoSchedule`
for `Kubernetes Taint Toleration` under `Setting -> General` on
[Longhorn Dashboard](https://longhorn.k-space.ee/).
Suitable nodes were tagged with `storage` and Longhorn scheduling was disabled on others.
This is to prevent scheduling Longhorn data on arbitrary Kubernetes nodes, as the
`storage[1-4].kube.k-space.ee` nodes are the ones which have an additional 200G volume mounted at `/mnt/persistent/`
Proceed to tag suitable nodes with `storage` and disable Longhorn scheduling on others.
@@ -27,19 +27,19 @@ metadata:
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
rules:
- host: longhorn.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: longhorn-frontend
port:
number: 80
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
@@ -59,81 +59,81 @@ spec:
groups:
- name: longhorn
rules:
- alert: LonghornVolumeActualSpaceUsedWarning
annotations:
description: The accumulated snapshots for volume use up more space than the volume's capacity
summary: The actual used space of Longhorn volume is twice the size of the volume capacity.
expr: longhorn_volume_actual_size_bytes > longhorn_volume_capacity_bytes * 2
for: 5m
labels:
issue: The actual used space of Longhorn volume {{$labels.volume}} on {{$labels.node}} is high.
severity: warning
- alert: LonghornVolumeStatusCritical
annotations:
description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Fault for
more than 2 minutes.
summary: Longhorn volume {{$labels.volume}} is Fault
expr: longhorn_volume_robustness == 3
for: 5m
labels:
issue: Longhorn volume {{$labels.volume}} is Fault.
severity: critical
- alert: LonghornVolumeStatusWarning
annotations:
description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Degraded for
more than 5 minutes.
summary: Longhorn volume {{$labels.volume}} is Degraded
expr: longhorn_volume_robustness == 2
for: 5m
labels:
issue: Longhorn volume {{$labels.volume}} is Degraded.
severity: warning
- alert: LonghornNodeStorageWarning
annotations:
description: The used storage of node {{$labels.node}} is at {{$value}}% capacity for
more than 5 minutes.
summary: The used storage of node is over 70% of the capacity.
expr: (longhorn_node_storage_usage_bytes / longhorn_node_storage_capacity_bytes) * 100 > 70
for: 5m
labels:
issue: The used storage of node {{$labels.node}} is high.
severity: warning
- alert: LonghornDiskStorageWarning
annotations:
description: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is at {{$value}}% capacity for
more than 5 minutes.
summary: The used storage of disk is over 70% of the capacity.
expr: (longhorn_disk_usage_bytes / longhorn_disk_capacity_bytes) * 100 > 70
for: 5m
labels:
issue: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is high.
severity: warning
- alert: LonghornNodeDown
annotations:
description: There are {{$value}} Longhorn nodes which have been offline for more than 5 minutes.
summary: Longhorn nodes is offline
expr: (avg(longhorn_node_count_total) or on() vector(0)) - (count(longhorn_node_status{condition="ready"} == 1) or on() vector(0)) > 0
for: 5m
labels:
issue: There are {{$value}} Longhorn nodes are offline
severity: critical
- alert: LonghornIntanceManagerCPUUsageWarning
annotations:
description: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has CPU Usage / CPU request is {{$value}}% for
more than 5 minutes.
summary: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has CPU Usage / CPU request is over 300%.
expr: (longhorn_instance_manager_cpu_usage_millicpu/longhorn_instance_manager_cpu_requests_millicpu) * 100 > 300
for: 5m
labels:
issue: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} consumes 3 times the CPU request.
severity: warning
- alert: LonghornNodeCPUUsageWarning
annotations:
description: Longhorn node {{$labels.node}} has CPU Usage / CPU capacity is {{$value}}% for
more than 5 minutes.
summary: Longhorn node {{$labels.node}} experiences high CPU pressure for more than 5m.
expr: (longhorn_node_cpu_usage_millicpu / longhorn_node_cpu_capacity_millicpu) * 100 > 90
for: 5m
labels:
issue: Longhorn node {{$labels.node}} experiences high CPU pressure.
severity: warning
File diff suppressed because it is too large
@@ -24,7 +24,7 @@ value: 'miniobucket-backup-owner-secrets'
apiVersion: longhorn.io/v1beta1
kind: RecurringJob
metadata:
name: backup
namespace: longhorn-system
spec:
cron: "0 2 * * *"
@@ -1,5 +1,5 @@
--- longhorn.yaml 2024-07-07 14:16:47.953593433 +0300
+++ longhorn.modded 2024-07-07 14:18:51.103452617 +0300
--- application.yml 2024-07-07 14:16:47.953593433 +0300
+++ application.modded 2024-07-07 14:18:51.103452617 +0300
@@ -86,14 +86,14 @@
storageclass.kubernetes.io/is-default-class: "true"
provisioner: driver.longhorn.io
@@ -26,7 +26,19 @@ metadata:
namespace: metallb-system
spec:
addresses:
- 172.21.51.0/24
- 172.20.51.0/24
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: zoo
namespace: metallb-system
spec:
ipAddressPools:
- zoo
- bind-secondary-external
- bind-secondary-internal
- wildduck
---
# Slice of public EEnet subnet using MetalLB L3 method
apiVersion: metallb.io/v1beta1
@@ -55,7 +67,7 @@ metadata:
namespace: metallb-system
spec:
addresses:
- 172.21.53.0/24
- 172.20.53.0/24
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
@@ -87,3 +99,13 @@ spec:
passwordSecret:
name: mikrotik-router
namespace: metallb-system
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: public
namespace: metallb-system
spec:
ipAddressPools:
- eenet
- elisa
@@ -11,4 +11,3 @@ spec:
kind: ClusterIssuer
name: default
secretName: wildcard-tls
revisionHistoryLimit: 1
@@ -1,38 +1,8 @@
# Monitoring namespace
## Monitoring
Additional docs: https://wiki.k-space.ee/en/hosting/monitoring
Prometheus is accessible at [prom.k-space.ee](https://prom.k-space.ee/)
and the corresponding AlertManager is accessible at [am.k-space.ee](https://am.k-space.ee/).
Both are [deployed by ArgoCD](https://argocd.k-space.ee/applications/monitoring)
from this Git repo directory using Prometheus operator.
Note that Prometheus and other monitoring stack components should use appropriate
node selector to make sure the components get scheduled on nodes which are
hosted in a privileged VLAN where they have access to UPS SNMP targets,
Mikrotik router/switch APIs, etc.
## For users
To add monitoring targets inside the Kubernetes cluster, make use of the
[PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#using-podmonitors) or ServiceMonitor custom
resource definitions.
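A minimal PodMonitor sketch, assuming pods labelled `app: myapp` that expose a container port named `metrics`:
```
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: myapp  # illustrative
spec:
  selector:
    matchLabels:
      app: myapp
  podMetricsEndpoints:
    - port: metrics  # name of the container port serving /metrics
```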
For external targets, (ab)use the Probe CRD as seen in `node-exporter.yaml`
or `ping-exporter.yaml`.
Alerts are sent to the #kube-prod Slack channel. The alerting rules are automatically
picked up by the Prometheus operator via Kubernetes manifests utilizing
the operator's
[PrometheusRule](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/alerting.md#deploying-prometheus-rules) custom resource definitions.
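A minimal PrometheusRule sketch; the alert name, expression and labels are illustrative:
```
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: myapp  # illustrative
spec:
  groups:
    - name: myapp
      rules:
        - alert: MyAppDown
          expr: up{job="myapp"} == 0
          for: 5m
          labels:
            severity: warning
          annotations:
            summary: myapp target has been down for 5 minutes
```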
Sample queries:
* [SSD/HDD temperatures](https://prom.k-space.ee/graph?g0.expr=%7B__name__%3D~%22smartmon_(temperature_celsius%7Cairflow_temperature_cel)_raw_value%22%7D&g0.tab=0&g0.stacked=0&g0.range_input=1d)
* [HDD power on hours](https://prom.k-space.ee/graph?g0.range_input=30m&g0.expr=smartmon_power_on_hours_raw_value&g0.tab=0), 8760 hours per year
* [CPU/NB temperatures](https://prom.k-space.ee/graph?g0.range_input=1h&g0.expr=node_hwmon_temp_celsius&g0.tab=0)
* [Disk space left](https://prom.k-space.ee/graph?g0.range_input=1h&g0.expr=node_filesystem_avail_bytes&g0.tab=1)
* Minio [s3 egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_s3_traffic_sent_bytes%5B3m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [internode egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_inter_node_traffic_sent_bytes%5B2m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [storage used](https://prom.k-space.ee/graph?g0.expr=minio_node_disk_used_bytes&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h)
# For administrators
This namespace is managed by
[ArgoCD](https://argocd.k-space.ee/applications/argocd/monitoring)
To reconfigure SNMP targets etc:
@@ -56,3 +26,4 @@ To set Mikrotik secrets:
--from-literal=PROMETHEUS_BEARER_TOKEN=$(cat /dev/urandom | base64 | head -c 30)
```
@@ -3,7 +3,6 @@ apiVersion: monitoring.coreos.com/v1alpha1
kind: AlertmanagerConfig
metadata:
name: alertmanager
namespace: monitoring
labels:
app.kubernetes.io/name: alertmanager
spec:
@@ -25,21 +24,13 @@ spec:
apiURL:
name: slack-secrets
key: webhook-url
---
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
name: alertmanager
namespace: monitoring
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/instance: alertmanager
app.kubernetes.io/name: alertmanager
alertmanagerConfigMatcherStrategy:
type: None
alertmanagerConfigNamespaceSelector: {}
@@ -49,20 +40,13 @@ spec:
secrets:
- slack-secrets
nodeSelector:
node-role.kubernetes.io/control-plane: ''
dedicated: monitoring
tolerations:
- key: node-role.kubernetes.io/control-plane
- key: dedicated
operator: Equal
value: ''
value: monitoring
effect: NoSchedule
replicas: 3
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 8m
memory: 35Mi
serviceAccountName: alertmanager
externalUrl: http://am.k-space.ee/
routePrefix: "/"
@@ -76,4 +60,3 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: alertmanager
namespace: monitoring
@@ -33,9 +33,9 @@ spec:
static:
- 193.40.103.2
- 62.65.250.2
- 172.21.53.1
- 172.21.53.2
- 172.21.53.3
- 172.20.53.1
- 172.20.53.2
- 172.20.53.3
---
apiVersion: monitoring.coreos.com/v1
kind: Probe
@@ -169,7 +169,7 @@ spec:
spec:
containers:
- name: blackbox-exporter
image: mirror.gcr.io/prom/blackbox-exporter:v0.25.0
image: prom/blackbox-exporter:v0.24.0
ports:
- name: http
containerPort: 9115
@@ -180,20 +180,24 @@ spec:
- name: blackbox-exporter-config
configMap:
name: blackbox-exporter-config
# TODO: Results in odd 6s connection lag if scheduled in VLAN20
nodeSelector:
node-role.kubernetes.io/control-plane: ''
dedicated: monitoring
tolerations:
- key: node-role.kubernetes.io/control-plane
- key: dedicated
operator: Equal
value: ''
value: monitoring
effect: NoSchedule
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: blackbox-exporter
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- blackbox-exporter
topologyKey: "kubernetes.io/hostname"
---
kind: Service
apiVersion: v1
@@ -77,20 +77,13 @@ spec:
envFrom:
- secretRef:
name: mikrotik-exporter
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: mikrotik-exporter
nodeSelector:
node-role.kubernetes.io/control-plane: ''
dedicated: monitoring
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Equal
value: ''
effect: NoSchedule
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -364,7 +364,6 @@ metadata:
app: node-exporter
name: node-exporter
spec:
revisionHistoryLimit: 0
selector:
matchLabels:
app: node-exporter
@@ -384,7 +383,7 @@ spec:
- --collector.netclass.ignored-devices=^(veth|cali|vxlan|cni|vnet|tap|lo|wg)
- --collector.netdev.device-exclude=^(veth|cali|vxlan|cni|vnet|tap|lo|wg)
- --collector.diskstats.ignored-devices=^(sr[0-9][0-9]*)$
image: mirror.gcr.io/prom/node-exporter:v1.8.2
image: prom/node-exporter:v1.8.2
resources:
limits:
cpu: 50m
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: ping-exporter
image: mirror.gcr.io/czerwonk/ping_exporter
image: czerwonk/ping_exporter
args:
- ./ping_exporter
- '1.1.1.1'
@@ -39,19 +39,22 @@ spec:
- NET_ADMIN
- NET_RAW
nodeSelector:
node-role.kubernetes.io/control-plane: ''
dedicated: monitoring
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Equal
value: ''
effect: NoSchedule
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: ping-exporter
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- ping-exporter
topologyKey: "kubernetes.io/hostname"
---
kind: Service
apiVersion: v1
@@ -3,7 +3,6 @@ apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: metrics
namespace: monitoring
spec:
namespaceSelector: {}
selector: {}
@@ -15,22 +14,13 @@ apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
namespace: monitoring
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/instance: prometheus
app.kubernetes.io/name: prometheus
nodeSelector:
node-role.kubernetes.io/control-plane: ''
dedicated: monitoring
tolerations:
- key: node-role.kubernetes.io/control-plane
- key: dedicated
operator: Equal
value: ''
value: monitoring
effect: NoSchedule
alerting:
alertmanagers:
@@ -55,18 +45,11 @@ spec:
ruleNamespaceSelector: {}
ruleSelector: {}
retentionSize: 8GB
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 100m
memory: 700Mi
storage:
volumeClaimTemplate:
spec:
accessModes:
- ReadWriteOnce
- ReadWriteOnce
resources:
requests:
storage: 10Gi
@@ -76,50 +59,36 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
namespace: monitoring
rules:
- resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
apiGroups:
- ""
verbs:
- get
- list
- watch
- resources:
- configmaps
apiGroups:
- ""
verbs:
- get
- resources:
- ingresses
apiGroups:
- networking.k8s.io
verbs:
- get
- list
- watch
- nonResourceURLs:
- /metrics
verbs:
- get
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -133,7 +102,6 @@ apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: prometheus
namespace: monitoring
spec:
groups:
- name: prometheus
@@ -380,7 +348,6 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: prometheus
namespace: monitoring
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
@@ -406,7 +373,6 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: alertmanager
namespace: monitoring
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
@@ -432,7 +398,6 @@ apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: prometheus
namespace: monitoring
spec:
selector:
matchLabels:
@@ -444,7 +409,6 @@ apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: alertmanager
namespace: monitoring
spec:
selector:
matchLabels:
@@ -456,7 +420,6 @@ apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: operator
namespace: monitoring
spec:
selector:
matchLabels:
@@ -468,7 +431,6 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: kubelet
namespace: monitoring
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
@@ -497,7 +459,6 @@ apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: prometheus
namespace: monitoring
spec:
displayName: Prometheus
uri: 'https://prom.k-space.ee'
@@ -513,7 +474,6 @@ apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: alertmanager
namespace: monitoring
spec:
displayName: AlertManager
uri: 'https://am.k-space.ee'
@@ -3,7 +3,6 @@ kind: Deployment
metadata:
name: snmp-exporter
spec:
revisionHistoryLimit: 0
replicas: 2
selector:
matchLabels:
@@ -14,7 +13,7 @@ spec:
app: snmp-exporter
spec:
containers:
- image: mirror.gcr.io/prom/snmp-exporter:v0.22.0
- image: prom/snmp-exporter:v0.22.0
name: snmp-exporter
imagePullPolicy: IfNotPresent
securityContext:
@@ -40,19 +39,22 @@ spec:
configMap:
name: snmp-exporter
nodeSelector:
node-role.kubernetes.io/control-plane: ''
dedicated: monitoring
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Equal
value: ''
effect: NoSchedule
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: snmp-exporter
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- snmp-exporter
topologyKey: "kubernetes.io/hostname"
---
kind: Service
apiVersion: v1
@@ -1,23 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
name: zrepl
spec:
scrapeTimeout: 30s
targets:
staticConfig:
static:
- nas.mgmt.k-space.ee:9811
# - offsite.k-space.ee:9811 # TODO: unreachable
relabelingConfigs:
- sourceLabels: [__param_target]
targetLabel: instance
- sourceLabels: [__param_target]
targetLabel: __address__
prober:
url: localhost
path: /metrics
metricRelabelings:
- sourceLabels: [__address__]
targetLabel: target
monitoring/zrepl.yaml Symbolic link
@@ -0,0 +1 @@
../ansible/zrepl/prom.yaml
@@ -2,40 +2,20 @@
apiVersion: codemowers.cloud/v1beta1
kind: SecretClaim
metadata:
name: dragonfly-auth
name: nextcloud-admin-secrets
spec:
size: 32
mapping:
- key: REDIS_PASSWORD
- key: password
value: "%(plaintext)s"
- key: REDIS_URI
value: "redis://:%(plaintext)s@dragonfly"
---
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
apiVersion: codemowers.cloud/v1beta1
kind: KeydbClaim
metadata:
name: dragonfly
name: nextcloud
spec:
authentication:
passwordFromSecret:
key: REDIS_PASSWORD
name: dragonfly-auth
replicas: 3
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: dragonfly
app.kubernetes.io/part-of: dragonfly
resources:
limits:
cpu: 600m
memory: 500Mi
requests:
cpu: 100m
memory: 30Mi
class: ephemeral
capacity: 100Mi
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
@@ -65,7 +45,6 @@ metadata:
labels:
app.kubernetes.io/name: nextcloud
spec:
revisionHistoryLimit: 0
serviceName: nextcloud
replicas: 1
selector:
@@ -88,13 +67,6 @@ spec:
containers:
- name: nextcloud
image: nextcloud@sha256:072d9d3b8428d6b31fe7ed603737d4173f0ca85c0f1d0d8607fd4741fdfa49a9
resources:
limits:
cpu: 4000m
memory: 2Gi
requests:
cpu: 400m
memory: 500Mi
readinessProbe:
exec:
command:
@@ -164,11 +136,14 @@ spec:
name: nextcloud-admin-secrets
key: password
- name: REDIS_HOST
value: dragonfly
valueFrom:
secretKeyRef:
name: keydb-nextcloud-owner-secrets
key: REDIS_MASTER
- name: REDIS_HOST_PASSWORD
valueFrom:
secretKeyRef:
name: dragonfly-auth
name: keydb-nextcloud-owner-secrets
key: REDIS_PASSWORD
- name: MYSQL_PASSWORD
valueFrom:
oidc-gateway/README.md Normal file
@@ -0,0 +1,8 @@
# OIDC Gateway
To deploy
```
kubectl create namespace oidc-gateway
kubectl apply -n oidc-gateway -f crds.yml -f rbac.yml -f texts.yml -f deployment.yml -f kubelogin.yaml -f proxmox.yaml -f voron.yaml
```
oidc-gateway/crds.yml Normal file
@@ -0,0 +1,298 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: oidcgatewayusers.codemowers.io
spec:
group: codemowers.io
names:
plural: oidcgatewayusers
singular: oidcgatewayuser
kind: OIDCGWUser
scope: Namespaced
versions:
- name: v1alpha1
served: true
storage: true
subresources:
status: { }
schema:
openAPIV3Schema:
required:
- spec
type: object
properties:
spec:
type: object
required:
- type
properties:
type:
type: string
enum: [ 'person', 'org', 'service', 'banned' ]
default: person
email:
type: string
companyEmail:
type: string
customGroups:
type: array
items:
type: object
properties:
prefix:
type: string
name:
type: string
customProfile:
type: object
x-kubernetes-preserve-unknown-fields: true
properties:
name:
type: string
company:
type: string
githubEmails:
type: array
items:
type: object
properties:
email:
type: string
primary:
type: boolean
default: false
githubGroups:
type: array
items:
type: object
properties:
prefix:
type: string
enum: [ 'github.com' ]
name:
type: string
githubProfile:
type: object
properties:
name:
type: string
company:
type: string
id:
type: integer
login:
type: string
slackId:
type: string
status:
type: object
properties:
primaryEmail:
type: string
emails:
type: array
items:
type: string
groups:
type: array
items:
type: object
properties:
prefix:
type: string
name:
type: string
profile:
type: object
x-kubernetes-preserve-unknown-fields: true
properties:
name:
type: string
company:
type: string
slackId:
type: string
conditions:
type: array
items:
type: object
x-kubernetes-embedded-resource: true
x-kubernetes-preserve-unknown-fields: true
additionalPrinterColumns:
- name: Type
type: string
jsonPath: .spec.type
- name: Name
type: string
jsonPath: .status.profile.name
- name: Display e-mail
type: string
jsonPath: .spec.companyEmail
- name: Upstream IdP e-mail
type: string
jsonPath: .spec.githubEmails[?(@.primary==true)].email
- name: GH ID
type: string
jsonPath: .spec.githubProfile.id
- name: Groups
type: string
jsonPath: .status.groups
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: oidcgatewayclients.codemowers.io
spec:
group: codemowers.io
names:
plural: oidcgatewayclients
singular: oidcgatewayclient
kind: OIDCGWClient
scope: Namespaced
versions:
- name: v1alpha1
served: true
storage: true
subresources:
status: { }
schema:
openAPIV3Schema:
required:
- spec
type: object
properties:
spec:
type: object
required:
- redirectUris
- grantTypes
- responseTypes
properties:
uri:
type: string
displayName:
type: string
redirectUris:
type: array
items:
type: string
grantTypes:
type: array
items:
type: string
enum: [ 'implicit', 'authorization_code', 'refresh_token' ]
responseTypes:
type: array
items:
type: string
enum: [ 'code id_token', 'code', 'id_token', 'none' ]
tokenEndpointAuthMethod:
type: string
enum: [ 'client_secret_basic', 'client_secret_jwt', 'client_secret_post', 'private_key_jwt', 'none' ]
idTokenSignedResponseAlg:
type: string
enum: [ 'PS256','RS256', 'ES256' ]
allowedGroups:
type: array
items:
type: string
overrideIncomingScopes:
type: boolean
default: false
availableScopes:
type: array
items:
type: string
enum: [ 'openid', 'profile', 'offline_access' ]
default: [ 'openid' ]
pkce:
type: boolean
default: true
status:
type: object
properties:
gateway:
type: string
additionalPrinterColumns:
- name: Gateway
type: string
description: 'OIDC gateway deployment which manages this client'
jsonPath: .status.gateway
- name: Uris
type: string
description: 'Redirect URLs configured for this client'
jsonPath: .spec.redirectUris
- name: Allowed groups
type: string
description: 'Groups allowed to this client'
jsonPath: .spec.allowedGroups
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: oidcgatewaymiddlewareclients.codemowers.io
spec:
group: codemowers.io
names:
plural: oidcgatewaymiddlewareclients
singular: oidcgatewaymiddlewareclient
kind: OIDCGWMiddlewareClient
scope: Namespaced
versions:
- name: v1alpha1
served: true
storage: true
subresources:
status: { }
schema:
openAPIV3Schema:
required:
- spec
type: object
properties:
spec:
type: object
properties:
uri:
type: string
displayName:
type: string
allowedGroups:
type: array
items:
type: string
headerMapping:
type: object
default:
user: 'Remote-User'
name: 'Remote-Name'
email: 'Remote-Email'
groups: 'Remote-Groups'
properties:
user:
type: string
name:
type: string
email:
type: string
groups:
type: string
status:
type: object
properties:
gateway:
type: string
additionalPrinterColumns:
- name: Gateway
type: string
description: 'OIDC gateway deployment which manages this client'
jsonPath: .status.gateway
- name: Uri
type: string
description: 'URL configured for this client'
jsonPath: .spec.uri
- name: Allowed groups
type: string
description: 'Groups allowed to this client'
jsonPath: .spec.allowedGroups
oidc-gateway/deployment.yml Normal file
@@ -0,0 +1,162 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: oidc-gateway
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: oidc-gateway
subjects:
- kind: ServiceAccount
name: oidc-gateway
namespace: oidc-gateway
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: oidc-gateway
---
apiVersion: codemowers.io/v1alpha1
kind: Redis
metadata:
name: oidc-gateway
spec:
capacity: 512Mi
class: ephemeral
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: oidc-gateway
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: auth2.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: oidc-gateway
port:
number: 3000
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: v1
kind: Service
metadata:
name: oidc-gateway
spec:
type: ClusterIP
selector:
app: oidc-gateway
ports:
- protocol: TCP
port: 3000
---
apiVersion: batch/v1
kind: Job
metadata:
name: oidc-key-manager
spec:
template:
spec:
serviceAccountName: oidc-gateway
containers:
- name: oidc-key-manager
image: mirror.gcr.io/codemowers/passmower
command: [ '/app/node_modules/.bin/key-manager', 'initialize', '-c', 'cluster' ]
restartPolicy: Never
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: oidc-gateway
labels:
app: oidc-gateway
spec:
selector:
matchLabels:
app: oidc-gateway
replicas: 3
template:
metadata:
labels:
app: oidc-gateway
spec:
serviceAccountName: oidc-gateway
containers:
- name: oidc-gateway
image: mirror.gcr.io/passmower/passmower:latest@sha256:b909ae01a1f8de9253cf3d6925d189eb687b4299c723f646838e1254a95f72be
ports:
- containerPort: 3000
env:
- name: ISSUER_URL
value: 'https://auth2.k-space.ee/'
- name: DEPLOYMENT_NAME
valueFrom:
fieldRef:
fieldPath: metadata.labels['app']
- name: GROUP_PREFIX
value: 'k-space'
- name: ADMIN_GROUP
value: 'k-space:onboarding'
# - name: REQUIRED_GROUP # allow everyone to authenticate, limit access to services on client level.
# value: 'codemowers:users'
- name: GITHUB_ORGANIZATION # if not set, gateway will add user groups from all organizations that (s)he granted access for.
value: 'codemowers'
- name: ENROLL_USERS # allow everyone to self-register
value: 'false'
- name: NAMESPACE_SELECTOR
value: '*'
- name: PREFERRED_EMAIL_DOMAIN # try to make primary email consistent
value: 'k-space.ee'
- name: REQUIRE_CUSTOM_USERNAME
value: 'true'
envFrom:
- secretRef:
name: redis-oidc-gateway-owner-secrets
- secretRef:
name: oidc-keys
- secretRef:
name: email-credentials
- secretRef:
name: github-client
- secretRef:
name: slack-client
readinessProbe:
httpGet:
path: /.well-known/openid-configuration
port: 3000
httpHeaders:
- name: x-forwarded-for # suppress oidc-provider warning
value: 'https://auth2.k-space.ee/'
- name: x-forwarded-proto # suppress oidc-provider warning
value: https
initialDelaySeconds: 5
periodSeconds: 1
volumeMounts:
- mountPath: /app/tos
name: tos
- mountPath: /app/approval
name: approval
- mountPath: /app/src/views/custom/emails
name: email-templates
volumes:
- name: tos
configMap:
name: oidc-gateway-tos-v1
- name: approval
configMap:
name: oidc-gateway-approval-required
- name: email-templates
configMap:
name: oidc-gateway-email-templates
@@ -0,0 +1,21 @@
---
apiVersion: codemowers.io/v1alpha1
kind: OIDCGWClient
metadata:
name: kubelogin
spec:
displayName: Kubernetes API
uri: https://git.k-space.ee/k-space/kube#cluster-access
redirectUris:
- http://localhost:27890
allowedGroups:
- k-space:kubernetes:admins
grantTypes:
- authorization_code
- refresh_token
responseTypes:
- code
availableScopes:
- openid
- profile
tokenEndpointAuthMethod: none
oidc-gateway/rbac.yml Normal file
@@ -0,0 +1,59 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: oidc-gateway
rules:
- apiGroups:
- codemowers.io
resources:
- oidcgatewayusers
- oidcgatewayusers/status
- oidcgatewayclients
- oidcgatewayclients/status
- oidcgatewaymiddlewareclients
- oidcgatewaymiddlewareclients/status
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
- patch
- delete
- apiGroups:
- traefik.containo.us
resources:
- middlewares
verbs:
- get
- create
- update
- patch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: oidc-gateway
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: oidc-gateway
subjects:
- kind: ServiceAccount
name: oidc-gateway
namespace: oidc-gateway
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: oidc-gateway
oidc-gateway/texts.yml Normal file
@@ -0,0 +1,180 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: oidc-gateway-tos-v1
data:
tos.md: |
Rules
Also known as code of conduct, guidelines, member vows, values etc. Please keep em' on one wiki page. Canonical URL shall be [k-space.ee/rules](http://k-space.ee/rules) which will redirect to right wiki page.
These rules were approved 30. October 2020. The current version contains minor corrections. For future amendments of the rules please put your ideas and thoughts into [pad.k-space.ee/p/rules](https://pad.k-space.ee/p/rules); they will be taken into account during the next General Assembly.
Upon becoming a member I solemnly swear that
1. **Who's who**
- I acknowledge the management of the hackerspace is governed by K-SPACE MTÜ (non-profit organization), its [charter](/pohikiri_80397632_716212.pdf) and the [Non-profit Associations Act](https://www.riigiteataja.ee/en/eli/528052020003/consolide)
- I acknowledge there are different membership forms listed under [membership#packages](/about/membership#packages) which can be largely broken down into affiliates (Regulars, Associates, Residents) who are at the hackerspace to make use of some services and members who are more involved in making the hackerspace happen.
- I acknowledge that in order to participate in the K-SPACE MTÜ management processes I need to obtain the [Member Tier](/about/membership#member-tier) status; note that this is different from the 'Member package', which was renamed to 'Associate tier' in October of 2020 in order to clarify what being a member of a non-profit association means.
- The board is elected from among the ['Member Tier'](https://members.k-space.ee/?filter=resident-member&filter=associate-member) people; it represents K-SPACE MTÜ in legal matters such as getting bills paid, signing rental contracts and so forth. Currently the only board member is Erki Naumanis. Bearing the responsibility also means that the board has the final word in many cases.
- I acknowledge K-SPACE MTÜ is renting rooms from SA Tallinna Teaduspark Tehnopol. The rental agreement imposes numerous conditions and constraints due to safety reasons. Eg corridors must be kept accessible and free of flammable materials.
1. **Stay positive.**
- I will set up a recurring payment (püsikorraldus) or pay up front in bulk.
- I will not go in debt.
- I acknowledge failing to pay membership fees for 2 months results in door access cancellation.
- I will mark my payments clearly and consistently with my full name, so the payments can be automatically processed.
1. **Clarity not obscurity.**
- I will mark my equipment, server, locker, desk with my full name, phone number and e-mail.
- I acknowledge that my unlabelled stuff will be unplugged.
- Before asking questions I will familiarize myself with [how to ask questions](https://www.khanacademy.org/computing/computer-programming/programming/becoming-a-community-coder/a/ask-for-program-help) and provide enough details.
1. **Communication is vital**
- I will proactively communicate need to cancel or pause my subscription, e.g. in case of being unable to pay.
- Upon leaving I will vacate the locker and return any keys given to me.
- I acknowledge Slack is the primary communications platform for K-SPACE MTÜ. This is mainly due to wanting to have a way to communicate even if all the selfhosted infra is down. I will request access to the channel from any of the onboarders. I will not complain about being left out of the loop about changes etc if I am not on the #members channel.
1. **Access controls**
- I acknowledge the primary method of accessing the floor is by using some of my own NFC cards (eg green Ühiskaart) or keyfobs.
- I will get my token registered in the door system of the day. To be on the safe side I will register multiple cards and/or keyfobs.
- I acknowledge the Slack bot for opening the doors supplements the primary one, eg I can get in if I have forgotten my card at home.
- I acknowledge that when it comes to security of doorcards it's nonexistent. I will not make copies of my card or distribute its unique identifier sequence.
1. **Treat as your own -- with care**
- I will do my best to take care of the equipment and the space.
- I acknowledge that I must turn off the lights and leave windows in microventilation mode when I am the last one to leave.
- In case of equipment breakage I will notify info@k-space.ee immediately for resolution options. Any K-SPACE MTÜ owned equipment replacements must be purchased by K-SPACE MTÜ, see below under 'Regarding the toys' why.
- Upon losing a (physical metal) key I will compensate 10EUR to K-SPACE MTÜ for it
1. **Contributions are welcome**
- I acknowledge that my immaterial contributions (e.g. building something for the hackerspace or helping others) don't compensate for my membership fees. The only discount option is the 'Student discount'. Flexing with your contributions on your CV is OK.
- Before bringing any items for donation I will consult with a board member. K-SPACE is not my free (e-)junk disposal station.
- Donations don't compensate for my membership fees. K-SPACE MTÜ still needs to pay rent and electricity bills.
1. **Underpromise, overdeliver**
- I acknowledge there are many fun activities I want to be part of.
- I acknowledge I have obligations elsewhere as well (eg school, work, family).
- I will do my best to fulfill what I have promised to other members.
- I will rather say no in first place than say yes and fail to fulfill the promise.
- I will let the relying party know as soon as possible if I am not able to handle something I promised to do.
1. **Regarding the toys**
- I acknowledge I can bring my own equipment to the hackerspace for common use. I acknowledge many others already do that. Lasercutter, CNC cutter, and many power tools are not owned by K-SPACE MTÜ. I take extra care of other members' equipment and, before making use of it, request training from the machine owner.
- I agree not to utilise any tool or piece of equipment unless I am competent in its use (and have completed induction/training where necessary) or am under the direct supervision of a competent user.
- I agree that the space is a potentially dangerous environment and that I may be exposed to various risks as a result of work undertaken by me or others. Notwithstanding all reasonable attempts by K-Space to ensure the safety of me and others, I agree to the full extent permitted by law to assume complete responsibility for my own actions and their consequences.
- If I think some new equipment would be useful for the space to be purchased by K-SPACE MTÜ I will make a suggestion to K-SPACE MTÜ board via board@k-space.ee.
- If I want to build something for the hackerspace I will consult with a board member for the compensation of materials.
- I acknowledge that my membership fees contribute towards refreshing equipment in common use, that is soldering irons, switches in server room etc.
- Purchases related to the hackerspace must be made by K-SPACE MTÜ.
- This is the clearest option from an accounting perspective.
- It's also important so that any warranty, receipts and other documents would be correctly assigned to K-SPACE MTÜ and usable should you give up your membership at some point.
- The preferred option is to ask for a proforma invoice for K-SPACE MTÜ, Akadeemia tee 21/1 and send it to info@k-space.ee - the payment will be executed by a board member.
- In case of Internet shops ask for the credit card from a board member.
1. **No borrowing or lending equipment**
- I acknowledge that equipment provided by K-SPACE MTÜ or its members is for on-prem use only
- I will not take any of the equipment with me
- The only exception to this is workshops happening outside (eg visiting Robotex, Lapikud, IT College etc)
- I will notify info@k-space.ee about what I am going to take with me and why
- I will return equipment immediately after the event
- I acknowledge that this rule is due to numerous occasions of people forgetting to return borrowed stuff
1. **Sharing is caring**
- I acknowledge that flexidesks also known as hot desks (8 desks in the middle of the big room with projector) are to be cleaned by the end of the day.
- Sometimes impromptu events are scheduled in the room - I will not leave my stuff on flexidesks overnight. If I come in often I can leave my screen and keyboard on one of the desks near the window.
- I acknowledge that for a more persistent setup I should really switch to the resident tier
1. **Being Mindful**
- While being active in the hackerspace I will make sure that my activities are not disturbing others and are not causing excess financial drain on K-SPACE MTÜ.
- Wearing earphones is generally sign of working on something and not wanting to be interrupted.
- I will act frugally. For example running additional servers without paying or running a Bitcoin miner at the expense of K-SPACE MTÜ is NOT okay.
- Causing interruptions in the server room operation during working hours is NOT okay. The designated timeslot for disruptive changes is Thursday 19:00 until Friday 6:00. I recognize that occasionally hardware fails and non-disruptive changes might turn disruptive outside that timeslot.
- I will not smoke inside the hackerspace rooms. I will exit the building for a cigarette.
1. **Striving for improvement**
- I acknowledge that hackerspace is a vague term ranging from anarchocommunist potsmoking to a <a href="https://www.fablab.berlin/">full-blown commercial operation</a>.
- I will do my best to educate myself about the <a href="https://github.com/0x20/hackerspace-blueprint/releases/latest/download/hackerspace-blueprint.pdf">hackerspace and makerspace</a> scene in general and I will not indulge in flamewars.
1. **Lock and stock**
- I acknowledge one locker is available for me for no charge with the supporter and member tiers.
- I acknowledge that if I switched to the resident tier I would not be eligible for a free locker, as I would already have a whole desk for my stuff
- I acknowledge additional lockers are available for 10EUR per month.
1. **Run the jewels**
- I will not make use of a blade server without a legit reason, eg running a hypervisor host. For a single web application obtain a VM from our Proxmox cluster.
- I will shut down machines not in use and I will make sure they stay shut. It's a recurring issue that servers that have been shut down are discovered powered on again, eg in BIOS make sure it says 'last state' not 'always on' or even better disconnect power.
1. **Community FTW!**
- I acknowledge that people who take time to deal with me are not paid, they do it voluntarily in their free time for sh\*ts and giggles.
- They still pay membership fees.
- I will treat them and their time with respect.
- I will not become a burden for them.
- I acknowledge rules above are not exhaustive.
- I will do my best to act in good faith and not try to work around the rules.
- I accept that there will always be a certain amount of chaos at the hackerspace.
**Disclaimer**: Lauri (K-SPACE founder) believes that:
* a) we can have a hackerspace that is not utter chaos
* b) that is financially in order
* c) offers friendly/flexible pricing for students
* d) keeps doors open for companies and startups
* e) allows reasonable buffer for experimentation.
Lauri's favourite example hackerspaces include [AFRA](https://wiki.hackerspaces.org/AFRA) and [Raumfahrtagentur](https://wiki.hackerspaces.org/Raumfahrtagentur)
---
apiVersion: v1
kind: ConfigMap
metadata:
name: oidc-gateway-approval-required
data:
approval.txt: |
Dear User,
Thank you for your interest in accessing the K-Space MTÜ infrastructure. To become a member, please contact us at info@k-space.ee
Also see https://www.k-space.ee/
Best regards, K-Space MTÜ
---
apiVersion: v1
kind: ConfigMap
metadata:
name: oidc-gateway-email-templates
data:
tos.txt: |
Hi, <%= name %>!
You agreed with the Terms of Service at <%= timestamp %>
Content SHA256 hash: <%= hash %>
Best regards,
K-Space MTÜ
tos.ejs: |
<div>
<p>Hi, <%= name %>!</p>
<p>You agreed with the following Terms of Service at <%= timestamp %></p>
<p>Content SHA256 hash: <%= hash %></p>
<blockquote>
<%- content -%>
</blockquote>
<p>Best regards, <br/> K-Space MTÜ</p>
</div>
tos.subject: |
Terms of Service agreement confirmation
link.txt: |
Open the following link to log in: <%= url %>
Best regards,
K-Space MTÜ
link.ejs: |
<div>
<p>Open the following link to log in: <a href="<%= url %>"><%= url %></a></p>
<p>Best regards, <br/> K-Space MTÜ</p>
</div>
link.subject: |
auth2.k-space.ee login link

404
openebs/rawfile.yaml Normal file
View File

@@ -0,0 +1,404 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rawfile-csi-driver
namespace: openebs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-provisioner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csistoragecapacities"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-broker
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-resizer
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-provisioner
subjects:
- kind: ServiceAccount
name: rawfile-csi-driver
namespace: openebs
roleRef:
kind: ClusterRole
name: rawfile-csi-provisioner
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-broker
subjects:
- kind: ServiceAccount
name: rawfile-csi-driver
namespace: openebs
roleRef:
kind: ClusterRole
name: rawfile-csi-broker
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-resizer
subjects:
- kind: ServiceAccount
name: rawfile-csi-driver
namespace: openebs
roleRef:
kind: ClusterRole
name: rawfile-csi-resizer
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: rawfile-csi-controller
namespace: openebs
labels:
app.kubernetes.io/name: rawfile-csi
component: controller
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: rawfile-csi
component: controller
clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
name: rawfile-csi-node
namespace: openebs
labels:
app.kubernetes.io/name: rawfile-csi
component: node
spec:
type: ClusterIP
ports:
- name: metrics
port: 9100
targetPort: metrics
protocol: TCP
selector:
app.kubernetes.io/name: rawfile-csi
component: node
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: rawfile-csi-node
namespace: openebs
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: "100%"
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: rawfile-csi
component: node
template:
metadata:
labels: *selectorLabels
spec:
serviceAccount: rawfile-csi-driver
priorityClassName: system-node-critical
tolerations:
- operator: "Exists"
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet
type: DirectoryOrCreate
- name: data-dir
hostPath:
path: /var/csi/rawfile
type: DirectoryOrCreate
containers:
- name: csi-driver
image: "harbor.k-space.ee/k-space/rawfile-localpv@sha256:4227d645b0667136bfe3d09d7b01be655e6427b851435cdb7dcf015ce22e9d51"
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
env:
- name: PROVISIONER_NAME
value: "rawfile.csi.openebs.io"
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: "harbor.k-space.ee/k-space/rawfile-localpv"
- name: IMAGE_TAG
value: "latest@sha256:4227d645b0667136bfe3d09d7b01be655e6427b851435cdb7dcf015ce22e9d51"
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
ports:
- name: metrics
containerPort: 9100
- name: csi-probe
containerPort: 9808
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: mountpoint-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: data-dir
mountPath: /data
resources:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
- name: node-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
imagePullPolicy: IfNotPresent
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --health-port=9809
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
ports:
- containerPort: 9809
name: healthz
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 5
timeoutSeconds: 5
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
cpu: 500m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
- name: external-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
imagePullPolicy: IfNotPresent
args:
- "--csi-address=$(ADDRESS)"
- "--feature-gates=Topology=true"
- "--strict-topology"
- "--immediate-topology=false"
- "--timeout=120s"
- "--enable-capacity=true"
- "--capacity-ownerref-level=1" # DaemonSet
- "--node-deployment=true"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- name: socket-dir
mountPath: /csi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: rawfile-csi-controller
namespace: openebs
spec:
replicas: 1
serviceName: rawfile-csi
selector:
matchLabels: &selectorLabels
app.kubernetes.io/name: rawfile-csi
component: controller
template:
metadata:
labels: *selectorLabels
spec:
serviceAccount: rawfile-csi-driver
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: Equal
value: "true"
effect: NoSchedule
volumes:
- name: socket-dir
emptyDir: {}
containers:
- name: csi-driver
image: "harbor.k-space.ee/k-space/rawfile-localpv@sha256:4227d645b0667136bfe3d09d7b01be655e6427b851435cdb7dcf015ce22e9d51"
imagePullPolicy: IfNotPresent
args:
- csi-driver
- --disable-metrics
env:
- name: PROVISIONER_NAME
value: "rawfile.csi.openebs.io"
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: "harbor.k-space.ee/k-space/rawfile-localpv"
- name: IMAGE_TAG
value: "latest@sha256:4227d645b0667136bfe3d09d7b01be655e6427b851435cdb7dcf015ce22e9d51"
volumeMounts:
- name: socket-dir
mountPath: /csi
ports:
- name: csi-probe
containerPort: 9808
resources:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
- name: external-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
imagePullPolicy: IfNotPresent
args:
- "--csi-address=$(ADDRESS)"
- "--handle-volume-inuse-error=false"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: rawfile.csi.openebs.io
spec:
attachRequired: false
podInfoOnMount: true
fsGroupPolicy: File
storageCapacity: true
volumeLifecycleModes:
- Persistent
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rawfile-ext4
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "ext4"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rawfile-xfs
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
fsType: "xfs"

View File

@@ -1 +0,0 @@
application.yaml

View File

@@ -1,30 +1,3 @@
# Passmower
Passmower provides magic login link based OIDC provider for all hackerspace services.
The link is sent via e-mail and Slack bot.
Passmower replaces the previously used Samba (Active Directory) and
[Authelia](https://www.authelia.com/) combo as it provides Kubernetes native
declarative user and application management,
automatic OIDC secret provisioning within the cluster,
and more.
For official documentation refer to
[github.com/passmower/passmower](https://github.com/passmower/passmower)
# For users
To log in and list the applications enrolled with Passmower visit
[auth.k-space.ee](https://auth.k-space.ee/)
To add applications refer to the [official docs](https://github.com/passmower/passmower?tab=readme-ov-file#application-enrollment)
For good examples refer to [Grafana](https://git.k-space.ee/k-space/kube/src/branch/master/grafana/application.yml)
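As a rough sketch of what enrollment looks like (the name and URIs below are placeholders; see the Grafana example above for a real one):
```
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCClient
metadata:
  name: example-app
spec:
  displayName: Example app
  uri: https://example-app.k-space.ee/
  redirectUris:
    - https://example-app.k-space.ee/oauth2/callback
  grantTypes:
    - authorization_code
  responseTypes:
    - code
  availableScopes:
    - openid
    - profile
  tokenEndpointAuthMethod: client_secret_basic
```
Passmower provisions the client id and secret into a Secret in the same namespace, so the application can consume them via `envFrom`.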
# For administrators
Passmower was deployed with Helm chart:
```
helm template --include-crds -n passmower passmower oci://ghcr.io/passmower/charts/passmower -f passmower/values.yaml > passmower/application.yaml
```
Or, when rendering from a local chart checkout:
```
helm template --include-crds -n passmower passmower ../passmower/ -f passmower/values.yaml > passmower/application.yaml
```

View File

@@ -1,4 +1,3 @@
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
@@ -17,19 +16,10 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`auth2.k-space.ee`)
kind: Rule
middlewares:
- name: auth2-redirect
services:
- kind: TraefikService
name: api@internal
---
# Creates a dummy/stub in auth.k-space.ee user-facing service listing
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: slack
spec:
displayName: Slack
uri: 'https://k-space-ee.slack.com/'
- match: Host(`auth2.k-space.ee`)
kind: Rule
middlewares:
- name: auth2-redirect
services:
- kind: TraefikService
name: api@internal

659
passmower/application.yaml Normal file
View File

@@ -0,0 +1,659 @@
---
# Source: passmower/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: passmower
labels:
helm.sh/chart: passmower-0.1.0
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
app.kubernetes.io/version: "0.1.0"
app.kubernetes.io/managed-by: Helm
---
# Source: passmower/templates/crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: oidcusers.codemowers.cloud
spec:
group: codemowers.cloud
names:
plural: oidcusers
singular: oidcuser
kind: OIDCUser
listKind: OIDCUserList
scope: Namespaced
versions:
- name: v1beta1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required:
- spec
properties:
github:
type: object
properties:
company:
type: string
email:
type: string
emails:
type: array
items:
type: object
properties:
email:
type: string
primary:
type: boolean
default: false
groups:
type: array
items:
type: object
properties:
name:
type: string
prefix:
type: string
enum:
- github.com
id:
type: integer
login:
type: string
name:
type: string
passmower:
type: object
properties:
company:
type: string
email:
type: string
groups:
type: array
items:
type: object
properties:
name:
type: string
prefix:
type: string
name:
type: string
slack:
type: object
properties:
id:
type: string
spec:
type: object
required:
- type
properties:
companyEmail:
type: string
email:
type: string
groups:
type: array
items:
type: object
properties:
name:
type: string
prefix:
type: string
type:
type: string
default: person
enum:
- person
- org
- service
- banned
- group
status:
type: object
properties:
conditions:
type: array
items:
type: object
x-kubernetes-preserve-unknown-fields: true
x-kubernetes-embedded-resource: true
emails:
type: array
items:
type: string
groups:
type: array
items:
type: object
properties:
name:
type: string
prefix:
type: string
primaryEmail:
type: string
profile:
type: object
properties:
company:
type: string
name:
type: string
x-kubernetes-preserve-unknown-fields: true
slackId:
type: string
subresources:
status: {}
additionalPrinterColumns:
- name: Type
type: string
jsonPath: .spec.type
- name: Name
type: string
jsonPath: .status.profile.name
- name: Display e-mail
type: string
jsonPath: .status.primaryEmail
- name: Upstream IdP e-mail
type: string
jsonPath: .spec.github.emails[?(@.primary==true)].email
- name: GH ID
type: string
jsonPath: .spec.github.id
- name: Groups
type: string
jsonPath: .status.groups
conversion:
strategy: None
---
# Source: passmower/templates/crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: oidcclients.codemowers.cloud
spec:
group: codemowers.cloud
names:
plural: oidcclients
singular: oidcclient
kind: OIDCClient
listKind: OIDCClientList
scope: Namespaced
versions:
- name: v1beta1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required:
- spec
properties:
spec:
type: object
required:
- redirectUris
- grantTypes
- responseTypes
properties:
allowedCORSOrigins:
type: array
items:
type: string
allowedGroups:
type: array
items:
type: string
availableScopes:
type: array
default:
- openid
items:
type: string
enum:
- openid
- profile
- offline_access
displayName:
type: string
grantTypes:
type: array
items:
type: string
enum:
- implicit
- authorization_code
- refresh_token
idTokenSignedResponseAlg:
type: string
enum:
- PS256
- RS256
- ES256
overrideIncomingScopes:
type: boolean
default: false
pkce:
type: boolean
default: true
redirectUris:
type: array
items:
type: string
responseTypes:
type: array
items:
type: string
enum:
- code id_token
- code
- id_token
- none
secretMetadata:
type: object
properties:
annotations:
type: object
x-kubernetes-preserve-unknown-fields: true
labels:
type: object
x-kubernetes-preserve-unknown-fields: true
secretRefreshPod:
type: object
x-kubernetes-preserve-unknown-fields: true
x-kubernetes-embedded-resource: true
tokenEndpointAuthMethod:
type: string
enum:
- client_secret_basic
- client_secret_jwt
- client_secret_post
- private_key_jwt
- none
uri:
type: string
status:
type: object
properties:
conditions:
type: array
items:
type: object
x-kubernetes-preserve-unknown-fields: true
x-kubernetes-embedded-resource: true
instance:
type: string
subresources:
status: {}
additionalPrinterColumns:
- name: Instance
type: string
description: Passmower deployment which manages this client
jsonPath: .status.instance
- name: Uris
type: string
description: Redirect URLs configured for this client
jsonPath: .spec.redirectUris
- name: Allowed groups
type: string
description: Groups allowed to this client
jsonPath: .spec.allowedGroups
conversion:
strategy: None
---
# Source: passmower/templates/crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: oidcmiddlewareclients.codemowers.cloud
spec:
group: codemowers.cloud
names:
plural: oidcmiddlewareclients
singular: oidcmiddlewareclient
kind: OIDCMiddlewareClient
listKind: OIDCMiddlewareClientList
scope: Namespaced
versions:
- name: v1beta1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required:
- spec
properties:
spec:
type: object
properties:
allowedGroups:
type: array
items:
type: string
displayName:
type: string
headerMapping:
type: object
default:
email: Remote-Email
groups: Remote-Groups
name: Remote-Name
user: Remote-User
properties:
email:
type: string
groups:
type: string
name:
type: string
user:
type: string
uri:
type: string
status:
type: object
properties:
conditions:
type: array
items:
type: object
x-kubernetes-preserve-unknown-fields: true
x-kubernetes-embedded-resource: true
instance:
type: string
subresources:
status: {}
additionalPrinterColumns:
- name: Instance
type: string
description: Passmower deployment which manages this client
jsonPath: .status.instance
- name: Uri
type: string
description: URL configured for this client
jsonPath: .spec.uri
- name: Allowed groups
type: string
description: Groups allowed to this client
jsonPath: .spec.allowedGroups
conversion:
strategy: None
---
# Source: passmower/templates/serviceaccount.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: passmower
labels:
helm.sh/chart: passmower-0.1.0
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
app.kubernetes.io/version: "0.1.0"
app.kubernetes.io/managed-by: Helm
rules:
- verbs:
- get
- list
- watch
- create
- update
- patch
apiGroups:
- codemowers.cloud
resources:
- oidcusers
- oidcusers/status
- oidcclients
- oidcclients/status
- oidcmiddlewareclients
- oidcmiddlewareclients/status
- verbs:
- get
- create
- patch
- delete
apiGroups:
- ''
resources:
- secrets
- verbs:
- create
apiGroups:
- ''
resources:
- pods
- verbs:
- get
- create
- update
- patch
- delete
apiGroups:
- traefik.io
resources:
- middlewares
---
# Source: passmower/templates/serviceaccount.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: passmower
labels:
helm.sh/chart: passmower-0.1.0
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
app.kubernetes.io/version: "0.1.0"
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount
name: passmower
namespace: passmower
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: passmower
---
# Source: passmower/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: passmower
labels:
helm.sh/chart: passmower-0.1.0
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
app.kubernetes.io/version: "0.1.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
---
# Source: passmower/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: passmower
labels:
helm.sh/chart: passmower-0.1.0
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
app.kubernetes.io/version: "0.1.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 3
selector:
matchLabels:
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
template:
metadata:
labels:
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
spec:
serviceAccountName: passmower
securityContext:
{}
containers:
- name: passmower
securityContext:
{}
image: "passmower/passmower:develop"
imagePullPolicy: Always
ports:
- containerPort: 3000
name: http
- containerPort: 9090
name: metrics
env:
- name: ISSUER_URL
value: https://auth.k-space.ee/
- name: DEPLOYMENT_NAME
value: passmower
- name: GROUP_PREFIX
value: "k-space"
- name: ADMIN_GROUP
value: "k-space:onboarding"
- name: REQUIRED_GROUP
value: ""
- name: GITHUB_ORGANIZATION
value: "codemowers"
- name: ENROLL_USERS
value: "false"
- name: NAMESPACE_SELECTOR
value: "*"
- name: PREFERRED_EMAIL_DOMAIN
value: "k-space.ee"
- name: REQUIRE_CUSTOM_USERNAME
value: "true"
- name: NORMALIZE_EMAIL_ADDRESSES
value: "true"
- name: REDIS_URI
valueFrom:
secretKeyRef:
name: dragonfly-auth
key: REDIS_URI
envFrom:
- secretRef:
name: oidc-keys
- secretRef:
name: email-credentials
- secretRef:
name: github-client
- secretRef:
name: slack-client
readinessProbe:
httpGet:
path: /.well-known/openid-configuration
port: 3000
httpHeaders:
- name: x-forwarded-for # suppress oidc-provider warning
value: https://auth.k-space.ee/
- name: x-forwarded-proto # suppress oidc-provider warning
value: https
livenessProbe:
httpGet:
path: /health
port: 9090
httpHeaders:
- name: x-forwarded-for # suppress oidc-provider warning
value: https://auth.k-space.ee/
- name: x-forwarded-proto # suppress oidc-provider warning
value: https
initialDelaySeconds: 30
periodSeconds: 3
volumeMounts:
- mountPath: /app/tos
name: tos
- mountPath: /app/approval
name: approval
- mountPath: /app/src/views/custom/emails
name: email-templates
resources:
{}
volumes:
- name: tos
configMap:
name: passmower-tos
- name: approval
configMap:
name: passmower-approval
- name: email-templates
configMap:
name: passmower-email-templates
---
# Source: passmower/templates/deployment.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: passmower-key-manager
labels:
helm.sh/chart: passmower-0.1.0
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
app.kubernetes.io/version: "0.1.0"
app.kubernetes.io/managed-by: Helm
spec:
template:
spec:
serviceAccountName: passmower
containers:
- name: oidc-key-manager
image: "passmower/passmower:develop"
command: [ '/app/node_modules/.bin/key-manager', 'initialize', '-c', 'cluster' ]
restartPolicy: Never
---
# Source: passmower/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: passmower
labels:
helm.sh/chart: passmower-0.1.0
app.kubernetes.io/name: passmower
app.kubernetes.io/instance: passmower
app.kubernetes.io/version: "0.1.0"
app.kubernetes.io/managed-by: Helm
annotations:
external-dns.alpha.kubernetes.io/hostname: auth.k-space.ee,auth2.k-space.ee
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- "*.k-space.ee"
secretName:
rules:
- host: "auth.k-space.ee"
http:
paths:
- path: "/"
pathType: Prefix
backend:
service:
name: passmower
port:
number: 80
---
# Source: passmower/templates/texts.yaml
---

View File

@@ -18,20 +18,11 @@ metadata:
name: redis
namespace: passmower
spec:
image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.21.2@sha256:4a253dffa1f0416c454a758dcbaa86c29c4065aba44b5240fa25e55c5d89baf3"
authentication:
passwordFromSecret:
key: password
name: dragonfly-auth
replicas: 3
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: redis
app.kubernetes.io/part-of: dragonfly
resources:
limits:
cpu: 600m

View File

@@ -140,47 +140,47 @@ metadata:
kubernetes.io/ingress.class: traefik
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
# traefik.ingress.kubernetes.io/router.middlewares: passmower-proxmox@kubernetescrd,passmower-proxmox-redirect@kubernetescrd
traefik.ingress.kubernetes.io/router.middlewares: passmower-proxmox@kubernetescrd,passmower-proxmox-redirect@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
rules:
- host: proxmox.k-space.ee
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: whoami
port:
number: 80
- host: pve.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: pve1
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve8
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve9
port:
number: 8006
- host: proxmox.k-space.ee
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: whoami
port:
number: 80
- host: pve.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: pve1
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve8
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve9
port:
number: 8006
tls:
- hosts:
- "*.k-space.ee"
- hosts:
- "*.k-space.ee"
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
@@ -200,13 +200,13 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`proxmox.k-space.ee`)
kind: Rule
middlewares:
- name: proxmox-redirect
services: # Dirty workaround, service can't be empty
- kind: TraefikService
name: api@internal
- match: Host(`proxmox.k-space.ee`)
kind: Rule
middlewares:
- name: proxmox-redirect
services: # Dirty workaround, service can't be empty
- kind: TraefikService
name: api@internal
---
apiVersion: networking.k8s.io/v1
kind: Ingress
@@ -220,33 +220,33 @@ metadata:
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
rules:
- host: pve-internal.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: pve1
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve8
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve9
port:
number: 8006
- host: pve-internal.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: pve1
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve8
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve9
port:
number: 8006
tls:
- hosts:
- "*.k-space.ee"
- hosts:
- "*.k-space.ee"
---
apiVersion: traefik.io/v1alpha1
kind: Middleware

View File

@@ -38,16 +38,16 @@ metadata:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: prusa.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: prusa
port:
name: http
- host: prusa.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: prusa
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
- hosts:
- "*.k-space.ee"

92
passmower/users.yaml Normal file
View File

@@ -0,0 +1,92 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCUser
metadata:
name: eaas
spec:
type: person
email: erkiaas@gmail.com
companyEmail: eaas@k-space.ee
groups:
- prefix: k-space
name: floor
- prefix: k-space
name: proxmox:admins
- prefix: k-space
name: kubernetes:admins
- prefix: k-space
name: onboarding
- prefix: k-space
name: youtube
- prefix: k-space
name: gitlab
- prefix: k-space
name: legalmember
- prefix: k-space
name: matomo
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCUser
metadata:
name: lauri
spec:
type: person
email: lauri.vosandi@gmail.com
companyEmail: lauri@k-space.ee
groups:
- prefix: k-space
name: board
- prefix: k-space
name: info
- prefix: k-space
name: workshop
- prefix: k-space
name: floor
- prefix: k-space
name: kubernetes:admins
- prefix: k-space
name: proxmox:admins
- prefix: k-space
name: youtube
- prefix: k-space
name: facebook
- prefix: k-space
name: instagram
- prefix: k-space
name: gitlab
- prefix: k-space
name: legalmember
- prefix: k-space
name: onboarding
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCUser
metadata:
name: rasmus
spec:
type: person
email: github@c7.ee
companyEmail: rasmus@k-space.ee
groups:
- prefix: k-space
name: info
- prefix: k-space
name: onboarding
- prefix: k-space
name: workshop
- prefix: k-space
name: floor
- prefix: k-space
name: proxmox:admins
- prefix: k-space
name: kubernetes:developers
- prefix: k-space
name: kubernetes:admins
- prefix: k-space
name: instagram
- prefix: k-space
name: facebook
- prefix: k-space
name: youtube
- prefix: k-space
name: gitlab

View File

@@ -1,4 +1,3 @@
---
nameOverride: ""
fullnameOverride: ""
@@ -6,7 +5,7 @@ passmower:
# Hostname on which Passmower will be deployed to. Will be used as ingress host.
host: "auth.k-space.ee"
# Local groups will be created with given prefix.
group_prefix: 'k-space'
group_prefix: 'k-space'
# Local or remote group which members will automatically become admins.
admin_group: 'k-space:onboarding'
# If set, require all users to be member of the given local or remote group.
@@ -15,12 +14,10 @@ passmower:
github_organization: "codemowers"
# Allow enrolling new users automatically. Actual access will be based on required_group parameter. Disable to only manually provision users.
enroll_users: false
# Disable making changes to users on their profile or via admin panel - use for enforcing GitOps practices via OIDCUser spec.
disable_frontend_edit: true
# Comma-separated, wildcard enabled namespace selector to select, in which namespaces Passmower looks for client CRDs.
namespace_selector: "*"
# Domain which will be preferred for determining primary emails.
preferred_email_domain: 'k-space.ee'
preferred_email_domain: 'k-space.ee'
# Require users to set a custom username instead of system generated one. Will be used as OIDCUser CRD name and OIDC username claim.
require_custom_username: true
# Normalize incoming email addresses by removing aliases (e.g. username+alias@gmail.com) etc.
@@ -42,10 +39,7 @@ passmower:
terms_of_service:
configMapRef:
name: passmower-tos
disable_frontend_edit:
content: "Edit users via [the members repo](https://git.k-space.ee/k-space/members). The repository is automatically synced to cluster via [ArgoCD](https://argocd.k-space.ee/applications/argocd/members?view=tree&resource=)"
redis:
# Requires the Codemowers redis-operator to be installed: https://github.com/codemowers/operatorlib/tree/main/samples/redis-operator
redisClaim:
@@ -75,23 +69,45 @@ ingress:
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
external-dns.alpha.kubernetes.io/hostname: auth.k-space.ee,auth2.k-space.ee
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
- hosts:
- "*.k-space.ee"
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# image:
# repository: docker.com/passmower/passmower
# pullPolicy: IfNotPresent
# # Overrides the image tag whose default is the chart appVersion.
# tag: "develop"
image:
repository: passmower/passmower
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: "develop"
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
replicaCount: 3
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}

View File

@@ -38,16 +38,16 @@ metadata:
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: voron.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: voron
port:
name: http
- host: voron.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: voron
port:
name: http
tls:
- hosts:
- "*.k-space.ee"
- hosts:
- "*.k-space.ee"

View File

@@ -1,12 +1,9 @@
# Postgres clusters
This is for shared Postgres clusters managed by the Codemowers' sample: [postgres-database-operator](https://github.com/codemowers/operatorlib/tree/main/samples/postgres-database-operator), which is deployed via [ArgoCD](https://argocd.k-space.ee/applications/argocd/postgres-database-operator)
This is namespace for Postgres clusters managed by Codemowers' sample
[postgres-database-operator](https://github.com/codemowers/operatorlib/tree/main/samples/postgres-database-operator)
which is deployed via [ArgoCD](https://argocd.k-space.ee/applications/argocd/postgres-database-operator)
All instances run in a dedicated namespace. Clusters are managed by the operator bundle. For more details check https://cloudnative-pg.io/.
Admin access available via [pgweb.k-space.ee](https://pgweb.k-space.ee/).
Deploying manually without ArgoCD:
```
kubectl create namespace postgres-clusters
kubectl apply -n postgres-clusters -f application.yaml

View File

@@ -1,6 +0,0 @@
clusters:
- url: https://pve1.proxmox.infra.k-space.ee:8006/api2/json
insecure: false
token_id: "ks-kubernetes-csi@pve!cs"
token_secret: "<token goes here>"
region: pve-cluster

View File

@@ -1,554 +0,0 @@
---
# Source: proxmox-csi-plugin/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: csi-proxmox
labels:
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/audit: baseline
pod-security.kubernetes.io/warn: baseline
---
# Source: proxmox-csi-plugin/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: proxmox-csi-plugin-controller
namespace: csi-proxmox
labels:
helm.sh/chart: proxmox-csi-plugin-0.2.8
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/version: "v0.7.0"
app.kubernetes.io/managed-by: Helm
---
# Source: proxmox-csi-plugin/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: proxmox-csi-plugin-node
namespace: csi-proxmox
labels:
helm.sh/chart: proxmox-csi-plugin-0.2.8
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/version: "v0.7.0"
app.kubernetes.io/managed-by: Helm
---
# Source: proxmox-csi-plugin/templates/storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: proxmox-data-xfs
provisioner: csi.proxmox.sinextra.dev
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
parameters:
csi.storage.k8s.io/fstype: xfs
storage: data
---
# Source: proxmox-csi-plugin/templates/storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: proxmox-data
provisioner: csi.proxmox.sinextra.dev
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
parameters:
csi.storage.k8s.io/fstype: ext4
ssd: "true"
storage: data
---
# Source: proxmox-csi-plugin/templates/controller-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxmox-csi-plugin-controller
namespace: csi-proxmox
labels:
helm.sh/chart: proxmox-csi-plugin-0.2.8
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/version: "v0.7.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "patch", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get","list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
# Source: proxmox-csi-plugin/templates/node-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxmox-csi-plugin-node
namespace: csi-proxmox
labels:
helm.sh/chart: proxmox-csi-plugin-0.2.8
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/version: "v0.7.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
# Source: proxmox-csi-plugin/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxmox-csi-plugin-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxmox-csi-plugin-controller
subjects:
- kind: ServiceAccount
name: proxmox-csi-plugin-controller
namespace: csi-proxmox
---
# Source: proxmox-csi-plugin/templates/node-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxmox-csi-plugin-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxmox-csi-plugin-node
subjects:
- kind: ServiceAccount
name: proxmox-csi-plugin-node
namespace: csi-proxmox
---
# Source: proxmox-csi-plugin/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: proxmox-csi-plugin-controller
namespace: csi-proxmox
labels:
helm.sh/chart: proxmox-csi-plugin-0.2.8
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/version: "v0.7.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["storage.k8s.io"]
resources: ["csistoragecapacities"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get"]
---
# Source: proxmox-csi-plugin/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: proxmox-csi-plugin-controller
namespace: csi-proxmox
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: proxmox-csi-plugin-controller
subjects:
- kind: ServiceAccount
name: proxmox-csi-plugin-controller
namespace: csi-proxmox
---
# Source: proxmox-csi-plugin/templates/node-deployment.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: proxmox-csi-plugin-node
namespace: csi-proxmox
labels:
helm.sh/chart: proxmox-csi-plugin-0.2.8
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/version: "v0.7.0"
app.kubernetes.io/managed-by: Helm
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/component: node
template:
metadata:
labels:
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/component: node
spec:
priorityClassName: system-node-critical
enableServiceLinks: false
serviceAccountName: proxmox-csi-plugin-node
securityContext:
runAsUser: 0
runAsGroup: 0
containers:
- name: proxmox-csi-plugin-node
securityContext:
privileged: true
capabilities:
drop:
- ALL
add:
- SYS_ADMIN
- CHOWN
- DAC_OVERRIDE
seccompProfile:
type: RuntimeDefault
image: "ghcr.io/sergelogvinov/proxmox-csi-node:edge"
imagePullPolicy: Always
args:
- "-v=5"
- "--csi-address=unix:///csi/csi.sock"
- "--node-id=$(NODE_NAME)"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
resources:
{}
volumeMounts:
- name: socket
mountPath: /csi
- name: kubelet
mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
- name: dev
mountPath: /dev
- name: sys
mountPath: /sys
- name: csi-node-driver-registrar
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
image: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.4"
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--csi-address=unix:///csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/csi.proxmox.sinextra.dev/csi.sock"
volumeMounts:
- name: socket
mountPath: /csi
- name: registration
mountPath: /registration
resources:
requests:
cpu: 10m
memory: 16Mi
- name: liveness-probe
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
image: "registry.k8s.io/sig-storage/livenessprobe:v2.11.0"
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--csi-address=unix:///csi/csi.sock"
volumeMounts:
- name: socket
mountPath: /csi
resources:
requests:
cpu: 10m
memory: 16Mi
volumes:
- name: socket
hostPath:
path: /var/lib/kubelet/plugins/csi.proxmox.sinextra.dev/
type: DirectoryOrCreate
- name: registration
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: kubelet
hostPath:
path: /var/lib/kubelet
type: Directory
- name: dev
hostPath:
path: /dev
type: Directory
- name: sys
hostPath:
path: /sys
type: Directory
tolerations:
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
---
# Source: proxmox-csi-plugin/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: proxmox-csi-plugin-controller
namespace: csi-proxmox
labels:
helm.sh/chart: proxmox-csi-plugin-0.2.8
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/version: "v0.7.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/component: controller
template:
metadata:
annotations:
checksum/config: c69436cb1e16c36ff708b1003d3ca4c6ee6484d2524e2ba7d9b68f473acaa1ca
labels:
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/component: controller
spec:
priorityClassName: system-cluster-critical
enableServiceLinks: false
serviceAccountName: proxmox-csi-plugin-controller
securityContext:
fsGroup: 65532
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
hostAliases:
[]
initContainers:
[]
containers:
- name: proxmox-csi-plugin-controller
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
image: "ghcr.io/sergelogvinov/proxmox-csi-controller:edge"
imagePullPolicy: Always
args:
- "-v=5"
- "--csi-address=unix:///csi/csi.sock"
- "--cloud-config=/etc/proxmox/config.yaml"
# - "--metrics-address=:8080"
# ports:
# - name: metrics
# containerPort: 8080
# protocol: TCP
resources:
requests:
cpu: 10m
memory: 16Mi
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: cloud-config
mountPath: /etc/proxmox/
- name: csi-attacher
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
image: "registry.k8s.io/sig-storage/csi-attacher:v4.4.4"
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--csi-address=unix:///csi/csi.sock"
- "--timeout=3m"
- "--leader-election"
- "--default-fstype=ext4"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
requests:
cpu: 10m
memory: 16Mi
- name: csi-provisioner
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
image: "registry.k8s.io/sig-storage/csi-provisioner:v3.6.4"
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--csi-address=unix:///csi/csi.sock"
- "--timeout=3m"
- "--leader-election"
- "--default-fstype=ext4"
- "--feature-gates=Topology=True"
- "--enable-capacity"
- "--capacity-ownerref-level=2"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
requests:
cpu: 10m
memory: 16Mi
- name: csi-resizer
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
image: "registry.k8s.io/sig-storage/csi-resizer:v1.9.4"
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--csi-address=unix:///csi/csi.sock"
- "--timeout=3m"
- "--handle-volume-inuse-error=false"
- "--leader-election"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
requests:
cpu: 10m
memory: 16Mi
- name: liveness-probe
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
image: "registry.k8s.io/sig-storage/livenessprobe:v2.11.0"
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--csi-address=unix:///csi/csi.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
requests:
cpu: 10m
memory: 16Mi
volumes:
- name: socket-dir
emptyDir: {}
- name: cloud-config
secret:
secretName: proxmox-csi-plugin
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: proxmox-csi-plugin
app.kubernetes.io/instance: proxmox-csi-plugin
app.kubernetes.io/component: controller
---
# Source: proxmox-csi-plugin/templates/csidriver.yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi.proxmox.sinextra.dev
spec:
attachRequired: true
podInfoOnMount: true
storageCapacity: true
volumeLifecycleModes:
- Persistent
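
With `storageCapacity: true` here and the provisioner's `--enable-capacity` flag above, the controller publishes `CSIStorageCapacity` objects for the scheduler to consult. A quick way to confirm the driver registered and capacity tracking is working (plain kubectl, nothing repo-specific):
```
kubectl get csidriver csi.proxmox.sinextra.dev
kubectl get csistoragecapacities -A
```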


@@ -1,24 +0,0 @@
# proxmox-csi
1. create role in pve if it doesn't exist
2. create user and assign permissions, preferably at resource pool level
```
pveum user add ks-kubernetes-csi@pve
pveum aclmod /pool/kspace_pool -user ks-kubernetes-csi@pve -role CSI
pveum user token add ks-kubernetes-csi@pve cs -privsep 0
```
save the token!
3. apply `proxmox-csi-plugin.yml` and `storage-class.yaml`, then delete the proxmox-csi default storage classes from kube.
4. add the token from pve to `config.yaml` (sketched after this list) and create the secret: `kubectl -n csi-proxmox create secret generic proxmox-csi-plugin --from-file=config.yaml`
5. label the nodes according to allocation:
```
kubectl --kubeconfig ~/.kube/k-space label nodes worker1.kube.k-space.ee topology.kubernetes.io/region=pve-cluster topology.kubernetes.io/zone=pve1 --overwrite
kubectl --kubeconfig ~/.kube/k-space label nodes worker2.kube.k-space.ee topology.kubernetes.io/region=pve-cluster topology.kubernetes.io/zone=pve2 --overwrite
kubectl --kubeconfig ~/.kube/k-space label nodes worker3.kube.k-space.ee topology.kubernetes.io/region=pve-cluster topology.kubernetes.io/zone=pve8 --overwrite
kubectl --kubeconfig ~/.kube/k-space label nodes worker4.kube.k-space.ee topology.kubernetes.io/region=pve-cluster topology.kubernetes.io/zone=pve9 --overwrite
kubectl --kubeconfig ~/.kube/k-space label nodes master1.kube.k-space.ee topology.kubernetes.io/region=pve-cluster topology.kubernetes.io/zone=pve1 --overwrite
kubectl --kubeconfig ~/.kube/k-space label nodes master2.kube.k-space.ee topology.kubernetes.io/region=pve-cluster topology.kubernetes.io/zone=pve2 --overwrite
kubectl --kubeconfig ~/.kube/k-space label nodes master3.kube.k-space.ee topology.kubernetes.io/region=pve-cluster topology.kubernetes.io/zone=pve8 --overwrite
```
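
For step 4, a minimal sketch of what `config.yaml` could look like, assuming a single PVE API endpoint — the URL is a placeholder, `token_id` follows from the `pveum` commands above, and `region` must match the `topology.kubernetes.io/region` label applied in step 5:
```
clusters:
  - url: https://pve1.example.com:8006/api2/json  # placeholder endpoint
    insecure: false
    token_id: "ks-kubernetes-csi@pve!cs"
    token_secret: "<token saved in step 2>"
    region: pve-cluster
```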


@@ -1,27 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: proxmox
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs
cache: none
ssd: "false"
provisioner: csi.proxmox.sinextra.dev
allowVolumeExpansion: true
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: proxmox-nas
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs-nas
cache: none
ssd: "false"
provisioner: csi.proxmox.sinextra.dev
allowVolumeExpansion: true
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
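
Since both classes use `volumeBindingMode: WaitForFirstConsumer`, a volume is only provisioned once a pod using the claim is scheduled. A minimal claim against the `proxmox` class (name and size are arbitrary, for illustration):
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data  # illustration only
spec:
  storageClassName: proxmox
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
```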


@@ -1,94 +1,28 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: postgres
annotations:
kubernetes.io/description: |
Storage class for Postgres, Postgis and similar applications that
implement high availability in application layer.
This storage class uses XFS, has no block level redundancy and
has block device level caching disabled.
provisioner: csi.proxmox.sinextra.dev
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs
cache: none
ssd: "true"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mysql
annotations:
kubernetes.io/description: |
Storage class for MySQL, MariaDB and similar applications that
implement high availability in application layer.
This storage class uses XFS, has no block level redundancy and
has block device level caching disabled.
provisioner: csi.proxmox.sinextra.dev
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs
cache: none
ssd: "true"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mongo
annotations:
kubernetes.io/description: |
Storage class for Mongo and similar applications that
implement high availability in application layer.
This storage class uses XFS, has no block level redundancy and
has block device level caching disabled.
provisioner: csi.proxmox.sinextra.dev
provisioner: rawfile.csi.openebs.io
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs
cache: none
ssd: "true"
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: prometheus
annotations:
kubernetes.io/description: |
Storage class for Prometheus and similar applications that
implement high availability in application layer.
This storage class uses XFS, has no block level redundancy and
has block device level caching disabled.
provisioner: csi.proxmox.sinextra.dev
provisioner: rancher.io/local-path
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs
cache: none
ssd: "true"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: woodpecker
annotations:
kubernetes.io/description: |
Storage class for Drone, Woodpecker and similar application
pipeline runs where Git repos are checked out to.
This storage class uses XFS, has no block level redundancy and it's
deleted as soon as the pod exits.
provisioner: driver.longhorn.io
reclaimPolicy: Delete
volumeBindingMode: Immediate
@@ -102,11 +36,6 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gitea
annotations:
kubernetes.io/description: |
Storage class for Gitea and similar applications needing
block device level replication with 3 replicas using XFS filesystem and
best effort data locality.
provisioner: driver.longhorn.io
reclaimPolicy: Retain
volumeBindingMode: Immediate
@@ -115,57 +44,3 @@ parameters:
dataLocality: best-effort
numberOfReplicas: "3"
fsType: "xfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rabbitmq
annotations:
kubernetes.io/description: |
Storage class for RabbitMQ and similar applications
deployed in highly available fashion utilizing application level
replication needing persistent volume.
This storage class uses XFS, has no block level redundancy and
has block device level caching disabled.
provisioner: csi.proxmox.sinextra.dev
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs
cache: none
ssd: "true"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: redis
annotations:
kubernetes.io/description: |
Storage class for Redis, KeyDB, DragonflyDB and similar applications
deployed in highly available fashion utilizing application level
replication needing persistent volume for storing the snapshots.
This storage class uses XFS, has no block level redundancy and
has block device level caching disabled.
provisioner: csi.proxmox.sinextra.dev
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/fstype: xfs
storage: ks-pvs
cache: none
ssd: "true"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: "null"
annotations:
kubernetes.io/description: |
Storage class for applications insisting on having a PV, but actually do
not and for data that can be discarded immediately
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer


@@ -1 +0,0 @@
tigera-operator.yaml


@@ -1,20 +1,9 @@
# Calico
# Calico CNI
Calico implements a
[container network interface plugin](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)
which provides the inter-pod network over BGP and also advertises Service LB IPs.
# For user
Nothing specific to point out; this is a standard Kubernetes feature
# For administrator
Tigera operator was used to deploy Calico:
Calico implements the inter-pod overlay network
```
curl https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/tigera-operator.yaml -O
curl https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/tigera-operator.yaml -O
kubectl apply --server-side --force-conflicts -f tigera-operator.yaml
kubectl apply -f application.yaml
kubectl -n calico-system create secret generic bgp-secrets --from-literal=password=...
```
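
Once the operator has settled, the BGP sessions can be checked from any node — `calicoctl node status` lists each peer and its session state. A sketch, assuming calicoctl is installed on the node (run as root):
```
sudo calicoctl node status
```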


@@ -12,8 +12,8 @@ spec:
- blockSize: 26
cidr: 10.244.0.0/16
encapsulation: VXLANCrossSubnet
natOutgoing: Disabled
nodeSelector: all()
natOutgoing: Enabled
nodeSelector: all()
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
@@ -22,60 +22,3 @@ kind: APIServer
metadata:
name: default
spec: {}
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
name: mikrotik-router
spec:
peerIP: 172.21.255.254
asNumber: 64567
keepOriginalNextHop: true
password:
secretKeyRef:
name: bgp-secrets
key: password
---
apiVersion: crd.projectcalico.org/v1
kind: BGPConfiguration
metadata:
name: default
spec:
serviceLoadBalancerIPs:
- cidr: 172.21.51.4/32
- cidr: 172.21.53.1/32
- cidr: 172.21.53.2/32
- cidr: 172.21.53.3/32
- cidr: 193.40.103.36/32
- cidr: 193.40.103.37/32
- cidr: 193.40.103.38/32
- cidr: 193.40.103.39/32
- cidr: 62.65.250.36/32
- cidr: 62.65.250.37/32
- cidr: 62.65.250.2/32
- cidr: 193.40.103.25/32
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: secret-access
namespace: calico-system
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["bgp-secrets"]
verbs: ["watch", "list", "get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: secret-access
namespace: calico-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: secret-access
subjects:
- kind: ServiceAccount
name: calico-node
namespace: calico-system


@@ -45,7 +45,6 @@ spec:
usages:
- digital signature
- key encipherment
revisionHistoryLimit: 1
---
apiVersion: traefik.io/v1alpha1
kind: Middleware


@@ -20,7 +20,7 @@ providers:
enabled: true
deployment:
replicas: 4
replicas: 2
annotations:
keel.sh/policy: minor


@@ -14,27 +14,12 @@ spec:
labels:
app: whoami
spec:
securityContext:
runAsUser: 65534
runAsGroup: 65534
containers:
- name: whoami
image: traefik/whoami
env:
- name: WHOAMI_PORT_NUMBER
value: "8080"
resources:
limits:
cpu: 10m
memory: 15Mi
requests:
cpu: 1m
memory: 2Mi
securityContext:
readOnlyRootFilesystem: true
ports:
- containerPort: 8080
protocol: TCP
- image: traefik/whoami
name: whoami
ports:
- containerPort: 80
protocol: TCP
---
apiVersion: v1
kind: Service
@@ -49,7 +34,7 @@ spec:
ports:
- name: whoami-http
port: 80
targetPort: 8080
targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
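
For a quick smoke test after changing the port wiring, going through the Service catches mismatches between `port` and `targetPort`; a sketch assuming the Service is named `whoami` in the current namespace:
```
kubectl port-forward svc/whoami 8000:80 &   # service name assumed for illustration
curl http://localhost:8000/                 # whoami echoes request and pod details
```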


@@ -10,4 +10,3 @@ spec:
kind: ClusterIssuer
name: default
secretName: wildduck-tls
revisionHistoryLimit: 1


@@ -19,8 +19,8 @@ spec:
image: mirror.gcr.io/clamav/clamav:1.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3310
name: api
- containerPort: 3310
name: api
volumeMounts:
- mountPath: /var/lib/clamav
name: avdata
@@ -41,5 +41,5 @@ spec:
selector:
app.kubernetes.io/name: clamav
ports:
- port: 3310
name: clamav
- port: 3310
name: clamav
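
clamd answers a plaintext `PING` on its TCP port, which doubles as a cheap liveness check; a sketch from a pod inside the cluster, assuming the Service resolves as `clamav` in the same namespace and nc is available:
```
echo PING | nc -w 3 clamav 3310   # clamd replies PONG when healthy
```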

Some files were not shown because too many files have changed in this diff.