Separate from Kubernetes repo

Lauri Võsandi 2024-08-12 21:36:48 +03:00
commit 55f95641ca
14 changed files with 1080 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1 @@
*.keys

README.md Normal file

@@ -0,0 +1,44 @@
# Ansible playbooks
This repository contains Ansible playbooks used to configure parts
of the hackerspace infrastructure:
* Kubernetes dependency deployment and upgrading
* Authorized SSH keys
* Authoritative DNS server configuration for hackerspace domains
* Door controller software deployment
Notably out of scope of this repository are:
* Kubernetes manifests of applications running on the Kubernetes cluster;
these can be found at https://git.k-space.ee/k-space/kube
* Mikrotik router and switch configurations backed up at
https://git.k-space.ee/k-space/rosdump
## Contents
Once you've checked out the repo, you can try pinging the hosts:
```
ansible all -m ping
```
Check OS version:
```
ansible all -m shell -a "lsb_release -d"
```
Update SSH authorized keys on the hosts:
```
ansible-playbook update-ssh-config.yaml
```
Update `mjpg-streamer` on the door controllers:
```
ansible doors -m shell \
-a "ctr image pull harbor.k-space.ee/k-space/mjpg-streamer:latest"
```
For more playbooks, refer to the `*.yaml` files in this repository.
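For example, to preview what the Kubernetes node dependency playbook would change on a single worker (drop `--check` to apply for real):
```
ansible-playbook kubernetes-nodes.yaml --check --limit worker1.kube.k-space.ee
```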

ansible.cfg Normal file

@@ -0,0 +1,15 @@
[defaults]
inventory = inventory.yaml
nocows = 1
pattern =
deprecation_warnings = False
fact_caching = jsonfile
fact_caching_connection = ~/.ansible/k-space-fact-cache
fact_caching_timeout = 7200
remote_user = root
[ssh_connection]
control_path = ~/.ssh/cm-%%r@%%h:%%p
ssh_args = -o ControlMaster=auto -o ControlPersist=8h -F ssh_config
pipelining = True
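# Hosts defined in inventory.yaml can also be reached directly over SSH using the
# bundled config, e.g.: ssh -F ssh_config ns1.k-space.ee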

authoritative-nameserver.yaml Normal file

@@ -0,0 +1,82 @@
# Authoritative DNS server for the
# k-space.ee, kspace.ee and k6.ee domains and several member domains.
# Domain records are mostly managed by external-dns running on the Kubernetes cluster;
# additionally, cert-manager running on the Kubernetes cluster reads and writes DNS records.
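# A manual dynamic update using the readwrite TSIG key could look like this
# (hypothetical record on the documentation prefix, run on ns1.k-space.ee):
#   nsupdate -k /etc/bind/readwrite.key <<EOF
#   server 127.0.0.1
#   zone k-space.ee
#   update add example.k-space.ee. 300 IN A 192.0.2.10
#   send
#   EOF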
---
- name: Setup primary nameserver
hosts: ns1.k-space.ee
tasks:
- name: Make sure bind9 is installed
ansible.builtin.apt:
name: bind9
state: present
- name: Configure Bind
register: bind
copy:
dest: /etc/bind/named.conf
content: |
# This file is managed by Ansible
# https://git.k-space.ee/k-space/ansible/src/branch/main/authoritative-nameserver.yaml
# Do NOT modify manually
include "/etc/bind/named.conf.local";
include "/etc/bind/readwrite.key";
include "/etc/bind/readonly.key";
options {
directory "/var/cache/bind";
version "";
listen-on { any; };
listen-on-v6 { any; };
pid-file "/var/run/named/named.pid";
notify explicit; also-notify { 172.20.53.1; 172.20.53.2; 172.20.53.3; };
allow-recursion { none; };
recursion no;
check-names master ignore;
dnssec-validation no;
auth-nxdomain no;
};
# https://kb.isc.org/docs/aa-00723
acl allowed {
172.20.3.0/24;
172.20.4.0/24;
};
acl rejected { !allowed; any; };
zone "." {
type hint;
file "/var/lib/bind/db.root";
};
zone "k-space.ee" {
type master;
file "/var/lib/bind/db.k-space.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
zone "k6.ee" {
type master;
file "/var/lib/bind/db.k6.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
zone "kspace.ee" {
type master;
file "/var/lib/bind/db.kspace.ee";
allow-update { !rejected; key readwrite; };
allow-transfer { !rejected; key readonly; key readwrite; };
};
- name: Check Bind config
ansible.builtin.shell: "named-checkconf"
changed_when: false
- name: Reload Bind config
service:
name: bind9
state: reloaded
when: bind.changed

door-controllers.yaml Normal file

@@ -0,0 +1,67 @@
# For more information regarding door management refer to
# https://wiki.k-space.ee/en/hosting/doors
# journalctl -u mjpg_streamer@video0.service -f
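# journalctl -u godoor.service -f
# After pulling a newer image, the services can be restarted with e.g.
#   systemctl restart godoor.service mjpg_streamer@video0.service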
---
- name: Setup doors
hosts: doors
tasks:
- name: Make sure containerd is installed
ansible.builtin.apt:
name: containerd
state: present
- name: Copy systemd service for Doorboy controller
copy:
dest: /etc/systemd/system/godoor.service
content: |
[Unit]
Description=Doorboy service
Documentation=https://git.k-space.ee/k-space/godoor
After=network.target
[Service]
Environment=IMAGE=harbor.k-space.ee/k-space/godoor:latest
ExecStartPre=-ctr task kill --signal=9 %N
ExecStartPre=-ctr task rm %N
ExecStartPre=-ctr c rm %N
ExecStartPre=-ctr image pull $IMAGE
ExecStart=ctr run --rm --pid-file=/run/%N.pid --privileged --read-only --env-file=/etc/godoor --env=KDOORPI_API_ALLOWED=https://doorboy-proxy.k-space.ee/allowed --env=KDOORPI_API_LONGPOLL=https://doorboy-proxy.k-space.ee/longpoll --env=KDOORPI_API_SWIPE=https://doorboy-proxy.k-space.ee/swipe --env=KDOORPI_DOOR=%H --net-host --cwd /app $IMAGE %N /godoor
ExecStopPost=ctr task rm %N
ExecStopPost=ctr c rm %N
Restart=always
[Install]
WantedBy=multi-user.target
- name: Enable Doorboy controller
ansible.builtin.systemd:
enabled: true
state: restarted
daemon_reload: true
name: godoor.service
- name: Copy systemd service for mjpg-streamer
copy:
dest: /etc/systemd/system/mjpg_streamer@.service
content: |
[Unit]
Description=A server for streaming Motion-JPEG from a video capture device
Documentation=
After=network.target
ConditionPathExists=/dev/%I
[Service]
Environment=IMAGE=harbor.k-space.ee/k-space/mjpg-streamer:latest
StandardOutput=tty
Type=forking
ExecStartPre=-ctr task kill --signal=9 %p_%i
ExecStartPre=-ctr task rm %p_%i
ExecStartPre=-ctr c rm %p_%i
ExecStartPre=-ctr image pull $IMAGE
ExecStart=ctr run --tty -d --rm --pid-file=/run/%i.pid --privileged --read-only --net-host $IMAGE %p_%i /usr/local/bin/mjpg_streamer -i 'input_uvc.so -d /dev/%I -r 1280x720 -f 10' -o 'output_http.so -w /usr/share/mjpg_streamer/www'
ExecStopPost=ctr task rm %p_%i
ExecStopPost=ctr c rm %p_%i
PIDFile=/run/%i.pid
[Install]
WantedBy=multi-user.target
- name: Enable mjpg-streamer
ansible.builtin.systemd:
enabled: true
state: restarted
daemon_reload: true
name: mjpg_streamer@video0.service

inventory.yaml Normal file

@@ -0,0 +1,117 @@
---
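# Hosts are grouped by role; groups can be targeted directly, e.g. `ansible doors --list-hosts`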
all:
vars:
admins:
- lauri
- eaas
children:
# Documentation at
# https://wiki.k-space.ee/en/hosting/storage
nasgroup:
hosts:
nas.k-space.ee:
ansible_host: 172.23.0.7
offsite:
ansible_host: 78.28.64.17
ansible_port: 10648
admins:
- lauri
vars:
offsite_dataset: offsite/backup_zrepl
misc:
children:
nasgroup:
hosts:
ns1.k-space.ee:
ansible_host: 172.20.0.2
worker99.codemowers.cloud:
# Nvidia Tegra Jetson-AGX
ansible_host: 172.20.5.99
# Proxmox aka PVE (Proxmox Virtualization Environment) documented
# https://wiki.k-space.ee/hosting/proxmox
proxmox:
vars:
admins:
- lauri
- eaas
- rasmus
hosts:
pve1:
ansible_host: 172.21.20.1
pve2:
ansible_host: 172.21.20.2
pve8:
ansible_host: 172.21.20.8
pve9:
ansible_host: 172.21.20.9
# Kubernetes cluster setup documented at
# https://git.k-space.ee/k-space/kube
kubernetes:
children:
masters:
hosts:
master1.kube.k-space.ee:
ansible_host: 172.21.3.51
master2.kube.k-space.ee:
ansible_host: 172.21.3.52
master3.kube.k-space.ee:
ansible_host: 172.21.3.53
kubelets:
children:
# Monitoring nodes are hosted in privileged VLAN to scrape UPS-es
mon:
hosts:
mon1.kube.k-space.ee:
ansible_host: 172.21.3.61
mon2.kube.k-space.ee:
ansible_host: 172.21.3.62
mon3.kube.k-space.ee:
ansible_host: 172.21.3.63
# Longhorn nodes
storage:
hosts:
storage1.kube.k-space.ee:
ansible_host: 172.21.3.71
storage2.kube.k-space.ee:
ansible_host: 172.21.3.72
storage3.kube.k-space.ee:
ansible_host: 172.21.3.73
storage4.kube.k-space.ee:
ansible_host: 172.21.3.74
# Workers run bulk of the applications
workers:
hosts:
worker1.kube.k-space.ee:
ansible_host: 172.20.3.81
worker2.kube.k-space.ee:
ansible_host: 172.20.3.82
worker3.kube.k-space.ee:
ansible_host: 172.20.3.83
worker4.kube.k-space.ee:
ansible_host: 172.20.3.84
# For more information regarding door management refer to
# https://wiki.k-space.ee/en/hosting/doors
doors:
vars:
admins:
- arti
- lauri
- eaas
hosts:
grounddoor:
ansible_host: 100.102.3.1
frontdoor:
ansible_host: 100.102.3.2
backdoor:
ansible_host: 100.102.3.3
workshopdoor:
ansible_host: 100.102.3.4

known_hosts Normal file

@@ -0,0 +1,27 @@
# Use `ansible-playbook update-ssh-config.yaml` to update this file
100.102.3.3 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBN4SifLddYAz8CasmFwX5TQbiM8atAYMFuDQRchclHM0sq9Pi8wRxSZK8SHON4Y7YFsIY+cXnQ2Wx4FpzKmfJYE= # backdoor
100.102.3.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE8/E7PDqTrTdU+MFurHkIPzTBTGcSJqXuv5n0Ugd/IlvOr2v+eYi3ma91pSBmF5Hjy9foWypCLZfH+vWMkV0gs= # frontdoor
100.102.3.1 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFcH8D2AhnESw3uu2f4EHBhT9rORQQJJ3TlbwN+kro5tRZsZk4p3MKabBiuCSZw2KWjfu0MY4yHSCrUUQrggJDM= # grounddoor
172.21.3.51 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMYy07yLlOiFvXzmVDIULS9VDCMz7T+qOq4M+x8Lo3KEKamI6ZD737mvimPTW6K1FRBzzq67Mq495UnoFKVnQWE= # master1.kube.k-space.ee
172.21.3.52 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKRFfYDaTH58FUw+9stBVsyCviaPCGEbe9Y1a9WKvj98S7m+qU03YvtfPkRfEH/3iXHDvngEDVpJrTWW4y6e6MI= # master2.kube.k-space.ee
172.21.3.53 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIqIepuMkMo/KO3bb4X6lgb6YViAifPmgHXVrbtHwbOZLll5Qqr4pXdLDxkuZsmiE7iZBw2gSzZLcNMGdDEnWrY= # master3.kube.k-space.ee
172.21.3.61 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCJ9XgDz2NEzvjw/nDmRIKUJAmNqzsaXMJn4WFiWfTz1x2HrRcXgY3UXKWUxUvJO1jJ7hIvyE+V/8UtwYRDP1uY= # mon1.kube.k-space.ee
172.21.3.62 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLveng7H/2Gek+HYDYRWFD0Dy+4l/zjrbF2mnnkBI5CFOtqK0zwBh41IlizkpmmI5fqEIXwhLFHZEWXbUvev5oo= # mon2.kube.k-space.ee
172.21.3.63 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMMgOIL43dgCYlwAI2O269iHxo7ymweG7NoXjnk2F529G5mP+mp5We4lDZEJVyLYtemvhQ2hEHI/WVPWy3SNiuM= # mon3.kube.k-space.ee
172.23.0.7 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC15tWIbuBqd4UZLaRbpb6oTlwniS4cg2IYZYe5ys352azj2kzOnvtCGiPo0fynFadwfDHtge9JjK6Efwl87Wgc= # nas.k-space.ee
172.20.0.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO77ffkJi903aA6cM7HnFfSyYbPP4jkydI/+/tIGeMv+c9BYOE27n+ylNERaEhYkyddIx93MB4M6GYRyQOjLWSc= # ns1.k-space.ee
[78.28.64.17]:10648 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE7J61p3YzsbRAYtXIrhQUeqc47LuVw1I38egHzi/kLG+CFPsyB9krd29yJMyLRjyM+m5qUjoxNiWK/x0g3jKOI= # offsite
172.21.20.1 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHLHc3T/J5G1CIf33XeniJk5+D0cpaXe0OkHmpCQ3DoZC3KkFBpA+/U1mlo+qb8xf/GrMj6BMMMLXKSUxbEVGaU= # pve1
172.21.20.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFGSRetFdHExRT69pHJAcuhqzAu+Xx4K2AEmWJhUZ2JYF7aa0JbltiYQs58Bpx9s9NA793tiHLZXABy56dI+D9Q= # pve2
172.21.20.8 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMzNvX3ga56EELcI9gV7moyFdKllSwb81V2tCWIjhFVSFTo3QKH/gX/MBnjcs+RxeVV3GF7zIIv8492bCvgiO9s= # pve8
172.21.20.9 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNU4YzKSzzUSnAgh4L1DF3dlC1VEaKVaIeTgsL5VJ0UMqjPr+8QMjIvo28cSLfIQYtfoQbt7ASVsm0uDQvKOldM= # pve9
172.21.3.71 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI2jy8EsMo7Voor4URCMdgiEzc0nmYDowV4gB2rZ6hnH7bcKGdaODsCyBH6nvbitgnESCC8136RmdxCnO9/TuJ0= # storage1.kube.k-space.ee
172.21.3.72 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKxa2PbOj7bV0AUkBZuPkQZ/3ZMeh1mUCD+rwB4+sXbvTc+ca+xgcPGdAozbY/cUA4GdaKelhjI9DEC46MeFymY= # storage2.kube.k-space.ee
172.21.3.73 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGYqNHAxwwoZqne/uv5syRb+tEwpbaGeK8oct4IjIHcmPdU32JlMiSqLX7d58t/b8tqE1z2rM4gCc4bpzvNrHMQ= # storage3.kube.k-space.ee
172.21.3.74 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI+FRuwbrUpMDg9gKf6AqcfovEkt8r5SgB4JXEuMD+I6pp+2PfbxMwrXQ8Xg3oHW+poG413KWw4FZOWv2gH4CEQ= # storage4.kube.k-space.ee
172.20.3.81 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPnmGiEWtWnNNcF872fhYKCD07QwOb75BDEwN3fC4QYmBAbiN0iX/UH96r02V5f7uga3a07/xxt5P0cfEOdtQwQ= # worker1.kube.k-space.ee
172.20.3.82 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBkSNAYeugxGvNmV3biY1s0BWPCEw3g3H0VWLomu/vPbg+GN10/A1pfgt62DHFCYDB6QZwkZM6HIFy8y0xhRl9g= # worker2.kube.k-space.ee
172.20.3.83 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBe+A9Bg54UwUvlPguKDyNAsX7mYbnfMOxhK2UP2YofPlzJ0KDUuH5mbmw76XWz0L6jhT6I7hyc0QsFBdO3ug68= # worker3.kube.k-space.ee
172.20.3.84 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKoNIL+kEYphi/yCdhIytxqRaucm2aTzFrmNN4gEjCrn4TK8A46fyqAuwmgyLQFm7RD5qcEKPWP57Cl0DhTU1T4= # worker4.kube.k-space.ee
172.20.5.99 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCoepYYnNMXkZ9dn4RSSMhFFsppPVkzmjkG3z9vK84454XkI4wizmhUlZ0p+Ovx2YbrjbKibfrrtk8RgWUMi0rY= # worker99.codemowers.cloud
100.102.3.4 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMpkSqEOyYrKXChxl6PAV+q0KypOPnKsXoXWO1JSZSIOwAs5YTzt8Q1Ryb+nQnAOlGj1AY1H7sRllTzdv0cA/EM= # workshopdoor

kubernetes-nodes.yaml Normal file

@@ -0,0 +1,173 @@
# This playbook sets up dependencies for the Kubernetes cluster nodes
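# Apply with: ansible-playbook kubernetes-nodes.yaml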
---
- name: Reconfigure Kubernetes worker nodes
hosts:
- storage
- workers
tasks:
- name: Configure grub defaults
copy:
dest: "/etc/default/grub"
content: |
GRUB_DEFAULT=0
GRUB_TIMEOUT_STYLE=countdown
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash memhp_default_state=online"
GRUB_CMDLINE_LINUX="memhp_default_state=online rootflags=pquota"
register: grub_defaults
when: ansible_architecture == 'x86_64'
- name: Load grub defaults
ansible.builtin.shell: update-grub
when: grub_defaults.changed
- name: Ensure nfs-common is installed
ansible.builtin.apt:
name: nfs-common
state: present
- name: Reconfigure Kubernetes nodes
hosts: kubernetes
vars:
KUBERNETES_VERSION: v1.30.3
IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
tasks:
- name: Remove APT packages
ansible.builtin.apt:
name: "{{ item }}"
state: absent
loop:
- kubelet
- kubeadm
- kubectl
- name: Download kubectl, kubeadm, kubelet
ansible.builtin.get_url:
url: "https://cdn.dl.k8s.io/release/{{ KUBERNETES_VERSION }}/bin/linux/{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}/{{ item }}"
dest: "/usr/bin/{{ item }}-{{ KUBERNETES_VERSION }}"
mode: '0755'
loop:
- kubelet
- kubectl
- kubeadm
- name: Create symlinks for kubectl, kubeadm, kubelet
ansible.builtin.file:
src: "/usr/bin/{{ item }}-{{ KUBERNETES_VERSION }}"
dest: "/usr/bin/{{ item }}"
state: link
loop:
- kubelet
- kubectl
- kubeadm
register: kubelet
- name: Restart Kubelet
service:
name: kubelet
enabled: true
state: restarted
when: kubelet.changed
- name: Create /etc/systemd/system/kubelet.service
ansible.builtin.copy:
content: |
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/home/
Wants=network-online.target
After=network-online.target
[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
dest: /etc/systemd/system/kubelet.service
- name: Reconfigure shutdownGracePeriod
ansible.builtin.lineinfile:
path: /var/lib/kubelet/config.yaml
regexp: '^shutdownGracePeriod:'
line: 'shutdownGracePeriod: 5m'
- name: Reconfigure shutdownGracePeriodCriticalPods
ansible.builtin.lineinfile:
path: /var/lib/kubelet/config.yaml
regexp: '^shutdownGracePeriodCriticalPods:'
line: 'shutdownGracePeriodCriticalPods: 5m'
- name: Work around unattended-upgrades
ansible.builtin.lineinfile:
path: /lib/systemd/logind.conf.d/unattended-upgrades-logind-maxdelay.conf
regexp: '^InhibitDelayMaxSec='
line: 'InhibitDelayMaxSec=5m0s'
- name: Disable unnecessary services
ignore_errors: true
loop:
- gdm3
- snapd
- bluetooth
- multipathd
service:
name: "{{item}}"
state: stopped
enabled: false
- name: Reset /etc/containers/registries.conf
ansible.builtin.copy:
content: "unqualified-search-registries = [\"docker.io\"]\n"
dest: /etc/containers/registries.conf
register: registries
- name: Restart CRI-O
service:
name: cri-o
state: restarted
when: registries.changed
- name: Reset /etc/modules
ansible.builtin.copy:
content: |
overlay
br_netfilter
dest: /etc/modules
register: kernel_modules
- name: Load kernel modules
ansible.builtin.shell: "cat /etc/modules | xargs -L 1 -t modprobe"
when: kernel_modules.changed
- name: Reset /etc/sysctl.d/99-k8s.conf
ansible.builtin.copy:
content: |
net.ipv4.conf.all.accept_redirects = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.max_map_count = 524288
fs.inotify.max_user_instances = 1280
fs.inotify.max_user_watches = 655360
dest: /etc/sysctl.d/99-k8s.conf
register: sysctl
- name: Reload sysctl config
ansible.builtin.shell: "sysctl --system"
when: sysctl.changed
- name: Reconfigure kube-apiserver to use Passmower OIDC endpoint
ansible.builtin.template:
src: kube-apiserver.j2
dest: /etc/kubernetes/manifests/kube-apiserver.yaml
mode: '0600'
register: apiserver
when:
- inventory_hostname in groups["masters"]
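# After the apiserver restarts, OIDC logins are expected to go through the kubelogin (kubectl oidc-login) plugin against auth.k-space.ee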
- name: Restart kube-apiserver
ansible.builtin.shell: "killall kube-apiserver"
when: apiserver.changed

ssh_config Normal file

@@ -0,0 +1,211 @@
# Use `ansible-playbook update-ssh-config.yaml` to update this file
# Use `ssh -F ssh_config ...` to connect to target machine or
# Add `Include ~/path/to/this/ansible/ssh_config` in your ~/.ssh/config
Host backdoor 100.102.3.3
User root
Hostname 100.102.3.3
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host frontdoor 100.102.3.2
User root
Hostname 100.102.3.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host grounddoor 100.102.3.1
User root
Hostname 100.102.3.1
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master1.kube.k-space.ee 172.21.3.51
User root
Hostname 172.21.3.51
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master2.kube.k-space.ee 172.21.3.52
User root
Hostname 172.21.3.52
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master3.kube.k-space.ee 172.21.3.53
User root
Hostname 172.21.3.53
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon1.kube.k-space.ee 172.21.3.61
User root
Hostname 172.21.3.61
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon2.kube.k-space.ee 172.21.3.62
User root
Hostname 172.21.3.62
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon3.kube.k-space.ee 172.21.3.63
User root
Hostname 172.21.3.63
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host nas.k-space.ee 172.23.0.7
User root
Hostname 172.23.0.7
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host ns1.k-space.ee 172.20.0.2
User root
Hostname 172.20.0.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host offsite 78.28.64.17
User root
Hostname 78.28.64.17
Port 10648
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve1 172.21.20.1
User root
Hostname 172.21.20.1
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve2 172.21.20.2
User root
Hostname 172.21.20.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve8 172.21.20.8
User root
Hostname 172.21.20.8
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve9 172.21.20.9
User root
Hostname 172.21.20.9
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage1.kube.k-space.ee 172.21.3.71
User root
Hostname 172.21.3.71
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage2.kube.k-space.ee 172.21.3.72
User root
Hostname 172.21.3.72
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage3.kube.k-space.ee 172.21.3.73
User root
Hostname 172.21.3.73
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage4.kube.k-space.ee 172.21.3.74
User root
Hostname 172.21.3.74
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker1.kube.k-space.ee 172.20.3.81
User root
Hostname 172.20.3.81
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker2.kube.k-space.ee 172.20.3.82
User root
Hostname 172.20.3.82
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker3.kube.k-space.ee 172.20.3.83
User root
Hostname 172.20.3.83
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker4.kube.k-space.ee 172.20.3.84
User root
Hostname 172.20.3.84
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker99.codemowers.cloud 172.20.5.99
User root
Hostname 172.20.5.99
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host workshopdoor 100.102.3.4
User root
Hostname 100.102.3.4
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h

templates/kube-apiserver.j2 Normal file

@@ -0,0 +1,132 @@
apiVersion: v1
kind: Pod
metadata:
annotations:
kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: {{ IP }}:6443
creationTimestamp: null
labels:
component: kube-apiserver
tier: control-plane
name: kube-apiserver
namespace: kube-system
spec:
containers:
- command:
- kube-apiserver
- --advertise-address={{ IP }}
- --allow-privileged=true
- --authorization-mode=Node,RBAC
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --enable-admission-plugins=NodeRestriction
- --enable-bootstrap-token-auth=true
- --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
- --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
- --etcd-servers=https://127.0.0.1:2379
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --oidc-client-id=passmower.kubelogin
- --oidc-groups-claim=groups
- --oidc-issuer-url=https://auth.k-space.ee/
- --oidc-username-claim=sub
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --requestheader-allowed-names=front-proxy-client
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group
- --requestheader-username-headers=X-Remote-User
- --secure-port=6443
- --service-account-issuer=https://kubernetes.default.svc.cluster.local
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
image: registry.k8s.io/kube-apiserver:{{ KUBERNETES_VERSION }}
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 8
httpGet:
host: {{ IP }}
path: /livez
port: 6443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
name: kube-apiserver
readinessProbe:
failureThreshold: 3
httpGet:
host: {{ IP }}
path: /readyz
port: 6443
scheme: HTTPS
periodSeconds: 1
timeoutSeconds: 15
resources:
requests:
cpu: 250m
startupProbe:
failureThreshold: 24
httpGet:
host: {{ IP }}
path: /livez
port: 6443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
volumeMounts:
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
- mountPath: /etc/ca-certificates
name: etc-ca-certificates
readOnly: true
- mountPath: /etc/pki
name: etc-pki
readOnly: true
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /usr/local/share/ca-certificates
name: usr-local-share-ca-certificates
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usr-share-ca-certificates
readOnly: true
hostNetwork: true
priority: 2000001000
priorityClassName: system-node-critical
securityContext:
seccompProfile:
type: RuntimeDefault
volumes:
- hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
name: ca-certs
- hostPath:
path: /etc/ca-certificates
type: DirectoryOrCreate
name: etc-ca-certificates
- hostPath:
path: /etc/pki
type: DirectoryOrCreate
name: etc-pki
- hostPath:
path: /etc/kubernetes/pki
type: DirectoryOrCreate
name: k8s-certs
- hostPath:
path: /usr/local/share/ca-certificates
type: DirectoryOrCreate
name: usr-local-share-ca-certificates
- hostPath:
path: /usr/share/ca-certificates
type: DirectoryOrCreate
name: usr-share-ca-certificates
status: {}

update-ssh-config.yaml Normal file

@@ -0,0 +1,74 @@
# This playbook updates known_hosts and ssh_config files in this repository
# and authorized keys on target machines
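# Run with: ansible-playbook update-ssh-config.yaml
# and commit the regenerated ssh_config and known_hosts files back to this repository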
---
- name: Collect servers SSH public keys to known_hosts
hosts: localhost
connection: local
vars:
targets: "{{ hostvars[groups['all']] }}"
tasks:
- name: Generate ssh_config
ansible.builtin.copy:
dest: ssh_config
content: |
# Use `ansible-playbook update-ssh-config.yaml` to update this file
# Use `ssh -F ssh_config ...` to connect to target machine or
# Add `Include ~/path/to/ansible/ssh_config` in your ~/.ssh/config
{% for host in groups['all'] | sort %}
Host {{ [host, hostvars[host].get('ansible_host', host)] | unique | join(' ') }}
User root
Hostname {{ hostvars[host].get('ansible_host', host) }}
Port {{ hostvars[host].get('ansible_port', 22) }}
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
{% endfor %}
- name: Generate known_hosts
ansible.builtin.copy:
dest: known_hosts
content: |
# Use `ansible-playbook update-ssh-config.yaml` to update this file
{% for host in groups['all'] | sort %}
{{ lookup('ansible.builtin.pipe', 'ssh-keyscan -p %d -t ecdsa %s' % (
hostvars[host].get('ansible_port', 22),
hostvars[host].get('ansible_host', host))) }} # {{ host }}
{% endfor %}
- name: Pull authorized keys from Gitea
hosts: localhost
connection: local
vars:
targets: "{{ hostvars[groups['all']] }}"
tasks:
- name: Download user keys from https://git.k-space.ee
loop:
- arti
- eaas
- lauri
- rasmus
ansible.builtin.get_url:
url: https://git.k-space.ee/{{ item }}.keys
dest: "./{{ item }}.keys"
- name: Push authorized keys to targets
hosts:
- misc
- kubernetes
- doors
tasks:
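# Only hardware-backed keys (lines starting with "sk-") are included by the template below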
- name: Generate /root/.ssh/authorized_keys
ansible.builtin.copy:
dest: "/root/.ssh/authorized_keys"
owner: root
group: root
mode: '0644'
content: |
# Use `ansible-playbook update-ssh-config.yaml` from https://git.k-space.ee/k-space/ansible/ to update this file
{% for user in admins | unique | sort %}
{% for line in lookup("ansible.builtin.file", user + ".keys").split("\n") %}
{% if line.startswith("sk-") %}
{{ line }} # {{ user }}
{% endif %}
{% endfor %}
{% endfor %}

zrepl/playbook.yml Normal file

@@ -0,0 +1,49 @@
# Referenced/linked and documented by https://wiki.k-space.ee/en/hosting/storage#zrepl
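# Apply with: ansible-playbook zrepl/playbook.yml
# Replication state can be inspected on the hosts with `zrepl status`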
---
- name: zrepl
hosts: nasgroup
tasks:
- name: 'apt: zrepl gpg'
ansible.builtin.get_url:
url: 'https://zrepl.cschwarz.com/apt/apt-key.asc'
dest: /usr/share/keyrings/zrepl.asc
- name: 'apt: zrepl repo'
apt_repository:
repo: 'deb [arch=amd64 signed-by=/usr/share/keyrings/zrepl.asc] https://zrepl.cschwarz.com/apt/debian bookworm main'
- name: 'apt: ensure packages'
apt:
state: latest
pkg: zrepl
- name: 'zrepl: ensure config'
ansible.builtin.template:
src: "zrepl_{{ansible_hostname}}.yml.j2"
dest: /etc/zrepl/zrepl.yml
mode: '0600'
register: zreplconf
- name: 'zrepl: restart service after config change'
when: zreplconf.changed
service:
state: restarted
enabled: true
name: zrepl
- name: 'zrepl: ensure service'
when: not zreplconf.changed
service:
state: started
enabled: true
name: zrepl
# avoid accidental conflicts of changes on recv (would err 'will not overwrite without force')
- name: 'zfs: ensure recv mountpoint=off'
hosts: offsite
tasks:
- name: 'zfs: get mountpoint'
shell: zfs get mountpoint -H -o value {{offsite_dataset}}
register: result
changed_when: false
- when: result.stdout != "none"
name: 'zfs: ensure mountpoint=off'
changed_when: true
shell: zfs set mountpoint=none {{offsite_dataset}}
register: result

zrepl/zrepl_nas.yml.j2 Normal file

@@ -0,0 +1,47 @@
global:
logging:
- type: syslog
format: logfmt
level: warn
monitoring:
- type: prometheus
listen: ':9811'
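# zrepl serves Prometheus metrics on this port at /metrics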
jobs:
- name: k6zrepl
type: snap
# "<" aka recursive, https://zrepl.github.io/configuration/filter_syntax.html
filesystems:
'nas/k6<': true
snapshotting:
type: periodic
prefix: zrepl_
interval: 1h
pruning:
keep:
# Keep non-zrepl snapshots
- type: regex
negate: true
regex: '^zrepl_'
- type: last_n
regex: "^zrepl_.*"
count: 4
- type: grid
regex: "^zrepl_.*"
grid: 4x1h | 6x4h | 3x1d | 2x7d
- name: k6zrepl_offsite_src
type: source
send:
encrypted: true # send ZFS-native encrypted datasets as-is; filesystems that are not encrypted will log at error level
serve:
type: tcp
listen: "{{ansible_host}}:35566" # NAT-ed to 193.40.103.250
clients: {
"78.28.64.17": "offsite.k-space.ee",
}
filesystems:
'nas/k6': true
snapshotting: # handled by the job above, separated for security (isolation of domains)
type: manual

@@ -0,0 +1,41 @@
global:
logging:
- type: syslog
format: logfmt
level: warn
monitoring:
- type: prometheus
listen: ':9811'
jobs:
- name: k6zrepl_offsite_dest
type: pull
recv:
placeholder:
encryption: off # https://zrepl.github.io/configuration/sendrecvoptions.html#placeholders
# bandwidth_limit:
# max: 9 MiB # 75.5 Mbps
connect:
type: tcp
address: '193.40.103.250:35566' # firewall whitelisted to offsite
root_fs: {{offsite_dataset}}
interval: 10m # start interval, does nothing when no snapshots to recv
replication:
concurrency:
steps: 2
pruning:
keep_sender: # offsite does not dictate nas snapshot policy
- type: regex
regex: '.*'
keep_receiver:
# Keep non-zrepl snapshots
- negate: true
type: regex
regex: "^zrepl_"
- type: last_n
regex: "^zrepl_"
count: 4
- type: grid
regex: "^zrepl_"
grid: 4x1h | 6x4h | 3x1d | 2x7d