Move Ansible directory to separate repo

This commit is contained in:
Lauri Võsandi 2024-08-12 21:41:36 +03:00
parent 68e936463b
commit 7c2b862ca8
15 changed files with 23 additions and 1018 deletions

View File

@ -1,5 +0,0 @@
#TODO:
- inventory
- running playbooks NB! about PWD
- ssh_config; updating
Include ssh_config (with known_hosts) to access all machines listed.

View File

@ -1,15 +0,0 @@
[defaults]
# Inventory used when none is given via -i
inventory = inventory.yml
# Suppress cowsay banner output
nocows = 1
pattern =
deprecation_warnings = False
# Cache gathered facts on disk (JSON) to speed up repeated runs
fact_caching = jsonfile
fact_caching_connection = ~/.ansible/k-space-fact-cache
# Cache lifetime in seconds (2 hours)
fact_caching_timeout = 7200
remote_user = root
[ssh_connection]
# %% escapes % for ini interpolation; expands to remote user/host/port
control_path = ~/.ssh/cm-%%r@%%h:%%p
# Reuse SSH connections for 8h; -F picks up the generated ssh_config
ssh_args = -o ControlMaster=auto -o ControlPersist=8h -F ssh_config
pipelining = True

View File

@ -1,76 +0,0 @@
---
# Configure the primary (master) BIND nameserver for k-space zones.
- name: Setup primary nameserver
  hosts: ns1.k-space.ee
  tasks:
    - name: Make sure bind9 is installed
      ansible.builtin.apt:
        name: bind9
        state: present
    - name: Configure Bind
      register: bind
      ansible.builtin.copy:
        dest: /etc/bind/named.conf
        content: |
          # This file is managed by Ansible
          # https://git.k-space.ee/k-space/kube/src/branch/master/ansible-bind-primary.yml
          # Do NOT modify manually
          include "/etc/bind/named.conf.local";
          include "/etc/bind/readwrite.key";
          include "/etc/bind/readonly.key";
          options {
            directory "/var/cache/bind";
            version "";
            listen-on { any; };
            listen-on-v6 { any; };
            pid-file "/var/run/named/named.pid";
            notify explicit; also-notify { 172.20.53.1; 172.20.53.2; 172.20.53.3; };
            allow-recursion { none; };
            recursion no;
            check-names master ignore;
            dnssec-validation no;
            auth-nxdomain no;
          };
          # https://kb.isc.org/docs/aa-00723
          acl allowed {
            172.20.3.0/24;
            172.20.4.0/24;
          };
          acl rejected { !allowed; any; };
          zone "." {
            type hint;
            file "/var/lib/bind/db.root";
          };
          zone "k-space.ee" {
            type master;
            file "/var/lib/bind/db.k-space.ee";
            allow-update { !rejected; key readwrite; };
            allow-transfer { !rejected; key readonly; key readwrite; };
          };
          zone "k6.ee" {
            type master;
            file "/var/lib/bind/db.k6.ee";
            allow-update { !rejected; key readwrite; };
            allow-transfer { !rejected; key readonly; key readwrite; };
          };
          zone "kspace.ee" {
            type master;
            file "/var/lib/bind/db.kspace.ee";
            allow-update { !rejected; key readwrite; };
            allow-transfer { !rejected; key readonly; key readwrite; };
          };
    - name: Check Bind config
      # Validation only — never report "changed"; no shell features needed
      ansible.builtin.command: "named-checkconf"
      changed_when: false
    - name: Reload Bind config
      ansible.builtin.service:
        name: bind9
        state: reloaded
      when: bind.changed

View File

@ -1,65 +0,0 @@
---
# ansible doors -m shell -a "ctr image pull harbor.k-space.ee/k-space/mjpg-streamer:latest"
# journalctl -u mjpg_streamer@video0.service -f
# Referenced/linked and documented by https://wiki.k-space.ee/en/hosting/doors
- name: Setup doors
  hosts: doors
  tasks:
    - name: Make sure containerd is installed
      ansible.builtin.apt:
        name: containerd
        state: present
    - name: Copy systemd service for Doorboy controller  # https://git.k-space.ee/k-space/godoor
      ansible.builtin.copy:
        dest: /etc/systemd/system/godoor.service
        content: |
          [Unit]
          Description=Doorboy service
          Documentation=https://git.k-space.ee/k-space/godoor
          After=network.target
          [Service]
          Environment=IMAGE=harbor.k-space.ee/k-space/godoor:latest
          ExecStartPre=-ctr task kill --signal=9 %N
          ExecStartPre=-ctr task rm %N
          ExecStartPre=-ctr c rm %N
          ExecStartPre=-ctr image pull $IMAGE
          ExecStart=ctr run --rm --pid-file=/run/%N.pid --privileged --read-only --env-file=/etc/godoor --env=KDOORPI_API_ALLOWED=https://doorboy-proxy.k-space.ee/allowed --env=KDOORPI_API_LONGPOLL=https://doorboy-proxy.k-space.ee/longpoll --env=KDOORPI_API_SWIPE=https://doorboy-proxy.k-space.ee/swipe --env=KDOORPI_DOOR=%H --net-host --cwd /app $IMAGE %N /godoor
          ExecStopPost=ctr task rm %N
          ExecStopPost=ctr c rm %N
          Restart=always
          [Install]
          WantedBy=multi-user.target
    - name: Enable Doorboy controller
      ansible.builtin.systemd:
        state: restarted
        daemon_reload: true
        name: godoor.service
    - name: Copy systemd service for mjpg-streamer  # https://git.k-space.ee/k-space/mjpg-streamer
      ansible.builtin.copy:
        dest: /etc/systemd/system/mjpg_streamer@.service
        content: |
          [Unit]
          Description=A server for streaming Motion-JPEG from a video capture device
          After=network.target
          ConditionPathExists=/dev/%I
          [Service]
          Environment=IMAGE=harbor.k-space.ee/k-space/mjpg-streamer:latest
          StandardOutput=tty
          Type=forking
          ExecStartPre=-ctr task kill --signal=9 %p_%i
          ExecStartPre=-ctr task rm %p_%i
          ExecStartPre=-ctr c rm %p_%i
          ExecStartPre=-ctr image pull $IMAGE
          ExecStart=ctr run --tty -d --rm --pid-file=/run/%i.pid --privileged --read-only --net-host $IMAGE %p_%i /usr/local/bin/mjpg_streamer -i 'input_uvc.so -d /dev/%I -r 1280x720 -f 10' -o 'output_http.so -w /usr/share/mjpg_streamer/www'
          ExecStopPost=ctr task rm %p_%i
          ExecStopPost=ctr c rm %p_%i
          PIDFile=/run/%i.pid
          [Install]
          WantedBy=multi-user.target
    - name: Enable mjpg-streamer
      ansible.builtin.systemd:
        state: restarted
        daemon_reload: true
        name: mjpg_streamer@video0.service

View File

@ -1,83 +0,0 @@
# This file is linked from /README.md as 'all infra'.
##### Not otherwise linked:
# Homepage: https://git.k-space.ee/k-space/homepage (on GitLab)
# Slack: https://k-space-ee.slack.com
# Routers/Switches: https://git.k-space.ee/k-space/rosdump
all:
vars:
admins:
- lauri
- eaas
extra_admins: []
children:
# https://wiki.k-space.ee/en/hosting/storage
nasgroup:
hosts:
nas.k-space.ee: { ansible_host: 172.23.0.7 }
offsite:
ansible_host: 78.28.64.17
ansible_port: 10648
vars:
offsite_dataset: offsite/backup_zrepl
misc:
children:
nasgroup:
hosts:
# https://git.k-space.ee/k-space/kube: bind/README.md (primary DNS, PVE VM)
ns1.k-space.ee: { ansible_host: 172.20.0.2 }
worker99.codemowers.cloud: { ansible_host: 172.20.5.99 } # Nvidia Tegra Jetson-AGX
# https://wiki.k-space.ee/hosting/proxmox (depends on nas.k-space.ee)
proxmox: # aka PVE, Proxmox Virtualization Environment
vars:
extra_admins:
- rasmus
hosts:
pve1: { ansible_host: 172.21.20.1 }
pve2: { ansible_host: 172.21.20.2 }
pve8: { ansible_host: 172.21.20.8 }
pve9: { ansible_host: 172.21.20.9 }
# https://git.k-space.ee/k-space/kube: README.md
# CLUSTER.md (PVE VMs + external nas.k-space.ee)
kubernetes:
children:
masters:
hosts:
master1.kube.k-space.ee: { ansible_host: 172.21.3.51 }
master2.kube.k-space.ee: { ansible_host: 172.21.3.52 }
master3.kube.k-space.ee: { ansible_host: 172.21.3.53 }
kubelets:
children:
mon: # they sit in a privileged VLAN
hosts:
mon1.kube.k-space.ee: { ansible_host: 172.21.3.61 }
mon2.kube.k-space.ee: { ansible_host: 172.21.3.62 }
mon3.kube.k-space.ee: { ansible_host: 172.21.3.63 }
storage: # longhorn, to be replaced with a more direct CSI
hosts:
storage1.kube.k-space.ee: { ansible_host: 172.21.3.71 }
storage2.kube.k-space.ee: { ansible_host: 172.21.3.72 }
storage3.kube.k-space.ee: { ansible_host: 172.21.3.73 }
storage4.kube.k-space.ee: { ansible_host: 172.21.3.74 }
workers:
hosts:
worker1.kube.k-space.ee: { ansible_host: 172.20.3.81 }
worker2.kube.k-space.ee: { ansible_host: 172.20.3.82 }
worker3.kube.k-space.ee: { ansible_host: 172.20.3.83 }
worker4.kube.k-space.ee: { ansible_host: 172.20.3.84 }
# https://wiki.k-space.ee/en/hosting/doors
# See also: https://git.k-space.ee/k-space/kube: camtiler/README.md
doors:
vars:
extra_admins:
- arti
hosts:
grounddoor: { ansible_host: 100.102.3.1 }
frontdoor: { ansible_host: 100.102.3.2 }
backdoor: { ansible_host: 100.102.3.3 }
workshopdoor: { ansible_host: 100.102.3.4 }

View File

@ -1,27 +0,0 @@
# Use `ansible-playbook update-ssh-config.yml` to update this file
100.102.3.3 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBN4SifLddYAz8CasmFwX5TQbiM8atAYMFuDQRchclHM0sq9Pi8wRxSZK8SHON4Y7YFsIY+cXnQ2Wx4FpzKmfJYE= # backdoor
100.102.3.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE8/E7PDqTrTdU+MFurHkIPzTBTGcSJqXuv5n0Ugd/IlvOr2v+eYi3ma91pSBmF5Hjy9foWypCLZfH+vWMkV0gs= # frontdoor
100.102.3.1 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFcH8D2AhnESw3uu2f4EHBhT9rORQQJJ3TlbwN+kro5tRZsZk4p3MKabBiuCSZw2KWjfu0MY4yHSCrUUQrggJDM= # grounddoor
172.21.3.51 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMYy07yLlOiFvXzmVDIULS9VDCMz7T+qOq4M+x8Lo3KEKamI6ZD737mvimPTW6K1FRBzzq67Mq495UnoFKVnQWE= # master1.kube.k-space.ee
172.21.3.52 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKRFfYDaTH58FUw+9stBVsyCviaPCGEbe9Y1a9WKvj98S7m+qU03YvtfPkRfEH/3iXHDvngEDVpJrTWW4y6e6MI= # master2.kube.k-space.ee
172.21.3.53 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIqIepuMkMo/KO3bb4X6lgb6YViAifPmgHXVrbtHwbOZLll5Qqr4pXdLDxkuZsmiE7iZBw2gSzZLcNMGdDEnWrY= # master3.kube.k-space.ee
172.21.3.61 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCJ9XgDz2NEzvjw/nDmRIKUJAmNqzsaXMJn4WFiWfTz1x2HrRcXgY3UXKWUxUvJO1jJ7hIvyE+V/8UtwYRDP1uY= # mon1.kube.k-space.ee
172.21.3.62 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLveng7H/2Gek+HYDYRWFD0Dy+4l/zjrbF2mnnkBI5CFOtqK0zwBh41IlizkpmmI5fqEIXwhLFHZEWXbUvev5oo= # mon2.kube.k-space.ee
172.21.3.63 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMMgOIL43dgCYlwAI2O269iHxo7ymweG7NoXjnk2F529G5mP+mp5We4lDZEJVyLYtemvhQ2hEHI/WVPWy3SNiuM= # mon3.kube.k-space.ee
172.23.0.7 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC15tWIbuBqd4UZLaRbpb6oTlwniS4cg2IYZYe5ys352azj2kzOnvtCGiPo0fynFadwfDHtge9JjK6Efwl87Wgc= # nas.k-space.ee
172.20.0.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO77ffkJi903aA6cM7HnFfSyYbPP4jkydI/+/tIGeMv+c9BYOE27n+ylNERaEhYkyddIx93MB4M6GYRyQOjLWSc= # ns1.k-space.ee
[78.28.64.17]:10648 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE7J61p3YzsbRAYtXIrhQUeqc47LuVw1I38egHzi/kLG+CFPsyB9krd29yJMyLRjyM+m5qUjoxNiWK/x0g3jKOI= # offsite
172.21.20.1 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHLHc3T/J5G1CIf33XeniJk5+D0cpaXe0OkHmpCQ3DoZC3KkFBpA+/U1mlo+qb8xf/GrMj6BMMMLXKSUxbEVGaU= # pve1
172.21.20.2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFGSRetFdHExRT69pHJAcuhqzAu+Xx4K2AEmWJhUZ2JYF7aa0JbltiYQs58Bpx9s9NA793tiHLZXABy56dI+D9Q= # pve2
172.21.20.8 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMzNvX3ga56EELcI9gV7moyFdKllSwb81V2tCWIjhFVSFTo3QKH/gX/MBnjcs+RxeVV3GF7zIIv8492bCvgiO9s= # pve8
172.21.20.9 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNU4YzKSzzUSnAgh4L1DF3dlC1VEaKVaIeTgsL5VJ0UMqjPr+8QMjIvo28cSLfIQYtfoQbt7ASVsm0uDQvKOldM= # pve9
172.21.3.71 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI2jy8EsMo7Voor4URCMdgiEzc0nmYDowV4gB2rZ6hnH7bcKGdaODsCyBH6nvbitgnESCC8136RmdxCnO9/TuJ0= # storage1.kube.k-space.ee
172.21.3.72 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKxa2PbOj7bV0AUkBZuPkQZ/3ZMeh1mUCD+rwB4+sXbvTc+ca+xgcPGdAozbY/cUA4GdaKelhjI9DEC46MeFymY= # storage2.kube.k-space.ee
172.21.3.73 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGYqNHAxwwoZqne/uv5syRb+tEwpbaGeK8oct4IjIHcmPdU32JlMiSqLX7d58t/b8tqE1z2rM4gCc4bpzvNrHMQ= # storage3.kube.k-space.ee
172.21.3.74 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI+FRuwbrUpMDg9gKf6AqcfovEkt8r5SgB4JXEuMD+I6pp+2PfbxMwrXQ8Xg3oHW+poG413KWw4FZOWv2gH4CEQ= # storage4.kube.k-space.ee
172.20.3.81 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPnmGiEWtWnNNcF872fhYKCD07QwOb75BDEwN3fC4QYmBAbiN0iX/UH96r02V5f7uga3a07/xxt5P0cfEOdtQwQ= # worker1.kube.k-space.ee
172.20.3.82 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBkSNAYeugxGvNmV3biY1s0BWPCEw3g3H0VWLomu/vPbg+GN10/A1pfgt62DHFCYDB6QZwkZM6HIFy8y0xhRl9g= # worker2.kube.k-space.ee
172.20.3.83 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBe+A9Bg54UwUvlPguKDyNAsX7mYbnfMOxhK2UP2YofPlzJ0KDUuH5mbmw76XWz0L6jhT6I7hyc0QsFBdO3ug68= # worker3.kube.k-space.ee
172.20.3.84 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKoNIL+kEYphi/yCdhIytxqRaucm2aTzFrmNN4gEjCrn4TK8A46fyqAuwmgyLQFm7RD5qcEKPWP57Cl0DhTU1T4= # worker4.kube.k-space.ee
172.21.3.89 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCoepYYnNMXkZ9dn4RSSMhFFsppPVkzmjkG3z9vK84454XkI4wizmhUlZ0p+Ovx2YbrjbKibfrrtk8RgWUMi0rY= # worker9.kube.k-space.ee
100.102.3.4 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMpkSqEOyYrKXChxl6PAV+q0KypOPnKsXoXWO1JSZSIOwAs5YTzt8Q1Ryb+nQnAOlGj1AY1H7sRllTzdv0cA/EM= # workshopdoor

View File

@ -1,171 +0,0 @@
---
- name: Reconfigure Kubernetes worker nodes
hosts:
- storage
- workers
tasks:
- name: Configure grub defaults
copy:
dest: "/etc/default/grub"
content: |
GRUB_DEFAULT=0
GRUB_TIMEOUT_STYLE=countdown
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash memhp_default_state=online"
GRUB_CMDLINE_LINUX="memhp_default_state=online rootflags=pquota"
register: grub_defaults
when: ansible_architecture == 'x86_64'
- name: Load grub defaults
ansible.builtin.shell: update-grub
when: grub_defaults.changed
- name: Ensure nfs-common is installed
ansible.builtin.apt:
name: nfs-common
state: present
- name: Reconfigure Kubernetes nodes
hosts: kubernetes
vars:
KUBERNETES_VERSION: v1.30.3
IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
tasks:
- name: Remove APT packages
ansible.builtin.apt:
name: "{{ item }}"
state: absent
loop:
- kubelet
- kubeadm
- kubectl
- name: Download kubectl, kubeadm, kubelet
ansible.builtin.get_url:
url: "https://cdn.dl.k8s.io/release/{{ KUBERNETES_VERSION }}/bin/linux/{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}/{{ item }}"
dest: "/usr/bin/{{ item }}-{{ KUBERNETES_VERSION }}"
mode: '0755'
loop:
- kubelet
- kubectl
- kubeadm
- name: Create symlinks for kubectl, kubeadm, kubelet
ansible.builtin.file:
src: "/usr/bin/{{ item }}-{{ KUBERNETES_VERSION }}"
dest: "/usr/bin/{{ item }}"
state: link
loop:
- kubelet
- kubectl
- kubeadm
register: kubelet
- name: Restart Kubelet
service:
name: kubelet
enabled: true
state: restarted
when: kubelet.changed
- name: Create /etc/systemd/system/kubelet.service
  ansible.builtin.copy:
    content: |
      [Unit]
      Description=kubelet: The Kubernetes Node Agent
      Documentation=https://kubernetes.io/docs/home/
      Wants=network-online.target
      After=network-online.target
      [Service]
      # NOTE(review): the symlink created earlier in this playbook is
      # /usr/bin/kubelet, not /usr/local/bin/kubelet — pointing ExecStart
      # at /usr/local/bin would start a stale or missing binary.
      ExecStart=/usr/bin/kubelet
      Restart=always
      StartLimitInterval=0
      RestartSec=10
      [Install]
      WantedBy=multi-user.target
    dest: /etc/systemd/system/kubelet.service
- name: Reconfigure shutdownGracePeriod
ansible.builtin.lineinfile:
path: /var/lib/kubelet/config.yaml
regexp: '^shutdownGracePeriod:'
line: 'shutdownGracePeriod: 5m'
- name: Reconfigure shutdownGracePeriodCriticalPods
ansible.builtin.lineinfile:
path: /var/lib/kubelet/config.yaml
regexp: '^shutdownGracePeriodCriticalPods:'
line: 'shutdownGracePeriodCriticalPods: 5m'
- name: Work around unattended-upgrades
ansible.builtin.lineinfile:
path: /lib/systemd/logind.conf.d/unattended-upgrades-logind-maxdelay.conf
regexp: '^InhibitDelayMaxSec='
line: 'InhibitDelayMaxSec=5m0s'
- name: Disable unnecessary services
  # Best-effort: some of these units do not exist on every node
  ignore_errors: true
  loop:
    - gdm3
    - snapd
    - bluetooth
    - multipathd
  ansible.builtin.service:
    name: "{{ item }}"
    state: stopped
    enabled: false
- name: Reset /etc/containers/registries.conf
ansible.builtin.copy:
content: "unqualified-search-registries = [\"docker.io\"]\n"
dest: /etc/containers/registries.conf
register: registries
- name: Restart CRI-O
service:
name: cri-o
state: restarted
when: registries.changed
- name: Reset /etc/modules
ansible.builtin.copy:
content: |
overlay
br_netfilter
dest: /etc/modules
register: kernel_modules
- name: Load kernel modules
ansible.builtin.shell: "cat /etc/modules | xargs -L 1 -t modprobe"
when: kernel_modules.changed
- name: Reset /etc/sysctl.d/99-k8s.conf
ansible.builtin.copy:
content: |
net.ipv4.conf.all.accept_redirects = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.max_map_count = 524288
fs.inotify.max_user_instances = 1280
fs.inotify.max_user_watches = 655360
dest: /etc/sysctl.d/99-k8s.conf
register: sysctl
- name: Reload sysctl config
ansible.builtin.shell: "sysctl --system"
when: sysctl.changed
- name: Reconfigure kube-apiserver to use Passmower OIDC endpoint
ansible.builtin.template:
src: kube-apiserver.j2
dest: /etc/kubernetes/manifests/kube-apiserver.yaml
mode: 600
register: apiserver
when:
- inventory_hostname in groups["masters"]
- name: Restart kube-apiserver
ansible.builtin.shell: "killall kube-apiserver"
when: apiserver.changed

View File

@ -1,211 +0,0 @@
# Use `ansible-playbook update-ssh-config.yml` to update this file
# Use `ssh -F ssh_config ...` to connect to target machine or
# Add `Include ~/path/to/this/kube/ssh_config` in your ~/.ssh/config
Host backdoor 100.102.3.3
User root
Hostname 100.102.3.3
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host frontdoor 100.102.3.2
User root
Hostname 100.102.3.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host grounddoor 100.102.3.1
User root
Hostname 100.102.3.1
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master1.kube.k-space.ee 172.21.3.51
User root
Hostname 172.21.3.51
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master2.kube.k-space.ee 172.21.3.52
User root
Hostname 172.21.3.52
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host master3.kube.k-space.ee 172.21.3.53
User root
Hostname 172.21.3.53
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon1.kube.k-space.ee 172.21.3.61
User root
Hostname 172.21.3.61
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon2.kube.k-space.ee 172.21.3.62
User root
Hostname 172.21.3.62
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host mon3.kube.k-space.ee 172.21.3.63
User root
Hostname 172.21.3.63
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host nas.k-space.ee 172.23.0.7
User root
Hostname 172.23.0.7
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host ns1.k-space.ee 172.20.0.2
User root
Hostname 172.20.0.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host offsite 78.28.64.17
User root
Hostname 78.28.64.17
Port 10648
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve1 172.21.20.1
User root
Hostname 172.21.20.1
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve2 172.21.20.2
User root
Hostname 172.21.20.2
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve8 172.21.20.8
User root
Hostname 172.21.20.8
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host pve9 172.21.20.9
User root
Hostname 172.21.20.9
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage1.kube.k-space.ee 172.21.3.71
User root
Hostname 172.21.3.71
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage2.kube.k-space.ee 172.21.3.72
User root
Hostname 172.21.3.72
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage3.kube.k-space.ee 172.21.3.73
User root
Hostname 172.21.3.73
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host storage4.kube.k-space.ee 172.21.3.74
User root
Hostname 172.21.3.74
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker1.kube.k-space.ee 172.20.3.81
User root
Hostname 172.20.3.81
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker2.kube.k-space.ee 172.20.3.82
User root
Hostname 172.20.3.82
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker3.kube.k-space.ee 172.20.3.83
User root
Hostname 172.20.3.83
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker4.kube.k-space.ee 172.20.3.84
User root
Hostname 172.20.3.84
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host worker9.kube.k-space.ee 172.21.3.89
User root
Hostname 172.21.3.89
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
Host workshopdoor 100.102.3.4
User root
Hostname 100.102.3.4
Port 22
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h

View File

@ -1,132 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
annotations:
kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: {{ IP }}:6443
creationTimestamp: null
labels:
component: kube-apiserver
tier: control-plane
name: kube-apiserver
namespace: kube-system
spec:
containers:
- command:
- kube-apiserver
- --advertise-address={{ IP }}
- --allow-privileged=true
- --authorization-mode=Node,RBAC
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --enable-admission-plugins=NodeRestriction
- --enable-bootstrap-token-auth=true
- --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
- --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
- --etcd-servers=https://127.0.0.1:2379
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --oidc-client-id=passmower.kubelogin
- --oidc-groups-claim=groups
- --oidc-issuer-url=https://auth.k-space.ee/
- --oidc-username-claim=sub
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --requestheader-allowed-names=front-proxy-client
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group
- --requestheader-username-headers=X-Remote-User
- --secure-port=6443
- --service-account-issuer=https://kubernetes.default.svc.cluster.local
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
image: registry.k8s.io/kube-apiserver:{{ KUBERNETES_VERSION }}
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 8
httpGet:
host: {{ IP }}
path: /livez
port: 6443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
name: kube-apiserver
readinessProbe:
failureThreshold: 3
httpGet:
host: {{ IP }}
path: /readyz
port: 6443
scheme: HTTPS
periodSeconds: 1
timeoutSeconds: 15
resources:
requests:
cpu: 250m
startupProbe:
failureThreshold: 24
httpGet:
host: {{ IP }}
path: /livez
port: 6443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
volumeMounts:
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
- mountPath: /etc/ca-certificates
name: etc-ca-certificates
readOnly: true
- mountPath: /etc/pki
name: etc-pki
readOnly: true
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
- mountPath: /usr/local/share/ca-certificates
name: usr-local-share-ca-certificates
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usr-share-ca-certificates
readOnly: true
hostNetwork: true
priority: 2000001000
priorityClassName: system-node-critical
securityContext:
seccompProfile:
type: RuntimeDefault
volumes:
- hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
name: ca-certs
- hostPath:
path: /etc/ca-certificates
type: DirectoryOrCreate
name: etc-ca-certificates
- hostPath:
path: /etc/pki
type: DirectoryOrCreate
name: etc-pki
- hostPath:
path: /etc/kubernetes/pki
type: DirectoryOrCreate
name: k8s-certs
- hostPath:
path: /usr/local/share/ca-certificates
type: DirectoryOrCreate
name: usr-local-share-ca-certificates
- hostPath:
path: /usr/share/ca-certificates
type: DirectoryOrCreate
name: usr-share-ca-certificates
status: {}

View File

@ -1,72 +0,0 @@
---
- name: Collect servers SSH public keys to known_hosts
hosts: localhost
connection: local
vars:
targets: "{{ hostvars[groups['all']] }}"
tasks:
- name: Generate ssh_config
ansible.builtin.copy:
dest: ssh_config
content: |
# Use `ansible-playbook update-ssh-config.yml` to update this file
# Use `ssh -F ssh_config ...` to connect to target machine or
# Add `Include ~/path/to/this/kube/ssh_config` in your ~/.ssh/config
{% for host in groups['all'] | sort %}
Host {{ [host, hostvars[host].get('ansible_host', host)] | unique | join(' ') }}
User root
Hostname {{ hostvars[host].get('ansible_host', host) }}
Port {{ hostvars[host].get('ansible_port', 22) }}
GlobalKnownHostsFile known_hosts
UserKnownHostsFile /dev/null
ControlMaster auto
ControlPersist 8h
{% endfor %}
- name: Generate known_hosts
ansible.builtin.copy:
dest: known_hosts
content: |
# Use `ansible-playbook update-ssh-config.yml` to update this file
{% for host in groups['all'] | sort %}
{{ lookup('ansible.builtin.pipe', 'ssh-keyscan -p %d -t ecdsa %s' % (
hostvars[host].get('ansible_port', 22),
hostvars[host].get('ansible_host', host))) }} # {{ host }}
{% endfor %}
- name: Pull authorized keys from Gitea
hosts: localhost
connection: local
vars:
targets: "{{ hostvars[groups['all']] }}"
tasks:
- name: Download https://git.k-space.ee/user.keys
loop:
- arti
- eaas
- lauri
- rasmus
ansible.builtin.get_url:
url: https://git.k-space.ee/{{ item }}.keys
dest: "./{{ item }}.keys"
- name: Push authorized keys to targets
hosts:
- misc
- kubernetes
- doors
tasks:
- name: Generate /root/.ssh/authorized_keys
ansible.builtin.copy:
dest: "/root/.ssh/authorized_keys"
owner: root
group: root
mode: '0644'
content: |
# Use `ansible-playbook update-ssh-config.yml` from https://git.k-space.ee/k-space/kube/ to update this file
{% for user in admins + extra_admins | unique | sort %}
{% for line in lookup("ansible.builtin.file", user + ".keys").split("\n") %}
{% if line.startswith("sk-") %}
{{ line }} # {{ user }}
{% endif %}
{% endfor %}
{% endfor %}

View File

@ -1,49 +0,0 @@
# Referenced/linked and documented by https://wiki.k-space.ee/en/hosting/storage#zrepl
- name: zrepl
hosts: nasgroup
tasks:
- name: 'apt: zrepl gpg'
ansible.builtin.get_url:
url: 'https://zrepl.cschwarz.com/apt/apt-key.asc'
dest: /usr/share/keyrings/zrepl.asc
- name: 'apt: zrepl repo'
apt_repository:
repo: 'deb [arch=amd64 signed-by=/usr/share/keyrings/zrepl.asc] https://zrepl.cschwarz.com/apt/debian bookworm main'
- name: 'apt: ensure packages'
apt:
state: latest
pkg: zrepl
- name: 'zrepl: ensure config'
ansible.builtin.template:
src: "zrepl_{{ansible_hostname}}.yml.j2"
dest: /etc/zrepl/zrepl.yml
mode: 600
register: zreplconf
- name: 'zrepl: restart service after config change'
when: zreplconf.changed
service:
state: restarted
enabled: true
name: zrepl
- name: 'zrepl: ensure service'
when: not zreplconf.changed
service:
state: started
enabled: true
name: zrepl
# avoid accidental conflicts of changes on recv (would err 'will not overwrite without force')
- name: 'zfs: ensure recv mountpoint=off'
hosts: offsite
tasks:
- name: 'zfs: get mountpoint'
shell: zfs get mountpoint -H -o value {{offsite_dataset}}
register: result
changed_when: false
- when: result.stdout != "none"
name: 'zfs: ensure mountpoint=off'
changed_when: true
shell: zfs set mountpoint=none {{offsite_dataset}}
register: result

View File

@ -1,23 +0,0 @@
---
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
name: zrepl
spec:
scrapeTimeout: 30s
targets:
staticConfig:
static:
- nas.mgmt.k-space.ee:9811
# - offsite.k-space.ee:9811 # TODO: unreachable
relabelingConfigs:
- sourceLabels: [__param_target]
targetLabel: instance
- sourceLabels: [__param_target]
targetLabel: __address__
prober:
url: localhost
path: /metrics
metricRelabelings:
- sourceLabels: [__address__]
targetLabel: target

View File

@ -1,47 +0,0 @@
global:
logging:
- type: syslog
format: logfmt
level: warn
monitoring:
- type: prometheus
listen: ':9811'
jobs:
- name: k6zrepl
type: snap
# "<" aka recursive, https://zrepl.github.io/configuration/filter_syntax.html
filesystems:
'nas/k6<': true
snapshotting:
type: periodic
prefix: zrepl_
interval: 1h
pruning:
keep:
# Keep non-zrepl snapshots
- type: regex
negate: true
regex: '^zrepl_'
- type: last_n
regex: "^zrepl_.*"
count: 4
- type: grid
regex: "^zrepl_.*"
grid: 4x1h | 6x4h | 3x1d | 2x7d
- name: k6zrepl_offsite_src
type: source
send:
encrypted: true # zfs native already-encrypted, filesystems not encrypted will log to error-level
serve:
type: tcp
listen: "{{ansible_host}}:35566" # NAT-ed to 193.40.103.250
clients: {
"78.28.64.17": "offsite.k-space.ee",
}
filesystems:
'nas/k6': true
snapshotting: # handled by above job, separated for security (isolation of domains)
type: manual

View File

@ -1,41 +0,0 @@
global:
logging:
- type: syslog
format: logfmt
level: warn
monitoring:
- type: prometheus
listen: ':9811'
jobs:
- name: k6zrepl_offsite_dest
type: pull
recv:
placeholder:
encryption: off # https://zrepl.github.io/configuration/sendrecvoptions.html#placeholders
# bandwidth_limit:
# max: 9 MiB # 75.5 Mbps
connect:
type: tcp
address: '193.40.103.250:35566' # firewall whitelisted to offsite
root_fs: {{offsite_dataset}}
interval: 10m # start interval, does nothing when no snapshots to recv
replication:
concurrency:
steps: 2
pruning:
keep_sender: # offsite does not dictate nas snapshot policy
- type: regex
regex: '.*'
keep_receiver:
# Keep non-zrepl snapshots
- negate: true
type: regex
regex: "^zrepl_"
- type: last_n
regex: "^zrepl_"
count: 4
- type: grid
regex: "^zrepl_"
grid: 4x1h | 6x4h | 3x1d | 2x7d

View File

@ -1 +0,0 @@
../ansible/zrepl/prom.yaml

23
monitoring/zrepl.yaml Normal file
View File

@ -0,0 +1,23 @@
---
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
name: zrepl
spec:
scrapeTimeout: 30s
targets:
staticConfig:
static:
- nas.mgmt.k-space.ee:9811
# - offsite.k-space.ee:9811 # TODO: unreachable
relabelingConfigs:
- sourceLabels: [__param_target]
targetLabel: instance
- sourceLabels: [__param_target]
targetLabel: __address__
prober:
url: localhost
path: /metrics
metricRelabelings:
- sourceLabels: [__address__]
targetLabel: target