Add Proxmox Ceph mesh network playbook

proxmox/README.md (new file)
@@ -0,0 +1,58 @@
# Proxmox Virtual Environment

## K-Space Hyper-Converged CEPH setup

1. Configure a mesh network

   ```sh
   ansible-playbook proxmox/ceph.yaml
   ```

   This configures the 40 Gbit interfaces and the FRR daemon with OpenFabric routing.
   Our CEPH setup uses a private IPv6 subnet for internal cluster communication:

   ```
   fdcc:a182:4fed::/64
   ```
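
   To sanity-check the mesh after the playbook run, a quick sketch (the peer address is pve91's loopback from `host_vars`; `show openfabric topology` is fabricd's topology view):

   ```sh
   vtysh -c "show openfabric topology"   # should list every mesh node once adjacencies are up
   ping -c 3 fdcc:a182:4fed::91          # reachability over the routed mesh
   ```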

2. Set up CEPH packages on all nodes

   ```sh
   pveceph install --repository no-subscription --version squid
   ```

3. Initialize CEPH

   ```sh
   pveceph init --network fdcc:a182:4fed::/64
   ```

4. Create CEPH monitors on each node

   ```sh
   pveceph mon create
   ```

5. Also create CEPH managers on each node

   ```sh
   pveceph mgr create
   ```
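
   With monitors and managers up, the cluster status should report a quorum and an active manager:

   ```sh
   ceph -s   # look for "mon: ... quorum" and "mgr: ... (active)"
   ```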

6. Create OSD daemons for each disk on all nodes

   NVMe drives get two OSD daemons per disk for better IOPS:

   ```sh
   pveceph osd create /dev/nvme0n1 --crush-device-class nvme --osds-per-device 2
   ```

   HDDs get just one:

   ```sh
   pveceph osd create /dev/sdX --crush-device-class hdd
   ```
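
   `ceph osd tree` then shows every OSD with its device class, which the CRUSH rules in the next step rely on:

   ```sh
   ceph osd tree   # the CLASS column should read nvme or hdd for each OSD
   ```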

7. Create CRUSH rules

   We want to separate HDD and NVMe storage into different storage buckets.
   The default `replicated_rule` would place data blocks on all of the available disks, regardless of device class.

   ```sh
   # ceph osd crush rule create-replicated <rule-name> <root> <failure-domain> <class>
   ceph osd crush rule create-replicated replicated_nvme default host nvme
   ceph osd crush rule create-replicated replicated_hdd default host hdd
   ```
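
   The new rules can be listed to confirm they exist before any pool references them:

   ```sh
   ceph osd crush rule ls   # expect replicated_rule, replicated_nvme, replicated_hdd
   ```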

8. Create CEPH pools for VM disk images

   This is done in the web UI of an individual node, under Ceph -> Pools.

   **NB:** Under Advanced, select the correct CRUSH rule (`replicated_nvme` or `replicated_hdd`)
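
   If you prefer the CLI, something along these lines should be equivalent (the pool names are just examples; verify the exact `pveceph pool create` option names against your PVE version):

   ```sh
   # pool names below are hypothetical examples
   pveceph pool create vm-nvme --crush_rule replicated_nvme --add_storages 1
   pveceph pool create vm-hdd --crush_rule replicated_hdd --add_storages 1
   ```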

9. Create a CephFS storage pool for ISO images

   First create a metadata server on each node:

   ```sh
   pveceph mds create
   ```

   Then create a CephFS on one of the individual nodes.

   After that is done, you can change the CRUSH rules of the `cephfs_data` and `cephfs_metadata` pools under Pools so that they use the NVMe drives.
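
   A CLI sketch of the same steps (again, verify the option names locally):

   ```sh
   pveceph fs create --name cephfs --add-storage 1
   ceph osd pool set cephfs_data crush_rule replicated_nvme
   ceph osd pool set cephfs_metadata crush_rule replicated_nvme
   ```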

proxmox/ceph.yaml (new file)
@@ -0,0 +1,49 @@

```yaml
---
- name: configure ceph on proxmox
  hosts:
    - pve90
    - pve91
    # - pve92
    - pve93
  gather_facts: false
  tasks:
    - name: configure mesh network
      ansible.builtin.template:
        src: templates/ceph.interfaces.j2
        dest: /etc/network/interfaces.d/ceph
      tags: network

    - name: ifup lo
      ansible.builtin.command:
        cmd: ifup lo
      tags: network

    - name: ifup mesh interfaces
      ansible.builtin.command:
        cmd: "ifup {{ item }}"
      loop: "{{ ceph_mesh.interfaces }}"
      loop_control:
        label: "ifup {{ item }}"
      tags: network

    - name: enable fabricd OpenFabric in FRR
      ansible.builtin.lineinfile:
        path: /etc/frr/daemons
        regexp: ^fabricd=.*$
        line: fabricd=yes
      notify: reload FRR
      tags: frr

    - name: configure FRR
      ansible.builtin.template:
        src: templates/frr.conf.j2
        dest: /etc/frr/frr.conf
      notify: reload FRR
      tags: frr

  handlers:
    - name: reload FRR
      ansible.builtin.systemd_service:
        name: frr.service
        state: reloaded
```
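
Both task groups are tagged, so stock `ansible-playbook` options can rerun just a slice of the setup:

```sh
ansible-playbook proxmox/ceph.yaml --tags network          # only the interface/mesh tasks
ansible-playbook proxmox/ceph.yaml --tags frr --limit pve90
```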

proxmox/host_vars/pve90.yaml (new file)
@@ -0,0 +1,7 @@

```yaml
---
ceph_mesh:
  address: fdcc:a182:4fed::90/128
  openfabric_net: 49.0000.0000.0090.00
  interfaces:
    - enp161s0
    - enp161s0d1
```

proxmox/host_vars/pve91.yaml (new file)
@@ -0,0 +1,7 @@

```yaml
---
ceph_mesh:
  address: fdcc:a182:4fed::91/128
  openfabric_net: 49.0000.0000.0091.00
  interfaces:
    - enp161s0
    - enp161s0d1
```

proxmox/host_vars/pve92.yaml (new file)
@@ -0,0 +1,7 @@

```yaml
---
ceph_mesh:
  address: fdcc:a182:4fed::92/128
  openfabric_net: 49.0000.0000.0092.00
  interfaces:
    - enp161s0
    - enp161s0d1
```

proxmox/host_vars/pve93.yaml (new file)
@@ -0,0 +1,7 @@

```yaml
---
ceph_mesh:
  address: fdcc:a182:4fed::93/128
  openfabric_net: 49.0000.0000.0093.00
  interfaces:
    - enp161s0
    - enp161s0d1
```

proxmox/templates/ceph.interfaces.j2 (new file)
@@ -0,0 +1,15 @@

```jinja
# {{ ansible_managed }}
# ifupdown interfaces(5) file for setting up the CEPH network

# Actual routing is handled by the FRR routing daemon
auto lo
iface lo inet loopback
    up ip -6 addr add {{ ceph_mesh.address }} dev lo

{% for iface in ceph_mesh.interfaces %}
auto {{ iface }}
iface {{ iface }} inet6 static
    pre-up ip link set $IFACE up
    mtu 9000

{% endfor %}
```
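
For pve90 this renders to roughly the following (assuming the default `ansible_managed` banner; all values come from `host_vars/pve90.yaml`):

```
# Ansible managed
# ifupdown interfaces(5) file for setting up the CEPH network

# Actual routing is handled by the FRR routing daemon
auto lo
iface lo inet loopback
    up ip -6 addr add fdcc:a182:4fed::90/128 dev lo

auto enp161s0
iface enp161s0 inet6 static
    pre-up ip link set $IFACE up
    mtu 9000

auto enp161s0d1
iface enp161s0d1 inet6 static
    pre-up ip link set $IFACE up
    mtu 9000
```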

proxmox/templates/frr.conf.j2 (new file)
@@ -0,0 +1,30 @@

```jinja
# {{ ansible_managed }}
frr version 10.2.2
frr defaults traditional
hostname {{ inventory_hostname }}
log syslog informational
no ip forwarding
service integrated-vtysh-config
!
{% for iface in ceph_mesh.interfaces %}
interface {{ iface }}
 ipv6 router openfabric 1
 openfabric csnp-interval 5
 openfabric hello-interval 1
 openfabric hello-multiplier 3
exit
!
{% endfor %}
interface lo
 ipv6 router openfabric 1
 openfabric csnp-interval 5
 openfabric hello-interval 1
 openfabric hello-multiplier 3
 openfabric passive
exit
!
router openfabric 1
 net {{ ceph_mesh.openfabric_net }}
 lsp-gen-interval 5
exit
!
```