Compare commits

..

22 Commits

Author SHA1 Message Date
e89d045f38 goredirect: add nopath env var 2024-09-13 21:54:49 +03:00
7e70315514 monitoring: Fix snmp-exporter 2024-09-12 22:15:10 +03:00
af5a048bcd replace ups 2024-09-12 21:54:46 +03:00
0005219f81 monitoring: Fix mikrotik-exporter formatting 2024-09-12 21:48:43 +03:00
813bb32e48 monitoring: Update UPS-es 2024-09-12 21:47:20 +03:00
0efae7baf9 unschedule harbor from storage nodes 2024-09-12 19:48:51 +03:00
be90b4e266 monitoring: Update mikrotik-exporter 2024-09-09 22:19:46 +03:00
999d17c384 rosdump: Use codemowers/git image 2024-09-09 08:45:21 +03:00
bacef8d438 remove logmower 2024-09-08 23:54:32 +03:00
60d1ba9b18 monitoring: Bump mikrotik-exporter again 2024-09-06 12:10:45 +03:00
dcb80e6638 monitoring: Bump mikrotik-exporter 2024-09-06 11:55:49 +03:00
95e0f97db2 grafana: Specify OIDC scopes on Grafana side 2024-09-05 09:32:34 +03:00
f5a7b44ae6 grafana: Add groups OIDC scope 2024-09-05 09:29:16 +03:00
be7e1d9459 grafana: Assign editor role for hackerspace members 2024-09-05 09:23:41 +03:00
cd807ebcde grafana: Allow OIDC assignment to admin role 2024-09-05 09:04:02 +03:00
eaac7f61a7 monitoring: Pin specific mikrotik-exporter image 2024-09-04 23:29:37 +03:00
a0d5a585e4 add and configure calico ippool 2024-09-04 23:12:35 +03:00
1f8f288f95 monitoring: Update Mikrotik exporter 2024-09-04 22:33:15 +03:00
9de1881647 monitoring: Enable Prometheus admin API 2024-09-04 22:28:01 +03:00
28904cdd63 make calico use ipip encapsulation 2024-09-04 22:27:36 +03:00
0df188db36 monitoring: README syntax fix 2024-09-04 07:12:56 +03:00
a42b79b5ac monitoring: Add doc.crds.dev ref 2024-09-04 07:12:21 +03:00
13 changed files with 90 additions and 542 deletions

View File

@@ -1,20 +0,0 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: logmower
namespace: argocd
spec:
project: k-space.ee
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: logmower
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: logmower
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -18,6 +18,7 @@ spec:
availableScopes:
- openid
- profile
- groups
tokenEndpointAuthMethod: none
---
apiVersion: v1
@@ -53,10 +54,11 @@ data:
name = OAuth
icon = signin
enabled = true
empty_scopes = false
scopes = openid profile groups
allow_sign_up = true
use_pkce = true
role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || 'Viewer'
role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || contains(groups[*], 'k-space:floor') && 'Editor' || 'Viewer'
allow_assign_grafana_admin = true
[security]
disable_initial_admin_creation = true
---

View File

@@ -34,6 +34,8 @@ spec:
value: https://inventory.k-space.ee/m/inventory/add-slug/%s
- name: GOREDIRECT_FOUND
value: https://inventory.k-space.ee/m/inventory/%s/view
- name: GOREDIRECT_NOPATH
value: https://inventory.k-space.ee/m/inventory
- name: MONGO_URI
valueFrom:
secretKeyRef:

View File

@@ -730,8 +730,8 @@ spec:
app.kubernetes.io/component: core
annotations:
checksum/configmap: 9ea7f1881e4fe5b908355ee28e246b67c8c498d2f719dd74a5536a51ee2d9865
checksum/secret: 7827f00e118d39ccc4caad6df2df2125a0cef6b6ad9583cb30a6b17e62e1b934
checksum/secret-jobservice: f6fcc2a7c9a0224eefa0b4ed2deed3fb22335c417d5645067efdc1341de26bc7
checksum/secret: 0d2219f91d2afe8594c0136b9b35ea5048724958d8c76a501028f770b34398df
checksum/secret-jobservice: 555460412a789ff6b5f107e7a44d6deb7ce9d069b97350b3e9e088e4e5d15330
spec:
securityContext:
runAsUser: 10000
@@ -830,13 +830,6 @@ spec:
secret:
- name: psc
emptyDir: {}
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/exporter/exporter-dpl.yaml
apiVersion: apps/v1
@@ -974,8 +967,8 @@ spec:
annotations:
checksum/configmap: 3a35bef831e58536bf86670117b43e2913a4c1a60d0e74d948559d7a7d564684
checksum/configmap-env: 80e8b81abf755707210d6112ad65167a7d53088b209f63c603d308ef68c4cfad
checksum/secret: 35297960a512675e6dcdff9d387587916f748c2c2ca2b5b8e5cbe5853488971b
checksum/secret-core: 72ed9c9917dd47ba68b05273e113792198afa5e09a696689e1063fbaffc80034
checksum/secret: 66cf8ec37ca1e006ea224e0913c9deb407300393d221fe0564dee44e6b0174cd
checksum/secret-core: a4bf7ecaeb201e06638a18b9e941a4b0e66668e484d6084fd1844d2c25a6492c
spec:
securityContext:
runAsUser: 10000
@@ -1036,13 +1029,6 @@ spec:
- name: job-logs
persistentVolumeClaim:
claimName: harbor-jobservice
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/nginx/deployment.yaml
apiVersion: apps/v1
@@ -1133,13 +1119,6 @@ spec:
- name: certificate
secret:
secretName: harbor-ingress
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/portal/deployment.yaml
apiVersion: apps/v1
@@ -1224,13 +1203,6 @@ spec:
- name: portal-config
configMap:
name: "harbor-portal"
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/registry/registry-dpl.yaml
apiVersion: apps/v1
@@ -1275,9 +1247,9 @@ spec:
app.kubernetes.io/component: registry
annotations:
checksum/configmap: b6973055b0a56022c00f9460283665c292d00f4ec15c0b36ae334781fd72ff93
checksum/secret: b246f895959725e4164cb10bc8c1c5d4d50618736c48129c8ee233b126164339
checksum/secret-jobservice: 37d8a246aaaed2ca66ea98c8e6b0fd875de5cb0cf2660abd7bda98fa6d630ccb
checksum/secret-core: a3deaec6a79903eb0619162ab91a87581ae2da37bc3f894792a2f48912a2b7c8
checksum/secret: fbad596b28ac7eacc5280d30c332e45f389746bc7bd4fe312d81d20d787aa608
checksum/secret-jobservice: 50e965ac72128c882e5371663c8a24d54936984ec4596ee0beb3f5a35708571e
checksum/secret-core: f16bee9ef108e28e08e2d059c96c79edefb3daeb36709e49be6d0a9971247651
spec:
securityContext:
runAsUser: 10000
@@ -1403,13 +1375,6 @@ spec:
name: "harbor-registry"
- name: registry-data
emptyDir: {}
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
---
# Source: harbor/templates/metrics/metrics-svcmon.yaml
apiVersion: monitoring.coreos.com/v1

View File

@@ -143,49 +143,3 @@ redis:
addr: "dragonfly:6379"
username: ""
password: "MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge"
nginx:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
portal:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
core:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
jobservice:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
registry:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule

View File

@@ -1,382 +0,0 @@
---
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
name: frontend
spec:
displayName: Kubernetes pod log aggregator
uri: 'https://log.k-space.ee'
allowedGroups:
- k-space:kubernetes:developers
- k-space:kubernetes:admins
headerMapping:
email: Remote-Email
groups: Remote-Groups
name: Remote-Name
user: Remote-Username
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: logmower-shipper
spec:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 50%
selector:
matchLabels:
app: logmower-shipper
template:
metadata:
labels:
app: logmower-shipper
spec:
serviceAccountName: logmower-shipper
containers:
- name: logmower-shipper
image: logmower/shipper:latest
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MONGO_URI
valueFrom:
secretKeyRef:
name: logmower-mongodb-application-readwrite
key: connectionString.standard
ports:
- containerPort: 8000
name: metrics
securityContext:
readOnlyRootFilesystem: true
command:
- /app/log_shipper.py
- --parse-json
- --normalize-log-level
- --stream-to-log-level
- --merge-top-level
- --max-collection-size
- "10000000000"
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
volumes:
- name: etcmachineid
hostPath:
path: /etc/machine-id
- name: varlog
hostPath:
path: /var/log
tolerations:
- operator: "Exists"
effect: "NoSchedule"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: logging-logmower-shipper
subjects:
- kind: ServiceAccount
name: logmower-shipper
namespace: logmower
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: logmower-shipper
labels:
app: logmower-shipper
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-shipper
spec:
podSelector:
matchLabels:
app: logmower-shipper
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: monitoring
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ports:
- port: 27017
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-eventsource
spec:
podSelector:
matchLabels:
app: logmower-eventsource
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-frontend
spec:
podSelector:
matchLabels:
app: logmower-frontend
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: logmower-shipper
spec:
selector:
matchLabels:
app: logmower-shipper
podMetricsEndpoints:
- port: metrics
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: logmower-shipper
spec:
groups:
- name: logmower-shipper
rules:
- alert: LogmowerSingleInsertionErrors
annotations:
summary: Logmower shipper is having issues submitting log records
to database
expr: rate(logmower_insertion_error_count_total[30m]) > 0
for: 0m
labels:
severity: warning
- alert: LogmowerBulkInsertionErrors
annotations:
summary: Logmower shipper is having issues submitting log records
to database
expr: rate(logmower_bulk_insertion_error_count_total[30m]) > 0
for: 0m
labels:
severity: warning
- alert: LogmowerHighDatabaseLatency
annotations:
summary: Database operations are slow
expr: histogram_quantile(0.95, logmower_database_operation_latency_bucket) > 10
for: 1m
labels:
severity: warning
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: logmower
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: logmower-frontend@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: log.k-space.ee
http:
paths:
- pathType: Prefix
path: "/events"
backend:
service:
name: logmower-eventsource
port:
number: 3002
- pathType: Prefix
path: "/"
backend:
service:
name: logmower-frontend
port:
number: 8080
tls:
- hosts:
- "*.k-space.ee"
---
apiVersion: v1
kind: Service
metadata:
name: logmower-eventsource
spec:
type: ClusterIP
selector:
app: logmower-eventsource
ports:
- protocol: TCP
port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: logmower-frontend
spec:
type: ClusterIP
selector:
app: logmower-frontend
ports:
- protocol: TCP
port: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-frontend
spec:
selector:
matchLabels:
app: logmower-frontend
template:
metadata:
labels:
app: logmower-frontend
spec:
containers:
- name: logmower-frontend
image: logmower/frontend:latest
ports:
- containerPort: 8080
name: http
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
resources:
limits:
memory: 50Mi
requests:
cpu: 1m
memory: 20Mi
volumeMounts:
- name : nginx-cache
mountPath: /var/cache/nginx/
- name : nginx-config
mountPath: /var/config/nginx/
- name: var-run
mountPath: /var/run/
volumes:
- emptyDir: {}
name: nginx-cache
- emptyDir: {}
name: nginx-config
- emptyDir: {}
name: var-run
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logmower-eventsource
spec:
selector:
matchLabels:
app: logmower-eventsource
template:
metadata:
labels:
app: logmower-eventsource
spec:
containers:
- name: logmower-eventsource
image: logmower/eventsource:latest
ports:
- containerPort: 3002
name: nodejs
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
resources:
limits:
cpu: 500m
memory: 200Mi
requests:
cpu: 10m
memory: 100Mi
env:
- name: MONGODB_HOST
valueFrom:
secretKeyRef:
name: logmower-mongodb-application-readonly
key: connectionString.standard
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: logmower-mongodb
spec:
podSelector:
matchLabels:
app: logmower-mongodb-svc
policyTypes:
- Ingress
- Egress
ingress:
- from:
- podSelector: {}
ports:
- port: 27017
egress:
- to:
- podSelector:
matchLabels:
app: logmower-mongodb-svc
ports:
- port: 27017

View File

@@ -1 +0,0 @@
../shared/networkpolicy-base.yml

View File

@@ -32,6 +32,9 @@ Sample queries:
* [Disk space left](https://prom.k-space.ee/graph?g0.range_input=1h&g0.expr=node_filesystem_avail_bytes&g0.tab=1)
* Minio [s3 egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_s3_traffic_sent_bytes%5B3m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [internode egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_inter_node_traffic_sent_bytes%5B2m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [storage used](https://prom.k-space.ee/graph?g0.expr=minio_node_disk_used_bytes&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h)
Another useful tool for exploring Prometheus operator custom resources is
[doc.crds.dev/github.com/prometheus-operator/prometheus-operator](https://doc.crds.dev/github.com/prometheus-operator/prometheus-operator@v0.75.0)
# For administrators
To reconfigure SNMP targets etc:
@@ -52,7 +55,14 @@ To set Mikrotik secrets:
```
kubectl create -n monitoring secret generic mikrotik-exporter \
--from-literal=MIKROTIK_PASSWORD='f7W!H*Pu' \
--from-literal=PROMETHEUS_BEARER_TOKEN=$(cat /dev/urandom | base64 | head -c 30)
--from-literal=username=netpoller \
--from-literal=password=...
```
To wipe timeseries:
```
for replica in $(seq 0 2); do
kubectl exec -n monitoring prometheus-prometheus-$replica -- wget --post-data='match[]={__name__=~"mikrotik_.*"}' http://127.0.0.1:9090/api/v1/admin/tsdb/delete_series -O -
done
```

View File

@@ -4,25 +4,29 @@ kind: Probe
metadata:
name: mikrotik
spec:
bearerTokenSecret:
name: mikrotik-exporter
key: PROMETHEUS_BEARER_TOKEN
basicAuth:
username:
name: mikrotik-exporter
key: username
password:
name: mikrotik-exporter
key: password
prober:
path: /metrics
url: mikrotik-exporter
module: full
targets:
staticConfig:
static:
- router.mgmt.k-space.ee
- sw_chaos.mgmt.k-space.ee
- sw_poe.mgmt.k-space.ee
- sw_mgmt.mgmt.k-space.ee
- sw_core02.mgmt.k-space.ee
- sw_cyber.mgmt.k-space.ee
- sw_ha.mgmt.k-space.ee
- sw_asocial.mgmt.k-space.ee
- sw_kitchen.mgmt.k-space.ee
- sw_core01.mgmt.k-space.ee
- 172.23.0.1
- 172.23.0.100
- 100.102.1.111
- 100.102.1.112
- 100.102.1.114
- 100.102.1.115
- 100.102.1.121
- 100.102.1.131
- 100.102.1.141
- 100.102.1.151
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
@@ -32,22 +36,30 @@ spec:
groups:
- name: mikrotik
rules:
- alert: MikrotikUplinkRedundancyLost
expr: mikrotik_interface_running{port=~"sfp-sfpplus[12]", instance!~"sw_core.*", instance!~"sw_mgmt.*"} == 0
for: 0m
- alert: MikrotikBondRedundancyLost
expr: mikrotik_bond_port_active == 0
for: 2m
labels:
severity: error
annotations:
summary: Switch uplink high availability lost
description: One of the two 10Gb optical links is malfunctioning
description: One of the two bonds has inactive member interface
- alert: MikrotikLinkRateDegraded
expr: mikrotik_interface_rate{port=~"sfp-sfpplus.*"} < 10000000000
for: 0m
expr: mikrotik_interface_link_rate_bps{interface=~"sfp-sfpplus.*"} < 10000000000
for: 2m
labels:
severity: error
annotations:
summary: 10Gb link degraded
description: One of the 10Gb links is running at lower speed
summary: SFP+ link degraded
description: One of the SFP+ (10G) links is running at lower speed
- alert: MikrotikLinkRateDegraded
expr: mikrotik_interface_link_rate_bps{interface=~"qsfpplus.*"} < 40000000000
for: 2m
labels:
severity: error
annotations:
summary: QSFP+ link degraded
description: One of the QSFP+ (40G) links is running at lower speed
---
apiVersion: apps/v1
kind: Deployment
@@ -63,20 +75,10 @@ spec:
metadata:
labels:
app: mikrotik-exporter
annotations:
co.elastic.logs/multiline.pattern: '^ '
co.elastic.logs/multiline.negate: "false"
co.elastic.logs/multiline.match: after
spec:
containers:
- name: mikrotik-exporter
image: mirror.gcr.io/codemowers/mikrotik-exporter:latest
env:
- name: MIKROTIK_USER
value: netpoller
envFrom:
- secretRef:
name: mikrotik-exporter
- name: mikrotik-exporter
image: mirror.gcr.io/codemowers/mikrotik-exporter:latest@sha256:3148c06a90b9836598695645b85c8514fa67ac9e7c644b3daf4853577fce0efb
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
@@ -94,13 +96,13 @@ spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- mikrotik-exporter
topologyKey: "kubernetes.io/hostname"
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- mikrotik-exporter
topologyKey: "kubernetes.io/hostname"
---
kind: Service
apiVersion: v1

View File

@@ -17,6 +17,7 @@ metadata:
name: prometheus
namespace: monitoring
spec:
enableAdminAPI: true
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone

View File

@@ -86,8 +86,8 @@ spec:
staticConfig:
static:
- ups-4.mgmt.k-space.ee
- ups-6.mgmt.k-space.ee
- ups-7.mgmt.k-space.ee
- ups-8.mgmt.k-space.ee
- ups-9.mgmt.k-space.ee
---
apiVersion: monitoring.coreos.com/v1

View File

@@ -21,7 +21,7 @@ data:
ssh rosdump@$target '/export' | grep -v '^# serial number =' | grep -v '^#.* by RouterOS' > $target
git add $target
done
if [[ `git status --porcelain` ]]; then
if [[ `git status --porcelain` ]]; then
echo "Attempting Git check in"
git commit -m "$(git diff --cached --shortstat)"
git push
@@ -62,8 +62,8 @@ spec:
restartPolicy: OnFailure
containers:
- name: rosdump
image: harbor.k-space.ee/k-space/microscript-base
imagePullPolicy: Always
image: codemowers/git
imagePullPolicy: IfNotPresent
args:
- bash
- /config/script.sh

View File

@@ -11,9 +11,23 @@ spec:
ipPools:
- blockSize: 26
cidr: 10.244.0.0/16
encapsulation: VXLANCrossSubnet
natOutgoing: Disabled
nodeSelector: all()
nodeSelector: all()
---
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
name: default-ipv4-ippool
spec:
allowedUses:
- Workload
- Tunnel
blockSize: 26
cidr: 10.244.0.0/16
ipipMode: Always
nodeSelector: all()
vxlanMode: Never
natOutgoing: true
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
@@ -52,6 +66,7 @@ spec:
- cidr: 193.40.103.39/32
- cidr: 62.65.250.36/32
- cidr: 62.65.250.37/32
- cidr: 62.65.250.38/32
- cidr: 62.65.250.2/32
- cidr: 193.40.103.25/32
---