Compare commits

..

No commits in common. "master" and "master" have entirely different histories.

13 changed files with 542 additions and 90 deletions

View File

@ -0,0 +1,20 @@
---
# ArgoCD Application: deploys the logmower/ directory of the k-space kube
# repo into the "logmower" namespace, auto-pruning removed resources.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: logmower
  namespace: argocd
spec:
  project: k-space.ee
  source:
    repoURL: 'git@git.k-space.ee:k-space/kube.git'
    path: logmower
    targetRevision: HEAD
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: logmower
  syncPolicy:
    automated:
      # Delete cluster resources that disappear from git.
      prune: true
    syncOptions:
      - CreateNamespace=true

View File

@ -18,7 +18,6 @@ spec:
availableScopes: availableScopes:
- openid - openid
- profile - profile
- groups
tokenEndpointAuthMethod: none tokenEndpointAuthMethod: none
--- ---
apiVersion: v1 apiVersion: v1
@ -54,11 +53,10 @@ data:
name = OAuth name = OAuth
icon = signin icon = signin
enabled = true enabled = true
scopes = openid profile groups empty_scopes = false
allow_sign_up = true allow_sign_up = true
use_pkce = true use_pkce = true
role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || contains(groups[*], 'k-space:floor') && 'Editor' || Viewer role_attribute_path = contains(groups[*], 'k-space:kubernetes:admins') && 'Admin' || 'Viewer'
allow_assign_grafana_admin = true
[security] [security]
disable_initial_admin_creation = true disable_initial_admin_creation = true
--- ---

View File

@ -34,8 +34,6 @@ spec:
value: https://inventory.k-space.ee/m/inventory/add-slug/%s value: https://inventory.k-space.ee/m/inventory/add-slug/%s
- name: GOREDIRECT_FOUND - name: GOREDIRECT_FOUND
value: https://inventory.k-space.ee/m/inventory/%s/view value: https://inventory.k-space.ee/m/inventory/%s/view
- name: GOREDIRECT_NOPATH
value: https://inventory.k-space.ee/m/inventory
- name: MONGO_URI - name: MONGO_URI
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:

View File

@ -730,8 +730,8 @@ spec:
app.kubernetes.io/component: core app.kubernetes.io/component: core
annotations: annotations:
checksum/configmap: 9ea7f1881e4fe5b908355ee28e246b67c8c498d2f719dd74a5536a51ee2d9865 checksum/configmap: 9ea7f1881e4fe5b908355ee28e246b67c8c498d2f719dd74a5536a51ee2d9865
checksum/secret: 0d2219f91d2afe8594c0136b9b35ea5048724958d8c76a501028f770b34398df checksum/secret: 7827f00e118d39ccc4caad6df2df2125a0cef6b6ad9583cb30a6b17e62e1b934
checksum/secret-jobservice: 555460412a789ff6b5f107e7a44d6deb7ce9d069b97350b3e9e088e4e5d15330 checksum/secret-jobservice: f6fcc2a7c9a0224eefa0b4ed2deed3fb22335c417d5645067efdc1341de26bc7
spec: spec:
securityContext: securityContext:
runAsUser: 10000 runAsUser: 10000
@ -830,6 +830,13 @@ spec:
secret: secret:
- name: psc - name: psc
emptyDir: {} emptyDir: {}
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
--- ---
# Source: harbor/templates/exporter/exporter-dpl.yaml # Source: harbor/templates/exporter/exporter-dpl.yaml
apiVersion: apps/v1 apiVersion: apps/v1
@ -967,8 +974,8 @@ spec:
annotations: annotations:
checksum/configmap: 3a35bef831e58536bf86670117b43e2913a4c1a60d0e74d948559d7a7d564684 checksum/configmap: 3a35bef831e58536bf86670117b43e2913a4c1a60d0e74d948559d7a7d564684
checksum/configmap-env: 80e8b81abf755707210d6112ad65167a7d53088b209f63c603d308ef68c4cfad checksum/configmap-env: 80e8b81abf755707210d6112ad65167a7d53088b209f63c603d308ef68c4cfad
checksum/secret: 66cf8ec37ca1e006ea224e0913c9deb407300393d221fe0564dee44e6b0174cd checksum/secret: 35297960a512675e6dcdff9d387587916f748c2c2ca2b5b8e5cbe5853488971b
checksum/secret-core: a4bf7ecaeb201e06638a18b9e941a4b0e66668e484d6084fd1844d2c25a6492c checksum/secret-core: 72ed9c9917dd47ba68b05273e113792198afa5e09a696689e1063fbaffc80034
spec: spec:
securityContext: securityContext:
runAsUser: 10000 runAsUser: 10000
@ -1029,6 +1036,13 @@ spec:
- name: job-logs - name: job-logs
persistentVolumeClaim: persistentVolumeClaim:
claimName: harbor-jobservice claimName: harbor-jobservice
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
--- ---
# Source: harbor/templates/nginx/deployment.yaml # Source: harbor/templates/nginx/deployment.yaml
apiVersion: apps/v1 apiVersion: apps/v1
@ -1119,6 +1133,13 @@ spec:
- name: certificate - name: certificate
secret: secret:
secretName: harbor-ingress secretName: harbor-ingress
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
--- ---
# Source: harbor/templates/portal/deployment.yaml # Source: harbor/templates/portal/deployment.yaml
apiVersion: apps/v1 apiVersion: apps/v1
@ -1203,6 +1224,13 @@ spec:
- name: portal-config - name: portal-config
configMap: configMap:
name: "harbor-portal" name: "harbor-portal"
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
--- ---
# Source: harbor/templates/registry/registry-dpl.yaml # Source: harbor/templates/registry/registry-dpl.yaml
apiVersion: apps/v1 apiVersion: apps/v1
@ -1247,9 +1275,9 @@ spec:
app.kubernetes.io/component: registry app.kubernetes.io/component: registry
annotations: annotations:
checksum/configmap: b6973055b0a56022c00f9460283665c292d00f4ec15c0b36ae334781fd72ff93 checksum/configmap: b6973055b0a56022c00f9460283665c292d00f4ec15c0b36ae334781fd72ff93
checksum/secret: fbad596b28ac7eacc5280d30c332e45f389746bc7bd4fe312d81d20d787aa608 checksum/secret: b246f895959725e4164cb10bc8c1c5d4d50618736c48129c8ee233b126164339
checksum/secret-jobservice: 50e965ac72128c882e5371663c8a24d54936984ec4596ee0beb3f5a35708571e checksum/secret-jobservice: 37d8a246aaaed2ca66ea98c8e6b0fd875de5cb0cf2660abd7bda98fa6d630ccb
checksum/secret-core: f16bee9ef108e28e08e2d059c96c79edefb3daeb36709e49be6d0a9971247651 checksum/secret-core: a3deaec6a79903eb0619162ab91a87581ae2da37bc3f894792a2f48912a2b7c8
spec: spec:
securityContext: securityContext:
runAsUser: 10000 runAsUser: 10000
@ -1375,6 +1403,13 @@ spec:
name: "harbor-registry" name: "harbor-registry"
- name: registry-data - name: registry-data
emptyDir: {} emptyDir: {}
nodeSelector:
dedicated: storage
tolerations:
- effect: NoSchedule
key: dedicated
operator: Equal
value: storage
--- ---
# Source: harbor/templates/metrics/metrics-svcmon.yaml # Source: harbor/templates/metrics/metrics-svcmon.yaml
apiVersion: monitoring.coreos.com/v1 apiVersion: monitoring.coreos.com/v1

View File

@ -143,3 +143,49 @@ redis:
addr: "dragonfly:6379" addr: "dragonfly:6379"
username: "" username: ""
password: "MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge" password: "MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge"
nginx:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
portal:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
core:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
jobservice:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
registry:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule

382
logmower/application.yml Normal file
View File

@ -0,0 +1,382 @@
---
# OIDC middleware client protecting https://log.k-space.ee; the authenticated
# identity is passed to the backend via the Remote-* headers mapped below.
apiVersion: codemowers.cloud/v1beta1
kind: OIDCMiddlewareClient
metadata:
  name: frontend
spec:
  displayName: Kubernetes pod log aggregator
  uri: 'https://log.k-space.ee'
  # Only cluster developers and admins may view aggregated pod logs.
  allowedGroups:
    - k-space:kubernetes:developers
    - k-space:kubernetes:admins
  headerMapping:
    email: Remote-Email
    groups: Remote-Groups
    name: Remote-Name
    user: Remote-Username
---
# Log shipper DaemonSet: runs on every node (tolerates all NoSchedule
# taints), tails /var/log and ships records into MongoDB.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: logmower-shipper
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 50%
  selector:
    matchLabels:
      app: logmower-shipper
  template:
    metadata:
      labels:
        app: logmower-shipper
    spec:
      serviceAccountName: logmower-shipper
      containers:
        - name: logmower-shipper
          # NOTE(review): a floating :latest tag gives non-reproducible
          # rollouts — consider pinning a version or digest.
          image: logmower/shipper:latest
          env:
            # Node name is injected so the shipper can tag records with
            # their origin host.
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: MONGO_URI
              valueFrom:
                secretKeyRef:
                  name: logmower-mongodb-application-readwrite
                  key: connectionString.standard
          ports:
            - containerPort: 8000
              name: metrics
          securityContext:
            readOnlyRootFilesystem: true
          command:
            - /app/log_shipper.py
            - --parse-json
            - --normalize-log-level
            - --stream-to-log-level
            - --merge-top-level
            - --max-collection-size
            # Quoted so the value stays a string, not a huge integer.
            - "10000000000"
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            # machine-id and node logs are consumed read-only from the host.
            - name: etcmachineid
              mountPath: /etc/machine-id
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
      volumes:
        - name: etcmachineid
          hostPath:
            path: /etc/machine-id
        - name: varlog
          hostPath:
            path: /var/log
      # Run on tainted nodes (control plane, dedicated pools) as well.
      tolerations:
        - operator: "Exists"
          effect: "NoSchedule"
---
# Binds the shipper's ServiceAccount to the pre-existing "filebeat"
# ClusterRole (defined elsewhere; presumably grants the pod/namespace
# read access the shipper needs — verify against that role).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: logging-logmower-shipper
subjects:
  - kind: ServiceAccount
    name: logmower-shipper
    namespace: logmower
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
# Identity for the shipper DaemonSet pods.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: logmower-shipper
  labels:
    app: logmower-shipper
---
# Shipper pods: Prometheus (monitoring namespace) may scrape them;
# their only permitted egress is MongoDB on 27017.
# NOTE(review): with Egress in policyTypes this also blocks DNS —
# confirm the shipper resolves nothing or add a DNS egress rule.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: logmower-shipper
spec:
  podSelector:
    matchLabels:
      app: logmower-shipper
  policyTypes:
    - Ingress
    - Egress
  ingress:
    # namespaceSelector AND podSelector in one peer: only Prometheus
    # pods in the monitoring namespace.
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: monitoring
          podSelector:
            matchLabels:
              app.kubernetes.io/name: prometheus
  egress:
    - to:
        - podSelector:
            matchLabels:
              app: logmower-mongodb-svc
      ports:
        - port: 27017
---
# Eventsource pods: reachable only from Traefik; may talk only to MongoDB.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: logmower-eventsource
spec:
  podSelector:
    matchLabels:
      app: logmower-eventsource
  policyTypes:
    - Ingress
    - Egress
  egress:
    # No port restriction here, unlike the shipper policy — presumably
    # intentional, but 27017 could be pinned for consistency.
    - to:
        - podSelector:
            matchLabels:
              app: logmower-mongodb-svc
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: traefik
          podSelector:
            matchLabels:
              app.kubernetes.io/name: traefik
---
# Frontend pods: reachable only from Traefik; Egress is declared with no
# rules, so all outbound traffic is denied (the static frontend needs none).
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: logmower-frontend
spec:
  podSelector:
    matchLabels:
      app: logmower-frontend
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: traefik
          podSelector:
            matchLabels:
              app.kubernetes.io/name: traefik
---
# Scrape the shipper's "metrics" container port (8000) via Prometheus
# operator.
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: logmower-shipper
spec:
  selector:
    matchLabels:
      app: logmower-shipper
  podMetricsEndpoints:
    - port: metrics
---
# Alerting rules for the shipper: insertion failures and slow MongoDB
# operations. Long summaries use folded scalars (>-) so they stay one
# logical line at runtime.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: logmower-shipper
spec:
  groups:
    - name: logmower-shipper
      rules:
        - alert: LogmowerSingleInsertionErrors
          annotations:
            summary: >-
              Logmower shipper is having issues submitting log records
              to database
          expr: rate(logmower_insertion_error_count_total[30m]) > 0
          for: 0m
          labels:
            severity: warning
        - alert: LogmowerBulkInsertionErrors
          annotations:
            summary: >-
              Logmower shipper is having issues submitting log records
              to database
          expr: rate(logmower_bulk_insertion_error_count_total[30m]) > 0
          for: 0m
          labels:
            severity: warning
        - alert: LogmowerHighDatabaseLatency
          annotations:
            summary: Database operations are slow
          # p95 of the operation latency histogram above 10 (seconds,
          # presumably — confirm the metric's unit).
          expr: histogram_quantile(0.95, logmower_database_operation_latency_bucket) > 10
          for: 1m
          labels:
            severity: warning
---
# Ingress for log.k-space.ee: /events goes to the SSE eventsource,
# everything else to the static frontend. Auth is enforced by the OIDC
# middleware referenced in router.middlewares.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: logmower
  annotations:
    # NOTE(review): kubernetes.io/ingress.class is deprecated in favour
    # of spec.ingressClassName; kept for Traefik compatibility here.
    kubernetes.io/ingress.class: traefik
    cert-manager.io/cluster-issuer: default
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.middlewares: logmower-frontend@kubernetescrd
    traefik.ingress.kubernetes.io/router.tls: "true"
    external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
  rules:
    - host: log.k-space.ee
      http:
        paths:
          - pathType: Prefix
            path: "/events"
            backend:
              service:
                name: logmower-eventsource
                port:
                  number: 3002
          - pathType: Prefix
            path: "/"
            backend:
              service:
                name: logmower-frontend
                port:
                  number: 8080
  tls:
    - hosts:
        - "*.k-space.ee"
---
# ClusterIP service in front of the eventsource deployment (port 3002).
apiVersion: v1
kind: Service
metadata:
  name: logmower-eventsource
spec:
  type: ClusterIP
  selector:
    app: logmower-eventsource
  ports:
    - protocol: TCP
      port: 3002
---
# ClusterIP service in front of the frontend deployment (port 8080).
apiVersion: v1
kind: Service
metadata:
  name: logmower-frontend
spec:
  type: ClusterIP
  selector:
    app: logmower-frontend
  ports:
    - protocol: TCP
      port: 8080
---
# Static web frontend (nginx-based image). Fixes the "name :" space-
# before-colon typos present in the original volumeMounts entries.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logmower-frontend
spec:
  selector:
    matchLabels:
      app: logmower-frontend
  template:
    metadata:
      labels:
        app: logmower-frontend
    spec:
      containers:
        - name: logmower-frontend
          # NOTE(review): floating :latest tag — consider pinning.
          image: logmower/frontend:latest
          ports:
            - containerPort: 8080
              name: http
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          resources:
            limits:
              memory: 50Mi
            requests:
              cpu: 1m
              memory: 20Mi
          # Root filesystem is read-only, so every path nginx writes to
          # is backed by an emptyDir below.
          volumeMounts:
            - name: nginx-cache
              mountPath: /var/cache/nginx/
            - name: nginx-config
              mountPath: /var/config/nginx/
            - name: var-run
              mountPath: /var/run/
      volumes:
        - name: nginx-cache
          emptyDir: {}
        - name: nginx-config
          emptyDir: {}
        - name: var-run
          emptyDir: {}
---
# Node.js eventsource backend: streams log records out of MongoDB to the
# browser (read-only database credentials).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logmower-eventsource
spec:
  selector:
    matchLabels:
      app: logmower-eventsource
  template:
    metadata:
      labels:
        app: logmower-eventsource
    spec:
      containers:
        - name: logmower-eventsource
          # NOTE(review): floating :latest tag — consider pinning.
          image: logmower/eventsource:latest
          ports:
            - containerPort: 3002
              name: nodejs
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          resources:
            limits:
              cpu: 500m
              memory: 200Mi
            requests:
              cpu: 10m
              memory: 100Mi
          env:
            - name: MONGODB_HOST
              valueFrom:
                secretKeyRef:
                  name: logmower-mongodb-application-readonly
                  key: connectionString.standard
---
# MongoDB pods: any pod in this namespace may connect on 27017; egress is
# limited to other MongoDB pods (replica-set member traffic) on 27017.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: logmower-mongodb
spec:
  podSelector:
    matchLabels:
      app: logmower-mongodb-svc
  policyTypes:
    - Ingress
    - Egress
  ingress:
    # Empty podSelector = all pods in the logmower namespace.
    - from:
        - podSelector: {}
      ports:
        - port: 27017
  egress:
    - to:
        - podSelector:
            matchLabels:
              app: logmower-mongodb-svc
      ports:
        - port: 27017

View File

@ -0,0 +1 @@
../shared/networkpolicy-base.yml

View File

@ -32,9 +32,6 @@ Sample queries:
* [Disk space left](https://prom.k-space.ee/graph?g0.range_input=1h&g0.expr=node_filesystem_avail_bytes&g0.tab=1) * [Disk space left](https://prom.k-space.ee/graph?g0.range_input=1h&g0.expr=node_filesystem_avail_bytes&g0.tab=1)
* Minio [s3 egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_s3_traffic_sent_bytes%5B3m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [internode egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_inter_node_traffic_sent_bytes%5B2m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [storage used](https://prom.k-space.ee/graph?g0.expr=minio_node_disk_used_bytes&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h) * Minio [s3 egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_s3_traffic_sent_bytes%5B3m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [internode egress](https://prom.k-space.ee/graph?g0.expr=rate(minio_inter_node_traffic_sent_bytes%5B2m%5D)&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h), [storage used](https://prom.k-space.ee/graph?g0.expr=minio_node_disk_used_bytes&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=6h)
Another useful tool for exploring Prometheus operator custom resources is
[doc.crds.dev/github.com/prometheus-operator/prometheus-operator](https://doc.crds.dev/github.com/prometheus-operator/prometheus-operator@v0.75.0)
# For administrators # For administrators
To reconfigure SNMP targets etc: To reconfigure SNMP targets etc:
@ -55,14 +52,7 @@ To set Mikrotik secrets:
``` ```
kubectl create -n monitoring secret generic mikrotik-exporter \ kubectl create -n monitoring secret generic mikrotik-exporter \
--from-literal=username=netpoller \ --from-literal=MIKROTIK_PASSWORD='f7W!H*Pu' \
--from-literal=password=... --from-literal=PROMETHEUS_BEARER_TOKEN=$(cat /dev/urandom | base64 | head -c 30)
``` ```
To wipe timeseries:
```
for replica in $(seq 0 2); do
kubectl exec -n monitoring prometheus-prometheus-$replica -- wget --post-data='match[]={__name__=~"mikrotik_.*"}' http://127.0.0.1:9090/api/v1/admin/tsdb/delete_series -O -
done
```

View File

@ -4,29 +4,25 @@ kind: Probe
metadata: metadata:
name: mikrotik name: mikrotik
spec: spec:
basicAuth: bearerTokenSecret:
username: name: mikrotik-exporter
name: mikrotik-exporter key: PROMETHEUS_BEARER_TOKEN
key: username
password:
name: mikrotik-exporter
key: password
prober: prober:
path: /metrics
url: mikrotik-exporter url: mikrotik-exporter
module: full
targets: targets:
staticConfig: staticConfig:
static: static:
- 172.23.0.1 - router.mgmt.k-space.ee
- 172.23.0.100 - sw_chaos.mgmt.k-space.ee
- 100.102.1.111 - sw_poe.mgmt.k-space.ee
- 100.102.1.112 - sw_mgmt.mgmt.k-space.ee
- 100.102.1.114 - sw_core02.mgmt.k-space.ee
- 100.102.1.115 - sw_cyber.mgmt.k-space.ee
- 100.102.1.121 - sw_ha.mgmt.k-space.ee
- 100.102.1.131 - sw_asocial.mgmt.k-space.ee
- 100.102.1.141 - sw_kitchen.mgmt.k-space.ee
- 100.102.1.151 - sw_core01.mgmt.k-space.ee
--- ---
apiVersion: monitoring.coreos.com/v1 apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule kind: PrometheusRule
@ -36,30 +32,22 @@ spec:
groups: groups:
- name: mikrotik - name: mikrotik
rules: rules:
- alert: MikrotikBondRedundancyLost - alert: MikrotikUplinkRedundancyLost
expr: mikrotik_bond_port_active == 0 expr: mikrotik_interface_running{port=~"sfp-sfpplus[12]", instance!~"sw_core.*", instance!~"sw_mgmt.*"} == 0
for: 2m for: 0m
labels: labels:
severity: error severity: error
annotations: annotations:
summary: Switch uplink high availability lost summary: Switch uplink high availability lost
description: One of the two bonds has inactive member interface description: One of the two 10Gb optical links is malfunctioning
- alert: MikrotikLinkRateDegraded - alert: MikrotikLinkRateDegraded
expr: mikrotik_interface_link_rate_bps{interface=~"sfp-sfpplus.*"} < 10000000000 expr: mikrotik_interface_rate{port=~"sfp-sfpplus.*"} < 10000000000
for: 2m for: 0m
labels: labels:
severity: error severity: error
annotations: annotations:
summary: SFP+ link degraded summary: 10Gb link degraded
description: One of the SFP+ (10G) links is running at lower speed description: One of the 10Gb links is running at lower speed
- alert: MikrotikLinkRateDegraded
expr: mikrotik_interface_link_rate_bps{interface=~"qsfpplus.*"} < 40000000000
for: 2m
labels:
severity: error
annotations:
summary: QSFP+ link degraded
description: One of the QSFP+ (40G) links is running at lower speed
--- ---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
@ -75,10 +63,20 @@ spec:
metadata: metadata:
labels: labels:
app: mikrotik-exporter app: mikrotik-exporter
annotations:
co.elastic.logs/multiline.pattern: '^ '
co.elastic.logs/multiline.negate: "false"
co.elastic.logs/multiline.match: after
spec: spec:
containers: containers:
- name: mikrotik-exporter - name: mikrotik-exporter
image: mirror.gcr.io/codemowers/mikrotik-exporter:latest@sha256:3148c06a90b9836598695645b85c8514fa67ac9e7c644b3daf4853577fce0efb image: mirror.gcr.io/codemowers/mikrotik-exporter:latest
env:
- name: MIKROTIK_USER
value: netpoller
envFrom:
- secretRef:
name: mikrotik-exporter
topologySpreadConstraints: topologySpreadConstraints:
- maxSkew: 1 - maxSkew: 1
topologyKey: topology.kubernetes.io/zone topologyKey: topology.kubernetes.io/zone
@ -96,13 +94,13 @@ spec:
affinity: affinity:
podAntiAffinity: podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution: requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector: - labelSelector:
matchExpressions: matchExpressions:
- key: app - key: app
operator: In operator: In
values: values:
- mikrotik-exporter - mikrotik-exporter
topologyKey: "kubernetes.io/hostname" topologyKey: "kubernetes.io/hostname"
--- ---
kind: Service kind: Service
apiVersion: v1 apiVersion: v1

View File

@ -17,7 +17,6 @@ metadata:
name: prometheus name: prometheus
namespace: monitoring namespace: monitoring
spec: spec:
enableAdminAPI: true
topologySpreadConstraints: topologySpreadConstraints:
- maxSkew: 1 - maxSkew: 1
topologyKey: topology.kubernetes.io/zone topologyKey: topology.kubernetes.io/zone

View File

@ -86,8 +86,8 @@ spec:
staticConfig: staticConfig:
static: static:
- ups-4.mgmt.k-space.ee - ups-4.mgmt.k-space.ee
- ups-6.mgmt.k-space.ee
- ups-7.mgmt.k-space.ee - ups-7.mgmt.k-space.ee
- ups-8.mgmt.k-space.ee
- ups-9.mgmt.k-space.ee - ups-9.mgmt.k-space.ee
--- ---
apiVersion: monitoring.coreos.com/v1 apiVersion: monitoring.coreos.com/v1

View File

@ -62,8 +62,8 @@ spec:
restartPolicy: OnFailure restartPolicy: OnFailure
containers: containers:
- name: rosdump - name: rosdump
image: codemowers/git image: harbor.k-space.ee/k-space/microscript-base
imagePullPolicy: IfNotPresent imagePullPolicy: Always
args: args:
- bash - bash
- /config/script.sh - /config/script.sh

View File

@ -11,24 +11,10 @@ spec:
ipPools: ipPools:
- blockSize: 26 - blockSize: 26
cidr: 10.244.0.0/16 cidr: 10.244.0.0/16
encapsulation: VXLANCrossSubnet
natOutgoing: Disabled natOutgoing: Disabled
nodeSelector: all() nodeSelector: all()
--- ---
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
name: default-ipv4-ippool
spec:
allowedUses:
- Workload
- Tunnel
blockSize: 26
cidr: 10.244.0.0/16
ipipMode: Always
nodeSelector: all()
vxlanMode: Never
natOutgoing: True
---
# This section configures the Calico API server. # This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1 apiVersion: operator.tigera.io/v1
@ -66,7 +52,6 @@ spec:
- cidr: 193.40.103.39/32 - cidr: 193.40.103.39/32
- cidr: 62.65.250.36/32 - cidr: 62.65.250.36/32
- cidr: 62.65.250.37/32 - cidr: 62.65.250.37/32
- cidr: 62.65.250.38/32
- cidr: 62.65.250.2/32 - cidr: 62.65.250.2/32
- cidr: 193.40.103.25/32 - cidr: 193.40.103.25/32
--- ---