# kube/prometheus-operator/application.yml
---
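# Catch-all PodMonitor: empty namespaceSelector and selector match pods in
# every namespace, scraping any container port named "exporter" or "metrics".
# Any pod whose container declares a port with one of those names is picked up
# automatically, e.g. (illustrative sketch, names and port are assumptions):
#
#   ports:
#     - name: metrics
#       containerPort: 9100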
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: metrics
spec:
namespaceSelector: {}
selector: {}
podMetricsEndpoints:
- port: exporter
- port: metrics
---
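# Alertmanager cluster: 3 replicas pinned to the dedicated monitoring nodes.
# alertmanagerConfigSelector picks up AlertmanagerConfig resources labeled
# alertmanagerConfig=email; the actual receiver definition lives outside this
# file. A minimal sketch of such a resource (illustrative only, the name,
# addresses and SMTP host are assumptions) could look like:
#
#   apiVersion: monitoring.coreos.com/v1alpha1
#   kind: AlertmanagerConfig
#   metadata:
#     name: email
#     labels:
#       alertmanagerConfig: email
#   spec:
#     route:
#       receiver: email
#     receivers:
#       - name: email
#         emailConfigs:
#           - to: alerts@example.com          # hypothetical recipient
#             from: alertmanager@example.com  # hypothetical sender
#             smarthost: smtp.example.com:587 # hypothetical SMTP relay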
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
name: alertmanager
spec:
alertmanagerConfigSelector:
matchLabels:
alertmanagerConfig: email
nodeSelector:
dedicated: monitoring
tolerations:
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
replicas: 3
serviceAccountName: alertmanager
externalUrl: https://am.k-space.ee/
routePrefix: "/"
securityContext:
fsGroup: 2000
runAsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: alertmanager
---
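# Prometheus server: 2 replicas on the dedicated monitoring nodes. The empty
# {} selectors mean every ServiceMonitor, PodMonitor, Probe and PrometheusRule
# in every namespace is picked up. Retention is capped at 80GB on a 100Gi
# local-path PersistentVolumeClaim per replica.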
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
spec:
nodeSelector:
dedicated: monitoring
tolerations:
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
alerting:
alertmanagers:
- namespace: prometheus-operator
name: alertmanager
port: http
pathPrefix: "/"
apiVersion: v2
externalUrl: "https://prom.k-space.ee/"
replicas: 2
shards: 1
serviceAccountName: prometheus
securityContext:
fsGroup: 2000
runAsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector: {}
podMonitorNamespaceSelector: {}
podMonitorSelector: {}
probeNamespaceSelector: {}
probeSelector: {}
ruleNamespaceSelector: {}
ruleSelector: {}
retentionSize: 80GB
storage:
volumeClaimTemplate:
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
storageClassName: local-path
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
---
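# RBAC for the Prometheus ServiceAccount: read access to the core discovery
# objects (nodes, services, endpoints, pods), configmaps and ingresses, plus
# the non-resource /metrics endpoint for scraping.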
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: prometheus-operator
---
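# Meta-monitoring rules: alert on Prometheus and Alertmanager themselves
# (missing jobs, failed config reloads, rule evaluation and TSDB errors).
# Note that "for: 0m" makes an alert fire on the first evaluation in which
# its expression is true, with no pending period.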
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: prometheus
spec:
groups:
- name: prometheus
rules:
- alert: PrometheusJobMissing
annotations:
description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n \
\ LABELS = {{ $labels }}"
summary: Prometheus job missing (instance {{ $labels.instance }})
expr: absent(up{job="prometheus-operator/prometheus"})
for: 0m
labels:
severity: warning
- alert: PrometheusTargetMissing
annotations:
description: "A Prometheus target has disappeared. An exporter might have crashed.\n\
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus target missing (instance {{ $labels.instance }})
expr: up == 0
for: 5m
labels:
severity: critical
- alert: PrometheusAllTargetsMissing
annotations:
description: "A Prometheus job no longer has any living targets.\n VALUE\
\ = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus all targets missing (instance {{ $labels.instance }})
expr: count by (job) (up) == 0
for: 0m
labels:
severity: critical
- alert: PrometheusConfigurationReloadFailure
annotations:
description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n\
\ LABELS = {{ $labels }}"
summary: Prometheus configuration reload failure (instance {{ $labels.instance
}})
expr: prometheus_config_last_reload_successful != 1
for: 0m
labels:
severity: warning
- alert: PrometheusTooManyRestarts
annotations:
description: "Prometheus has restarted more than twice in the last 15 minutes.\
\ It might be crashlooping.\n VALUE = {{ $value }}\n LABELS = {{ $labels\
\ }}"
summary: Prometheus too many restarts (instance {{ $labels.instance }})
expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m])
> 2
for: 0m
labels:
severity: warning
- alert: PrometheusAlertmanagerJobMissing
annotations:
description: "A Prometheus AlertManager job has disappeared\n VALUE = {{\
\ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus AlertManager job missing (instance {{ $labels.instance
}})
expr: absent(up{job="prometheus-operator/alertmanager"})
for: 0m
labels:
severity: warning
- alert: PrometheusAlertmanagerConfigurationReloadFailure
annotations:
description: "AlertManager configuration reload error\n VALUE = {{ $value\
\ }}\n LABELS = {{ $labels }}"
summary: Prometheus AlertManager configuration reload failure (instance {{
$labels.instance }})
expr: alertmanager_config_last_reload_successful != 1
for: 0m
labels:
severity: warning
- alert: PrometheusAlertmanagerConfigNotSynced
annotations:
description: "Configurations of AlertManager cluster instances are out of\
\ sync\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus AlertManager config not synced (instance {{ $labels.instance
}})
expr: count(count_values("config_hash", alertmanager_config_hash)) > 1
for: 0m
labels:
severity: warning
- alert: PrometheusNotConnectedToAlertmanager
annotations:
description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value\
\ }}\n LABELS = {{ $labels }}"
summary: Prometheus not connected to alertmanager (instance {{ $labels.instance
}})
expr: prometheus_notifications_alertmanagers_discovered < 1
for: 0m
labels:
severity: critical
- alert: PrometheusRuleEvaluationFailures
annotations:
description: "Prometheus encountered {{ $value }} rule evaluation failures,\
\ leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS\
\ = {{ $labels }}"
summary: Prometheus rule evaluation failures (instance {{ $labels.instance
}})
expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusTemplateTextExpansionFailures
annotations:
description: "Prometheus encountered {{ $value }} template text expansion\
\ failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus template text expansion failures (instance {{ $labels.instance
}})
expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusRuleEvaluationSlow
annotations:
description: "Prometheus rule evaluation took more time than the scheduled\
\ interval. This indicates slow storage backend access or an overly complex\
\ query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
for: 5m
labels:
severity: warning
- alert: PrometheusNotificationsBacklog
annotations:
description: "The Prometheus notification queue has not been empty for 10\
\ minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus notifications backlog (instance {{ $labels.instance }})
expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
for: 0m
labels:
severity: warning
- alert: PrometheusAlertmanagerNotificationFailing
annotations:
description: "Alertmanager is failing to send notifications\n VALUE = {{\
\ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus AlertManager notification failing (instance {{ $labels.instance
}})
expr: rate(alertmanager_notifications_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusTargetEmpty
annotations:
description: "Prometheus has no targets in service discovery\n VALUE = {{\
\ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus target empty (instance {{ $labels.instance }})
expr: prometheus_sd_discovered_targets == 0
for: 0m
labels:
severity: critical
- alert: PrometheusLargeScrape
annotations:
description: "Prometheus has many scrapes that exceed the sample limit\n \
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus large scrape (instance {{ $labels.instance }})
expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) >
10
for: 5m
labels:
severity: warning
- alert: PrometheusTargetScrapeDuplicate
annotations:
description: "Prometheus has many samples rejected due to duplicate timestamps\
\ but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus target scrape duplicate (instance {{ $labels.instance
}})
expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m])
> 0
for: 0m
labels:
severity: warning
- alert: PrometheusTsdbCheckpointCreationFailures
annotations:
description: "Prometheus encountered {{ $value }} checkpoint creation failures\n\
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance
}})
expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusTsdbCheckpointDeletionFailures
annotations:
description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n\
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance
}})
expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusTsdbCompactionsFailed
annotations:
description: "Prometheus encountered {{ $value }} TSDB compaction failures\n\
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus TSDB compactions failed (instance {{ $labels.instance
}})
expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusTsdbHeadTruncationsFailed
annotations:
description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n\
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance
}})
expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusTsdbReloadFailures
annotations:
description: "Prometheus encountered {{ $value }} TSDB reload failures\n \
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusTsdbWalCorruptions
annotations:
description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n \
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus TSDB WAL is corrupt; make sure there is enough disk space
and wipe /data/wal
expr: increase(prometheus_tsdb_wal_corruptions_total[2h]) > 0
for: 0m
labels:
severity: critical
- alert: PrometheusTsdbWalTruncationsFailed
annotations:
description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n\
\ VALUE = {{ $value }}\n LABELS = {{ $labels }}"
summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance
}})
expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
---
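# Ingresses below expose the web UIs at prom.k-space.ee and am.k-space.ee via
# Traefik, behind the traefik-sso middleware, with cert-manager issued TLS.
# The backends prometheus-operated and alertmanager-operated are the headless
# Services that prometheus-operator creates automatically for its StatefulSets.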
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: prometheus
annotations:
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
spec:
rules:
- host: prom.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: prometheus-operated
port:
number: 9090
tls:
- hosts:
- prom.k-space.ee
secretName: prom-tls
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: alertmanager
annotations:
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
spec:
rules:
- host: am.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: alertmanager-operated
port:
number: 9093
tls:
- hosts:
- am.k-space.ee
secretName: alertmanager-tls
---
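# Self-scrape PodMonitors: collect metrics from the Prometheus, Alertmanager
# and prometheus-operator pods themselves on their web/http ports.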
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: prometheus
spec:
selector:
matchLabels:
app.kubernetes.io/name: prometheus
podMetricsEndpoints:
- port: web
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: alertmanager
spec:
selector:
matchLabels:
app.kubernetes.io/name: alertmanager
podMetricsEndpoints:
- port: web
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: operator
spec:
selector:
matchLabels:
app.kubernetes.io/name: prometheus-operator
podMetricsEndpoints:
- port: http
---
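# Kubelet metrics: scrape both the kubelet's own /metrics and /metrics/cadvisor
# over HTTPS using the in-cluster ServiceAccount token. insecureSkipVerify is
# set because kubelet serving certificates are typically self-signed.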
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: kubelet
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
interval: 30s
port: https-metrics
scheme: https
tlsConfig:
insecureSkipVerify: true
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
interval: 30s
path: /metrics/cadvisor
port: https-metrics
scheme: https
tlsConfig:
insecureSkipVerify: true
namespaceSelector:
matchNames:
- kube-system
selector:
matchLabels:
app.kubernetes.io/name: kubelet
---
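# Cluster health rules based on kube-state-metrics and kubelet metrics:
# node conditions, workload replica mismatches, PVC capacity, and API server
# error rates and certificate expiry.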
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: kube-state-metrics
spec:
groups:
- name: kube-state-metrics
rules:
- alert: KubernetesNodeReady
expr: kube_node_status_condition{condition="Ready",status="true"} == 0
for: 10m
labels:
severity: critical
annotations:
summary: Kubernetes node not ready (instance {{ $labels.instance }})
description: "Node {{ $labels.node }} has been unready for more than 10 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesMemoryPressure
expr: kube_node_status_condition{condition="MemoryPressure",status="true"} == 1
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes memory pressure (instance {{ $labels.instance }})
description: "{{ $labels.node }} has MemoryPressure condition\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesDiskPressure
expr: kube_node_status_condition{condition="DiskPressure",status="true"} == 1
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes disk pressure (instance {{ $labels.instance }})
description: "{{ $labels.node }} has DiskPressure condition\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesOutOfDisk
expr: kube_node_status_condition{condition="OutOfDisk",status="true"} == 1
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes out of disk (instance {{ $labels.instance }})
description: "{{ $labels.node }} has OutOfDisk condition\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesOutOfCapacity
expr: sum by (node) ((kube_pod_status_phase{phase="Running"} == 1) + on(uid) group_left(node) (0 * kube_pod_info{pod_template_hash=""})) / sum by (node) (kube_node_status_allocatable{resource="pods"}) * 100 > 90
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes out of capacity (instance {{ $labels.instance }})
description: "{{ $labels.node }} is running more than 90% of its allocatable pod capacity\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesContainerOomKiller
expr: (kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total offset 10m >= 1) and ignoring (reason) min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m]) == 1
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes container oom killer (instance {{ $labels.instance }})
description: "Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has been OOMKilled {{ $value }} times in the last 10 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesJobFailed
expr: kube_job_status_failed > 0
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes Job failed (instance {{ $labels.instance }})
description: "Job {{$labels.namespace}}/{{$labels.exported_job}} failed to complete\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesCronjobSuspended
expr: kube_cronjob_spec_suspend != 0
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes CronJob suspended (instance {{ $labels.instance }})
description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is suspended\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesPersistentvolumeclaimPending
expr: kube_persistentvolumeclaim_status_phase{phase="Pending"} == 1
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes PersistentVolumeClaim pending (instance {{ $labels.instance }})
description: "PersistentVolumeClaim {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is pending\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesVolumeOutOfDiskSpace
expr: kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes * 100 < 10
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes Volume out of disk space (instance {{ $labels.instance }})
description: "Volume is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesVolumeFullInFourDays
expr: predict_linear(kubelet_volume_stats_available_bytes[6h], 4 * 24 * 3600) < 0
for: 0m
labels:
severity: critical
annotations:
summary: Kubernetes Volume full in four days (instance {{ $labels.instance }})
description: "{{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is expected to fill up within four days, based on the last 6 hours of usage.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesPersistentvolumeError
expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending", job="kube-state-metrics"} > 0
for: 0m
labels:
severity: critical
annotations:
summary: Kubernetes PersistentVolume error (instance {{ $labels.instance }})
description: "Persistent volume is in bad state\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesStatefulsetDown
expr: (kube_statefulset_status_replicas_ready / kube_statefulset_status_replicas_current) != 1
for: 1m
labels:
severity: critical
annotations:
summary: Kubernetes StatefulSet down (instance {{ $labels.instance }})
description: "A StatefulSet has fewer ready replicas than current replicas\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesHpaScalingAbility
expr: kube_horizontalpodautoscaler_status_condition{status="false", condition="AbleToScale"} == 1
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes HPA scaling ability (instance {{ $labels.instance }})
description: "The HPA is unable to scale\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesHpaMetricAvailability
expr: kube_horizontalpodautoscaler_status_condition{status="false", condition="ScalingActive"} == 1
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes HPA metric availability (instance {{ $labels.instance }})
description: "HPA is not able to collect metrics\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesHpaScaleCapability
expr: kube_horizontalpodautoscaler_status_desired_replicas >= kube_horizontalpodautoscaler_spec_max_replicas
for: 2m
labels:
severity: info
annotations:
summary: Kubernetes HPA scale capability (instance {{ $labels.instance }})
description: "The maximum number of desired Pods has been hit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesPodNotHealthy
expr: min_over_time(sum by (namespace, pod) (kube_pod_status_phase{phase=~"Pending|Unknown|Failed"})[15m:1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Kubernetes Pod not healthy (instance {{ $labels.instance }})
description: "Pod {{ $labels.pod }} has been in a non-running phase (Pending, Unknown or Failed) for longer than 15 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesPodCrashLooping
expr: increase(kube_pod_container_status_restarts_total[1m]) > 3
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes pod crash looping (instance {{ $labels.instance }})
description: "Pod {{ $labels.pod }} is crash looping\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesReplicassetMismatch
expr: kube_replicaset_spec_replicas != kube_replicaset_status_ready_replicas
for: 10m
labels:
severity: warning
annotations:
summary: Kubernetes ReplicaSet mismatch (instance {{ $labels.instance }})
description: "ReplicaSet replicas mismatch\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesDeploymentReplicasMismatch
expr: kube_deployment_spec_replicas != kube_deployment_status_replicas_available
for: 10m
labels:
severity: warning
annotations:
summary: Kubernetes Deployment replicas mismatch (instance {{ $labels.instance }})
description: "Deployment Replicas mismatch\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesStatefulsetReplicasMismatch
expr: kube_statefulset_status_replicas_ready != kube_statefulset_status_replicas
for: 10m
labels:
severity: warning
annotations:
summary: Kubernetes StatefulSet replicas mismatch (instance {{ $labels.instance }})
description: "A StatefulSet does not match the expected number of replicas.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesDeploymentGenerationMismatch
expr: kube_deployment_status_observed_generation != kube_deployment_metadata_generation
for: 10m
labels:
severity: critical
annotations:
summary: Kubernetes Deployment generation mismatch (instance {{ $labels.instance }})
description: "A Deployment has failed but has not been rolled back.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesStatefulsetGenerationMismatch
expr: kube_statefulset_status_observed_generation != kube_statefulset_metadata_generation
for: 10m
labels:
severity: critical
annotations:
summary: Kubernetes StatefulSet generation mismatch (instance {{ $labels.instance }})
description: "A StatefulSet has failed but has not been rolled back.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesStatefulsetUpdateNotRolledOut
expr: max without (revision) (kube_statefulset_status_current_revision unless kube_statefulset_status_update_revision) * (kube_statefulset_replicas != kube_statefulset_status_replicas_updated)
for: 10m
labels:
severity: warning
annotations:
summary: Kubernetes StatefulSet update not rolled out (instance {{ $labels.instance }})
description: "StatefulSet update has not been rolled out.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesDaemonsetRolloutStuck
expr: kube_daemonset_status_number_ready / kube_daemonset_status_desired_number_scheduled * 100 < 100 or kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled > 0
for: 10m
labels:
severity: warning
annotations:
summary: Kubernetes DaemonSet rollout stuck (instance {{ $labels.instance }})
description: "Some Pods of DaemonSet are not scheduled or not ready\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesDaemonsetMisscheduled
expr: kube_daemonset_status_number_misscheduled > 0
for: 1m
labels:
severity: critical
annotations:
summary: Kubernetes DaemonSet misscheduled (instance {{ $labels.instance }})
description: "Some DaemonSet Pods are running where they are not supposed to run\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesCronjobTooLong
expr: time() - kube_cronjob_next_schedule_time > 3600
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes CronJob too long (instance {{ $labels.instance }})
description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than 1h to complete.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesJobSlowCompletion
expr: kube_job_spec_completions - kube_job_status_succeeded > 0
for: 12h
labels:
severity: critical
annotations:
summary: Kubernetes job slow completion (instance {{ $labels.instance }})
description: "Kubernetes Job {{ $labels.namespace }}/{{ $labels.job_name }} did not complete in time.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesApiServerErrors
expr: sum(rate(apiserver_request_total{job="apiserver",code=~"^(?:5..)$"}[1m])) / sum(rate(apiserver_request_total{job="apiserver"}[1m])) * 100 > 3
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes API server errors (instance {{ $labels.instance }})
description: "Kubernetes API server is experiencing a high error rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesApiClientErrors
expr: (sum(rate(rest_client_requests_total{code=~"(4|5).."}[1m])) by (instance, job) / sum(rate(rest_client_requests_total[1m])) by (instance, job)) * 100 > 1
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes API client errors (instance {{ $labels.instance }})
description: "Kubernetes API client is experiencing a high error rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesClientCertificateExpiresNextWeek
expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 7*24*60*60
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes client certificate expires next week (instance {{ $labels.instance }})
description: "A client certificate used to authenticate to the apiserver is expiring next week.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesClientCertificateExpiresSoon
expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 24*60*60
for: 0m
labels:
severity: critical
annotations:
summary: Kubernetes client certificate expires soon (instance {{ $labels.instance }})
description: "A client certificate used to authenticate to the apiserver is expiring in less than 24 hours.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesApiServerLatency
expr: histogram_quantile(0.99, sum(rate(apiserver_request_latencies_bucket{subresource!="log",verb!~"^(?:CONNECT|WATCHLIST|WATCH|PROXY)$"} [10m])) WITHOUT (instance, resource)) / 1e+06 > 1
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes API server latency (instance {{ $labels.instance }})
description: "Kubernetes API server has a 99th percentile latency of {{ $value }} seconds for {{ $labels.verb }} {{ $labels.resource }}.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"