| Rule | State | Error | Last Evaluation | Evaluation Time |
| alert: etcdMembersDown
expr: max
by(job) (sum by(job) (up{job=~".*etcd.*"} == bool 0) or count by(job, endpoint)
(sum by(job, endpoint, To) (rate(etcd_network_peer_sent_failures_total{job=~".*etcd.*"}[3m]))
> 0.01)) > 0
for: 3m
labels:
severity: critical
annotations:
message: 'etcd cluster "{{ $labels.job }}": members are down ({{ $value
}}).'
| ok | | 4.099s ago | 462.6us |
| alert: etcdInsufficientMembers
expr: sum
by(job) (up{job=~".*etcd.*"} == bool 1) < ((count by(job) (up{job=~".*etcd.*"})
+ 1) / 2)
for: 3m
labels:
severity: critical
annotations:
message: 'etcd cluster "{{ $labels.job }}": insufficient members ({{ $value
}}).'
| ok | | 4.099s ago | 174.2us |
| alert: etcdNoLeader
expr: etcd_server_has_leader{job=~".*etcd.*"}
== 0
for: 1m
labels:
severity: critical
annotations:
message: 'etcd cluster "{{ $labels.job }}": member {{ $labels.instance }}
has no leader.'
| ok | | 4.099s ago | 74.85us |
| alert: etcdHighNumberOfLeaderChanges
expr: increase((max
by(job) (etcd_server_leader_changes_seen_total{job=~".*etcd.*"}) or 0 *
absent(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}))[15m:1m])
>= 3
for: 5m
labels:
severity: warning
annotations:
message: 'etcd cluster "{{ $labels.job }}": {{ $value }} leader changes
within the last 15 minutes. Frequent elections may be a sign of insufficient resources,
high network latency, or disruptions by other components and should be investigated.'
| ok | | 4.099s ago | 225.3us |
| alert: etcdHighNumberOfFailedGRPCRequests
expr: 100
* sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{grpc_code!="OK",job=~".*etcd.*"}[5m]))
/ sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{job=~".*etcd.*"}[5m]))
> 1
for: 10m
labels:
severity: warning
annotations:
message: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for
{{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.'
| ok | | 4.099s ago | 189us |
| alert: etcdHighNumberOfFailedGRPCRequests
expr: 100
* sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{grpc_code!="OK",job=~".*etcd.*"}[5m]))
/ sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{job=~".*etcd.*"}[5m]))
> 5
for: 5m
labels:
severity: critical
annotations:
message: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for
{{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.'
| ok | | 4.099s ago | 167.3us |
| alert: etcdGRPCRequestsSlow
expr: histogram_quantile(0.99,
sum by(job, instance, grpc_service, grpc_method, le) (rate(grpc_server_handling_seconds_bucket{grpc_type="unary",job=~".*etcd.*"}[5m])))
> 0.15
for: 10m
labels:
severity: critical
annotations:
message: 'etcd cluster "{{ $labels.job }}": gRPC requests to {{ $labels.grpc_method
}} are taking {{ $value }}s on etcd instance {{ $labels.instance }}.'
| ok | | 4.099s ago | 121.2us |
| alert: etcdMemberCommunicationSlow
expr: histogram_quantile(0.99,
rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m]))
> 0.15
for: 10m
labels:
severity: warning
annotations:
message: 'etcd cluster "{{ $labels.job }}": member communication with {{
$labels.To }} is taking {{ $value }}s on etcd instance {{ $labels.instance }}.'
| ok | | 4.099s ago | 93.32us |
| alert: etcdHighNumberOfFailedProposals
expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m])
> 5
for: 15m
labels:
severity: warning
annotations:
message: 'etcd cluster "{{ $labels.job }}": {{ $value }} proposal failures
within the last 30 minutes on etcd instance {{ $labels.instance }}.'
| ok | | 4.099s ago | 84.23us |
| alert: etcdHighFsyncDurations
expr: histogram_quantile(0.99,
rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
> 0.5
for: 10m
labels:
severity: warning
annotations:
message: 'etcd cluster "{{ $labels.job }}": 99th percentile fsync durations
are {{ $value }}s on etcd instance {{ $labels.instance }}.'
| ok | | 4.099s ago | 97.46us |
| alert: etcdHighCommitDurations
expr: histogram_quantile(0.99,
rate(etcd_disk_backend_commit_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
> 0.25
for: 10m
labels:
severity: warning
annotations:
message: 'etcd cluster "{{ $labels.job }}": 99th percentile commit durations
are {{ $value }}s on etcd instance {{ $labels.instance }}.'
| ok | | 4.099s ago | 89.67us |
| alert: etcdHighNumberOfFailedHTTPRequests
expr: sum
by(method) (rate(etcd_http_failed_total{code!="404",job=~".*etcd.*"}[5m]))
/ sum by(method) (rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) >
0.01
for: 10m
labels:
severity: warning
annotations:
message: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance
{{ $labels.instance }}.'
| ok | | 4.099s ago | 202.7us |
| alert: etcdHighNumberOfFailedHTTPRequests
expr: sum
by(method) (rate(etcd_http_failed_total{code!="404",job=~".*etcd.*"}[5m]))
/ sum by(method) (rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) >
0.05
for: 10m
labels:
severity: critical
annotations:
message: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance
{{ $labels.instance }}.'
| ok | | 4.099s ago | 169.9us |
| alert: etcdHTTPRequestsSlow
expr: histogram_quantile(0.99,
rate(etcd_http_successful_duration_seconds_bucket[5m])) > 0.15
for: 10m
labels:
severity: warning
annotations:
message: etcd instance {{ $labels.instance }} HTTP requests to {{ $labels.method
}} are slow.
| ok | | 4.099s ago | 49.81us |
| 23.021s ago | 3.904ms |
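These etcd rules are rendered here exactly as loaded; the same definitions can be kept in a standalone rule file and checked before they are shipped to Prometheus. A minimal sketch using the etcdNoLeader alert from this group (the file and group names are illustrative, not taken from this deployment):

```yaml
# etcd.rules.yaml -- validate with: promtool check rules etcd.rules.yaml
groups:
  - name: etcd
    rules:
      - alert: etcdNoLeader
        expr: etcd_server_has_leader{job=~".*etcd.*"} == 0
        for: 1m
        labels:
          severity: critical
        annotations:
          message: 'etcd cluster "{{ $labels.job }}": member {{ $labels.instance }} has no leader.'
```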
| Rule | State | Error | Last Evaluation | Evaluation Time |
| record: namespace:container_cpu_usage_seconds_total:sum_rate
expr: sum
by(namespace) (rate(container_cpu_usage_seconds_total{container!="POD",image!="",job="kubelet",metrics_path="/metrics/cadvisor"}[5m]))
| ok | | 21.965s ago | 4.542ms |
| record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate
expr: sum
by(cluster, namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!="POD",image!="",job="kubelet",metrics_path="/metrics/cadvisor"}[5m]))
* on(cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod)
(1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""}))
| ok | | 21.96s ago | 9.184ms |
| record: node_namespace_pod_container:container_memory_working_set_bytes
expr: container_memory_working_set_bytes{image!="",job="kubelet",metrics_path="/metrics/cadvisor"}
* on(namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace,
pod, node) (kube_pod_info{node!=""}))
| ok | | 21.951s ago | 15.94ms |
| record: node_namespace_pod_container:container_memory_rss
expr: container_memory_rss{image!="",job="kubelet",metrics_path="/metrics/cadvisor"}
* on(namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace,
pod, node) (kube_pod_info{node!=""}))
| ok | | 21.935s ago | 16.46ms |
| record: node_namespace_pod_container:container_memory_cache
expr: container_memory_cache{image!="",job="kubelet",metrics_path="/metrics/cadvisor"}
* on(namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace,
pod, node) (kube_pod_info{node!=""}))
| ok | | 21.919s ago | 15.52ms |
| record: node_namespace_pod_container:container_memory_swap
expr: container_memory_swap{image!="",job="kubelet",metrics_path="/metrics/cadvisor"}
* on(namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace,
pod, node) (kube_pod_info{node!=""}))
| ok | | 21.903s ago | 15.65ms |
| record: namespace:container_memory_usage_bytes:sum
expr: sum
by(namespace) (container_memory_usage_bytes{container!="POD",image!="",job="kubelet",metrics_path="/metrics/cadvisor"})
| ok | | 21.888s ago | 3.258ms |
| record: namespace:kube_pod_container_resource_requests_memory_bytes:sum
expr: sum
by(namespace) (sum by(namespace, pod) (max by(namespace, pod, container) (kube_pod_container_resource_requests_memory_bytes{job="kube-state-metrics"})
* on(namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase=~"Pending|Running"}
== 1)))
| ok | | 21.885s ago | 4.517ms |
| record: namespace:kube_pod_container_resource_requests_cpu_cores:sum
expr: sum
by(namespace) (sum by(namespace, pod) (max by(namespace, pod, container) (kube_pod_container_resource_requests_cpu_cores{job="kube-state-metrics"})
* on(namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase=~"Pending|Running"}
== 1)))
| ok | | 21.88s ago | 4.094ms |
| record: namespace_workload_pod:kube_pod_owner:relabel
expr: max
by(cluster, namespace, workload, pod) (label_replace(label_replace(kube_pod_owner{job="kube-state-metrics",owner_kind="ReplicaSet"},
"replicaset", "$1", "owner_name", "(.*)") * on(replicaset,
namespace) group_left(owner_name) topk by(replicaset, namespace) (1, max by(replicaset,
namespace, owner_name) (kube_replicaset_owner{job="kube-state-metrics"})),
"workload", "$1", "owner_name", "(.*)"))
labels:
workload_type: deployment
| ok | | 21.876s ago | 2.362ms |
| record: namespace_workload_pod:kube_pod_owner:relabel
expr: max
by(cluster, namespace, workload, pod) (label_replace(kube_pod_owner{job="kube-state-metrics",owner_kind="DaemonSet"},
"workload", "$1", "owner_name", "(.*)"))
labels:
workload_type: daemonset
| ok | | 21.874s ago | 1.127ms |
| record: namespace_workload_pod:kube_pod_owner:relabel
expr: max
by(cluster, namespace, workload, pod) (label_replace(kube_pod_owner{job="kube-state-metrics",owner_kind="StatefulSet"},
"workload", "$1", "owner_name", "(.*)"))
labels:
workload_type: statefulset
| ok | | 21.873s ago | 268.1us |
| 33.466s ago | 2.169s |
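Most recording rules in this group attach a node label to cAdvisor series by joining against kube_pod_info; wrapping the info metric in topk by(namespace, pod) (1, ...) keeps exactly one info series per pod, so the multiplication cannot fan out even if kube_pod_info is briefly duplicated. Downstream rules and dashboards can then aggregate by node directly. A sketch of such a follow-on rule (the record name is hypothetical, not part of this group):

```yaml
# Illustrative only: rolls the recorded per-container series up to one value per node.
- record: node:container_memory_working_set_bytes:sum
  expr: sum by (node) (node_namespace_pod_container:container_memory_working_set_bytes)
```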
| Rule | State | Error | Last Evaluation | Evaluation Time |
| record: apiserver_request:availability30d
expr: 1
- ((sum(increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d]))
- sum(increase(apiserver_request_duration_seconds_bucket{le="1",verb=~"POST|PUT|PATCH|DELETE"}[30d])))
+ (sum(increase(apiserver_request_duration_seconds_count{verb=~"LIST|GET"}[30d]))
- ((sum(increase(apiserver_request_duration_seconds_bucket{le="0.1",scope=~"resource|",verb=~"LIST|GET"}[30d]))
or vector(0)) + sum(increase(apiserver_request_duration_seconds_bucket{le="0.5",scope="namespace",verb=~"LIST|GET"}[30d]))
+ sum(increase(apiserver_request_duration_seconds_bucket{le="5",scope="cluster",verb=~"LIST|GET"}[30d]))))
+ sum(code:apiserver_request_total:increase30d{code=~"5.."} or vector(0)))
/ sum(code:apiserver_request_total:increase30d)
labels:
verb: all
| ok | | 33.466s ago | 715.5ms |
| record: apiserver_request:availability30d
expr: 1
- (sum(increase(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30d]))
- ((sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.1",scope=~"resource|",verb=~"LIST|GET"}[30d]))
or vector(0)) + sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.5",scope="namespace",verb=~"LIST|GET"}[30d]))
+ sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",le="5",scope="cluster",verb=~"LIST|GET"}[30d])))
+ sum(code:apiserver_request_total:increase30d{code=~"5..",verb="read"}
or vector(0))) / sum(code:apiserver_request_total:increase30d{verb="read"})
labels:
verb: read
| ok | | 32.751s ago | 504.1ms |
| record: apiserver_request:availability30d
expr: 1
- ((sum(increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d]))
- sum(increase(apiserver_request_duration_seconds_bucket{le="1",verb=~"POST|PUT|PATCH|DELETE"}[30d])))
+ sum(code:apiserver_request_total:increase30d{code=~"5..",verb="write"}
or vector(0))) / sum(code:apiserver_request_total:increase30d{verb="write"})
labels:
verb: write
| ok | | 32.247s ago | 204.8ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"2..",job="apiserver",verb="LIST"}[30d]))
| ok | | 32.042s ago | 340.2ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"2..",job="apiserver",verb="GET"}[30d]))
| ok | | 31.702s ago | 149ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"2..",job="apiserver",verb="POST"}[30d]))
| ok | | 31.553s ago | 49.39ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"2..",job="apiserver",verb="PUT"}[30d]))
| ok | | 31.504s ago | 43.44ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"2..",job="apiserver",verb="PATCH"}[30d]))
| ok | | 31.46s ago | 40.93ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"2..",job="apiserver",verb="DELETE"}[30d]))
| ok | | 31.42s ago | 22.68ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"3..",job="apiserver",verb="LIST"}[30d]))
| ok | | 31.397s ago | 585.1us |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"3..",job="apiserver",verb="GET"}[30d]))
| ok | | 31.396s ago | 413us |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"3..",job="apiserver",verb="POST"}[30d]))
| ok | | 31.396s ago | 299.2us |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"3..",job="apiserver",verb="PUT"}[30d]))
| ok | | 31.396s ago | 268.3us |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"3..",job="apiserver",verb="PATCH"}[30d]))
| ok | | 31.396s ago | 284.3us |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"3..",job="apiserver",verb="DELETE"}[30d]))
| ok | | 31.395s ago | 280.1us |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"4..",job="apiserver",verb="LIST"}[30d]))
| ok | | 31.395s ago | 424.8us |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"4..",job="apiserver",verb="GET"}[30d]))
| ok | | 31.395s ago | 28.09ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"4..",job="apiserver",verb="POST"}[30d]))
| ok | | 31.367s ago | 4.125ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"4..",job="apiserver",verb="PUT"}[30d]))
| ok | | 31.363s ago | 14.89ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"4..",job="apiserver",verb="PATCH"}[30d]))
| ok | | 31.348s ago | 16.29ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"4..",job="apiserver",verb="DELETE"}[30d]))
| ok | | 31.332s ago | 1.864ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"5..",job="apiserver",verb="LIST"}[30d]))
| ok | | 31.33s ago | 1.92ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"5..",job="apiserver",verb="GET"}[30d]))
| ok | | 31.328s ago | 13.53ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"5..",job="apiserver",verb="POST"}[30d]))
| ok | | 31.314s ago | 3.939ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"5..",job="apiserver",verb="PUT"}[30d]))
| ok | | 31.311s ago | 3.967ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"5..",job="apiserver",verb="PATCH"}[30d]))
| ok | | 31.307s ago | 7.022ms |
| record: code_verb:apiserver_request_total:increase30d
expr: sum
by(code, verb) (increase(apiserver_request_total{code=~"5..",job="apiserver",verb="DELETE"}[30d]))
| ok | | 31.3s ago | 407.7us |
| record: code:apiserver_request_total:increase30d
expr: sum
by(code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"})
labels:
verb: read
| ok | | 31.299s ago | 148.8us |
| record: code:apiserver_request_total:increase30d
expr: sum
by(code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"})
labels:
verb: write
| ok | | 31.299s ago | 190.3us |
| 3.859s ago | 924.5us |
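apiserver_request:availability30d is the fraction of API requests over the trailing 30 days that were both successful and fast enough for their verb class. Given an availability objective it converts directly into a remaining error budget; a sketch as a follow-on recording rule, assuming a 99% 30-day SLO (the objective and the rule name are assumptions, not taken from this page):

```yaml
# Illustrative only: 1 = full budget remaining, 0 = exhausted, negative = SLO violated.
- record: apiserver_request:errorbudget30d:remaining
  expr: (apiserver_request:availability30d{verb="all"} - 0.99) / (1 - 0.99)
```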
| Rule | State | Error | Last Evaluation | Evaluation Time |
| record: apiserver_request:burnrate1d
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1d]))
- ((sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.1",scope=~"resource|",verb=~"LIST|GET"}[1d]))
or vector(0)) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.5",scope="namespace",verb=~"LIST|GET"}[1d]))
+ sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="5",scope="cluster",verb=~"LIST|GET"}[1d]))))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"LIST|GET"}[1d])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d]))
labels:
verb: read
| ok | | 15.523s ago | 269.5ms |
| record: apiserver_request:burnrate1h
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1h]))
- ((sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.1",scope=~"resource|",verb=~"LIST|GET"}[1h]))
or vector(0)) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.5",scope="namespace",verb=~"LIST|GET"}[1h]))
+ sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="5",scope="cluster",verb=~"LIST|GET"}[1h]))))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"LIST|GET"}[1h])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h]))
labels:
verb: read
| ok | | 15.254s ago | 17.61ms |
| record: apiserver_request:burnrate2h
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[2h]))
- ((sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.1",scope=~"resource|",verb=~"LIST|GET"}[2h]))
or vector(0)) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.5",scope="namespace",verb=~"LIST|GET"}[2h]))
+ sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="5",scope="cluster",verb=~"LIST|GET"}[2h]))))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"LIST|GET"}[2h])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h]))
labels:
verb: read
| ok | | 15.236s ago | 35.99ms |
| record: apiserver_request:burnrate30m
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30m]))
- ((sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.1",scope=~"resource|",verb=~"LIST|GET"}[30m]))
or vector(0)) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.5",scope="namespace",verb=~"LIST|GET"}[30m]))
+ sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="5",scope="cluster",verb=~"LIST|GET"}[30m]))))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"LIST|GET"}[30m])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m]))
labels:
verb: read
| ok | | 15.201s ago | 15.87ms |
| record: apiserver_request:burnrate3d
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[3d]))
- ((sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.1",scope=~"resource|",verb=~"LIST|GET"}[3d]))
or vector(0)) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.5",scope="namespace",verb=~"LIST|GET"}[3d]))
+ sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="5",scope="cluster",verb=~"LIST|GET"}[3d]))))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"LIST|GET"}[3d])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d]))
labels:
verb: read
| ok | | 15.185s ago | 710.9ms |
| record: apiserver_request:burnrate5m
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[5m]))
- ((sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.1",scope=~"resource|",verb=~"LIST|GET"}[5m]))
or vector(0)) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.5",scope="namespace",verb=~"LIST|GET"}[5m]))
+ sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="5",scope="cluster",verb=~"LIST|GET"}[5m]))))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"LIST|GET"}[5m])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m]))
labels:
verb: read
| ok | | 14.474s ago | 8.928ms |
| record: apiserver_request:burnrate6h
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[6h]))
- ((sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.1",scope=~"resource|",verb=~"LIST|GET"}[6h]))
or vector(0)) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="0.5",scope="namespace",verb=~"LIST|GET"}[6h]))
+ sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="5",scope="cluster",verb=~"LIST|GET"}[6h]))))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"LIST|GET"}[6h])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h]))
labels:
verb: read
| ok | | 14.465s ago | 85.5ms |
| record: apiserver_request:burnrate1d
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d]))
- sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="1",verb=~"POST|PUT|PATCH|DELETE"}[1d])))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d]))
labels:
verb: write
| ok | | 14.38s ago | 101.5ms |
| record: apiserver_request:burnrate1h
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h]))
- sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="1",verb=~"POST|PUT|PATCH|DELETE"}[1h])))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h]))
labels:
verb: write
| ok | | 14.279s ago | 6.933ms |
| record: apiserver_request:burnrate2h
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h]))
- sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="1",verb=~"POST|PUT|PATCH|DELETE"}[2h])))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h]))
labels:
verb: write
| ok | | 14.272s ago | 13.91ms |
| record: apiserver_request:burnrate30m
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m]))
- sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="1",verb=~"POST|PUT|PATCH|DELETE"}[30m])))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m]))
labels:
verb: write
| ok | | 14.258s ago | 6.124ms |
| record: apiserver_request:burnrate3d
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d]))
- sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="1",verb=~"POST|PUT|PATCH|DELETE"}[3d])))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d]))
labels:
verb: write
| ok | | 14.252s ago | 288.4ms |
| record: apiserver_request:burnrate5m
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))
- sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="1",verb=~"POST|PUT|PATCH|DELETE"}[5m])))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))
labels:
verb: write
| ok | | 13.964s ago | 3.757ms |
| record: apiserver_request:burnrate6h
expr: ((sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h]))
- sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",le="1",verb=~"POST|PUT|PATCH|DELETE"}[6h])))
+ sum(rate(apiserver_request_total{code=~"5..",job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])))
/ sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h]))
labels:
verb: write
| ok | | 13.96s ago | 34.15ms |
| record: code_resource:apiserver_request_total:rate5m
expr: sum
by(code, resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m]))
labels:
verb: read
| ok | | 13.926s ago | 4.157ms |
| record: code_resource:apiserver_request_total:rate5m
expr: sum
by(code, resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))
labels:
verb: write
| ok | | 13.922s ago | 1.953ms |
| record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
expr: histogram_quantile(0.99,
sum by(le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET"}[5m])))
> 0
labels:
quantile: "0.99"
verb: read
| ok | | 13.92s ago | 61.9ms |
| record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
expr: histogram_quantile(0.99,
sum by(le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])))
> 0
labels:
quantile: "0.99"
verb: write
| ok | | 13.858s ago | 24.89ms |
| record: cluster:apiserver_request_duration_seconds:mean5m
expr: sum
without(instance, pod) (rate(apiserver_request_duration_seconds_sum{subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m]))
/ sum without(instance, pod) (rate(apiserver_request_duration_seconds_count{subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m]))
| ok | | 13.834s ago | 4.544ms |
| record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
expr: histogram_quantile(0.99,
sum without(instance, pod) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])))
labels:
quantile: "0.99"
| ok | | 13.829s ago | 53.61ms |
| record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
expr: histogram_quantile(0.9,
sum without(instance, pod) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])))
labels:
quantile: "0.9"
| ok | | 13.776s ago | 53.39ms |
| record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
expr: histogram_quantile(0.5,
sum without(instance, pod) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])))
labels:
quantile: "0.5"
| ok | | 13.722s ago | 54.44ms |
| 12.251s ago | 2.511ms |
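The burnrate records in this group exist so that a single alert can require a fast error-budget burn over both a long and a short window at once (the multi-window, multi-burn-rate pattern); the alert itself is not shown in this excerpt. A sketch of how the records are typically combined, assuming a 99% 30-day SLO and the conventional 14.4x fast-burn factor (both assumptions, not taken from this page):

```yaml
# Illustrative multi-window burn-rate alert built on the records above.
- alert: KubeAPIErrorBudgetBurn
  expr: sum(apiserver_request:burnrate1h) > (14.40 * (1 - 0.99)) and sum(apiserver_request:burnrate5m) > (14.40 * (1 - 0.99))
  for: 2m
  labels:
    severity: critical
  annotations:
    message: The API server is burning too much 30-day error budget.
```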
| Rule | State | Error | Last Evaluation | Evaluation Time |
| alert: KubePodCrashLooping
expr: rate(kube_pod_container_status_restarts_total{job="kube-state-metrics",namespace=~".*"}[5m])
* 60 * 5 > 0
for: 15m
labels:
severity: warning
annotations:
description: Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container
}}) is restarting {{ printf "%.2f" $value }} times / 5 minutes.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodcrashlooping
summary: Pod is crash looping.
| ok | | 18.24s ago | 4.442ms |
| alert: KubePodNotReady
expr: sum
by(namespace, pod) (max by(namespace, pod) (kube_pod_status_phase{job="kube-state-metrics",namespace=~".*",phase=~"Pending|Unknown"})
* on(namespace, pod) group_left(owner_kind) topk by(namespace, pod) (1, max by(namespace,
pod, owner_kind) (kube_pod_owner{owner_kind!="Job"}))) > 0
for: 15m
labels:
severity: warning
annotations:
description: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready
state for longer than 15 minutes.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodnotready
summary: Pod has been in a non-ready state for more than 15 minutes.
| ok | | 18.236s ago | 5.328ms |
| alert: KubeDeploymentGenerationMismatch
expr: kube_deployment_status_observed_generation{job="kube-state-metrics",namespace=~".*"}
!= kube_deployment_metadata_generation{job="kube-state-metrics",namespace=~".*"}
for: 15m
labels:
severity: warning
annotations:
description: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment
}} does not match; this indicates that the Deployment has failed but has not been
rolled back.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentgenerationmismatch
summary: Deployment generation mismatch due to possible roll-back
| ok | | 18.231s ago | 528.4us |
| alert: KubeDeploymentReplicasMismatch
expr: (kube_deployment_spec_replicas{job="kube-state-metrics",namespace=~".*"}
!= kube_deployment_status_replicas_available{job="kube-state-metrics",namespace=~".*"})
and (changes(kube_deployment_status_replicas_updated{job="kube-state-metrics",namespace=~".*"}[5m])
== 0)
for: 15m
labels:
severity: warning
annotations:
description: Deployment {{ $labels.namespace }}/{{ $labels.deployment }} has not
matched the expected number of replicas for longer than 15 minutes.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentreplicasmismatch
summary: Deployment has not matched the expected number of replicas.
| ok | | 18.23s ago | 1.354ms |
| alert: KubeStatefulSetReplicasMismatch
expr: (kube_statefulset_status_replicas_ready{job="kube-state-metrics",namespace=~".*"}
!= kube_statefulset_status_replicas{job="kube-state-metrics",namespace=~".*"})
and (changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics",namespace=~".*"}[5m])
== 0)
for: 15m
labels:
severity: warning
annotations:
description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not
matched the expected number of replicas for longer than 15 minutes.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetreplicasmismatch
summary: StatefulSet has not matched the expected number of replicas.
| ok | | 18.229s ago | 650.5us |
| alert: KubeStatefulSetGenerationMismatch
expr: kube_statefulset_status_observed_generation{job="kube-state-metrics",namespace=~".*"}
!= kube_statefulset_metadata_generation{job="kube-state-metrics",namespace=~".*"}
for: 15m
labels:
severity: warning
annotations:
description: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset
}} does not match; this indicates that the StatefulSet has failed but has not
been rolled back.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetgenerationmismatch
summary: StatefulSet generation mismatch due to possible roll-back
| ok | | 18.228s ago | 248.7us |
| alert: KubeStatefulSetUpdateNotRolledOut
expr: (max
without(revision) (kube_statefulset_status_current_revision{job="kube-state-metrics",namespace=~".*"}
unless kube_statefulset_status_update_revision{job="kube-state-metrics",namespace=~".*"})
* (kube_statefulset_replicas{job="kube-state-metrics",namespace=~".*"}
!= kube_statefulset_status_replicas_updated{job="kube-state-metrics",namespace=~".*"}))
and (changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics",namespace=~".*"}[5m])
== 0)
for: 15m
labels:
severity: warning
annotations:
description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update
has not been rolled out.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetupdatenotrolledout
summary: StatefulSet update has not been rolled out.
| ok | | 18.228s ago | 573.4us |
| alert: KubeDaemonSetRolloutStuck
expr: ((kube_daemonset_status_current_number_scheduled{job="kube-state-metrics",namespace=~".*"}
!= kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics",namespace=~".*"})
or (kube_daemonset_status_number_misscheduled{job="kube-state-metrics",namespace=~".*"}
!= 0) or (kube_daemonset_updated_number_scheduled{job="kube-state-metrics",namespace=~".*"}
!= kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics",namespace=~".*"})
or (kube_daemonset_status_number_available{job="kube-state-metrics",namespace=~".*"}
!= kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics",namespace=~".*"}))
and (changes(kube_daemonset_updated_number_scheduled{job="kube-state-metrics",namespace=~".*"}[5m])
== 0)
for: 15m
labels:
severity: warning
annotations:
description: DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished
or progressed for at least 15 minutes.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetrolloutstuck
summary: DaemonSet rollout is stuck.
| ok | | 18.228s ago | 618.4us |
| alert: KubeContainerWaiting
expr: sum
by(namespace, pod, container) (kube_pod_container_status_waiting_reason{job="kube-state-metrics",namespace=~".*"})
> 0
for: 1h
labels:
severity: warning
annotations:
description: Pod {{ $labels.namespace }}/{{ $labels.pod }} container {{ $labels.container }}
has been in waiting state for longer than 1 hour.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting
summary: Pod container waiting longer than 1 hour
| ok | | 18.228s ago | 25.38ms |
| alert: KubeDaemonSetNotScheduled
expr: kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics",namespace=~".*"}
- kube_daemonset_status_current_number_scheduled{job="kube-state-metrics",namespace=~".*"}
> 0
for: 10m
labels:
severity: warning
annotations:
description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset
}} are not scheduled.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetnotscheduled
summary: DaemonSet pods are not scheduled.
| ok | | 18.203s ago | 256.5us |
| alert: KubeDaemonSetMisScheduled
expr: kube_daemonset_status_number_misscheduled{job="kube-state-metrics",namespace=~".*"}
> 0
for: 15m
labels:
severity: warning
annotations:
description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset
}} are running where they are not supposed to run.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetmisscheduled
summary: DaemonSet pods are misscheduled.
| ok | | 18.202s ago | 81.25us |
| alert: KubeJobCompletion
expr: kube_job_spec_completions{job="kube-state-metrics",namespace=~".*"}
- kube_job_status_succeeded{job="kube-state-metrics",namespace=~".*"}
> 0
for: 12h
labels:
severity: warning
annotations:
description: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than
12 hours to complete.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion
summary: Job did not complete in time
| ok | | 18.202s ago | 95.89ms |
| alert: KubeJobFailed
expr: kube_job_failed{job="kube-state-metrics",namespace=~".*"}
> 0
for: 15m
labels:
severity: warning
annotations:
description: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobfailed
summary: Job failed to complete.
| ok | | 18.107s ago | 9.723ms |
| alert: KubeHpaReplicasMismatch
expr: (kube_hpa_status_desired_replicas{job="kube-state-metrics",namespace=~".*"}
!= kube_hpa_status_current_replicas{job="kube-state-metrics",namespace=~".*"})
and changes(kube_hpa_status_current_replicas[15m]) == 0
for: 15m
labels:
severity: warning
annotations:
description: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has not matched the desired
number of replicas for longer than 15 minutes.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubehpareplicasmismatch
summary: HPA has not matched the desired number of replicas.
| ok | | 18.097s ago | 284.4us |
| alert: KubeHpaMaxedOut
expr: kube_hpa_status_current_replicas{job="kube-state-metrics",namespace=~".*"}
== kube_hpa_spec_max_replicas{job="kube-state-metrics",namespace=~".*"}
for: 15m
labels:
severity: warning
annotations:
description: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has been running at max
replicas for longer than 15 minutes.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubehpamaxedout
summary: HPA is running at max replicas
| ok | | 18.097s ago | 125.8us |
| 15.995s ago | 4.334ms |
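Alerts in this group are good candidates for promtool's rule unit tests, since the for clauses and label joins are easy to get subtly wrong. A minimal sketch for KubeJobFailed, assuming the group above is saved as kubernetes-apps.yaml (the file name and input series values are illustrative):

```yaml
# kubejobfailed_test.yaml -- run with: promtool test rules kubejobfailed_test.yaml
rule_files:
  - kubernetes-apps.yaml
evaluation_interval: 1m
tests:
  - interval: 1m
    input_series:
      # A Job that stays marked as failed for the whole test window.
      - series: 'kube_job_failed{job="kube-state-metrics",namespace="default",job_name="backup-1"}'
        values: '1+0x30'
    alert_rule_test:
      - eval_time: 20m   # past the 15m "for" clause, so the alert is firing
        alertname: KubeJobFailed
        exp_alerts:
          - exp_labels:
              severity: warning
              job: kube-state-metrics
              namespace: default
              job_name: backup-1
            exp_annotations:
              description: Job default/backup-1 failed to complete.
              runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobfailed
              summary: Job failed to complete.
```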
| Rule | State | Error | Last Evaluation | Evaluation Time |
| alert: KubeCPUOvercommit
expr: sum(namespace:kube_pod_container_resource_requests_cpu_cores:sum)
/ sum(kube_node_status_allocatable_cpu_cores) > (count(kube_node_status_allocatable_cpu_cores)
- 1) / count(kube_node_status_allocatable_cpu_cores)
for: 5m
labels:
severity: warning
annotations:
description: Cluster has overcommitted CPU resource requests for Pods and cannot
tolerate node failure.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit
summary: Cluster has overcommitted CPU resource requests.
| ok | | 15.995s ago | 522.2us |
| alert: KubeMemoryOvercommit
expr: sum(namespace:kube_pod_container_resource_requests_memory_bytes:sum)
/ sum(kube_node_status_allocatable_memory_bytes) > (count(kube_node_status_allocatable_memory_bytes)
- 1) / count(kube_node_status_allocatable_memory_bytes)
for: 5m
labels:
severity: warning
annotations:
description: Cluster has overcommitted memory resource requests for Pods and cannot
tolerate node failure.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememoryovercommit
summary: Cluster has overcommitted memory resource requests.
| ok | | 15.994s ago | 336.6us |
| alert: KubeCPUQuotaOvercommit
expr: sum(kube_resourcequota{job="kube-state-metrics",resource="cpu",type="hard"})
/ sum(kube_node_status_allocatable_cpu_cores) > 1.5
for: 5m
labels:
severity: warning
annotations:
description: Cluster has overcommitted CPU resource requests for Namespaces.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuquotaovercommit
summary: Cluster has overcommitted CPU resource requests.
| ok | | 15.994s ago | 200.1us |
| alert: KubeMemoryQuotaOvercommit
expr: sum(kube_resourcequota{job="kube-state-metrics",resource="memory",type="hard"})
/ sum(kube_node_status_allocatable_memory_bytes{job="node-exporter"}) >
1.5
for: 5m
labels:
severity: warning
annotations:
description: Cluster has overcommitted memory resource requests for Namespaces.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememoryquotaovercommit
summary: Cluster has overcommitted memory resource requests.
| ok | | 15.994s ago | 88.09us |
| alert: KubeQuotaAlmostFull
expr: kube_resourcequota{job="kube-state-metrics",type="used"}
/ ignoring(instance, job, type) (kube_resourcequota{job="kube-state-metrics",type="hard"}
> 0) > 0.9 < 1
for: 15m
labels:
severity: info
annotations:
description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage
}} of its {{ $labels.resource }} quota.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubequotaalmostfull
summary: Namespace quota is going to be full.
| ok | | 15.994s ago | 96.73us |
| alert: KubeQuotaFullyUsed
expr: kube_resourcequota{job="kube-state-metrics",type="used"}
/ ignoring(instance, job, type) (kube_resourcequota{job="kube-state-metrics",type="hard"}
> 0) == 1
for: 15m
labels:
severity: info
annotations:
description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage
}} of its {{ $labels.resource }} quota.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubequotafullyused
summary: Namespace quota is fully used.
| ok | | 15.994s ago | 75.38us |
| alert: KubeQuotaExceeded
expr: kube_resourcequota{job="kube-state-metrics",type="used"}
/ ignoring(instance, job, type) (kube_resourcequota{job="kube-state-metrics",type="hard"}
> 0) > 1
for: 15m
labels:
severity: warning
annotations:
description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage
}} of its {{ $labels.resource }} quota.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubequotaexceeded
summary: Namespace quota has exceeded the limits.
| ok | | 15.994s ago | 84.01us |
| alert: CPUThrottlingHigh
expr: sum
by(container, pod, namespace) (increase(container_cpu_cfs_throttled_periods_total{container!=""}[5m]))
/ sum by(container, pod, namespace) (increase(container_cpu_cfs_periods_total[5m]))
> (25 / 100)
for: 15m
labels:
severity: info
annotations:
description: '{{ $value | humanizePercentage }} throttling of CPU in namespace {{
$labels.namespace }} for container {{ $labels.container }} in pod {{ $labels.pod
}}.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh
summary: Processes experience elevated CPU throttling.
| ok | | 15.994s ago | 2.913ms |
| 8.487s ago | 1.421ms |
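CPUThrottlingHigh fires on the ratio of throttled to total CFS periods, which usually means a container's CPU limit is tight relative to what it actually uses; the usual response is to raise or remove the limit rather than to silence the alert. A sketch of the knob involved, with purely illustrative values:

```yaml
# Container spec fragment; the numbers are examples, not recommendations.
resources:
  requests:
    cpu: 200m   # what the scheduler reserves for the container
  limits:
    cpu: "1"    # the CFS quota whose exhaustion shows up as throttled periods
```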
| Rule | State | Error | Last Evaluation | Evaluation Time |
| alert: KubeClientCertificateExpiration
expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"}
> 0 and on(job) histogram_quantile(0.01, sum by(job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m])))
< 604800
labels:
severity: warning
annotations:
description: A client certificate used to authenticate to the apiserver is expiring
in less than 7.0 days.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclientcertificateexpiration
summary: Client certificate is about to expire.
| ok | | 22.195s ago | 1.58ms |
| alert: KubeClientCertificateExpiration
expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"}
> 0 and on(job) histogram_quantile(0.01, sum by(job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m])))
< 86400
labels:
severity: critical
annotations:
description: A client certificate used to authenticate to the apiserver is expiring
in less than 24.0 hours.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclientcertificateexpiration
summary: Client certificate is about to expire.
| ok | | 22.193s ago | 1.203ms |
| alert: AggregatedAPIErrors
expr: sum
by(name, namespace) (increase(aggregator_unavailable_apiservice_count[5m])) >
2
labels:
severity: warning
annotations:
description: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has reported
errors. The number of errors has increased for it in the past five minutes. High
values indicate that the availability of the service changes too often.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-aggregatedapierrors
summary: An aggregated API has reported errors.
| ok | | 22.192s ago | 114.8us |
| alert: AggregatedAPIDown
expr: (1
- max by(name, namespace) (avg_over_time(aggregator_unavailable_apiservice[10m])))
* 100 < 85
for: 5m
labels:
severity: warning
annotations:
description: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has been
only {{ $value | humanize }}% available over the last 10m.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-aggregatedapidown
summary: An aggregated API is down.
| ok | | 22.192s ago | 106.6us |
| alert: KubeAPIDown
expr: absent(up{job="apiserver"}
== 1)
for: 15m
labels:
severity: critical
annotations:
description: KubeAPI has disappeared from Prometheus target discovery.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapidown
summary: Target disappeared from Prometheus target discovery.
| ok | | 22.192s ago | 94.16us |
| 15.498s ago | 241.2us |
| Rule | State | Error | Last Evaluation | Evaluation Time |
| alert: KubeNodeNotReady
expr: kube_node_status_condition{condition="Ready",job="kube-state-metrics",status="true"}
== 0
for: 15m
labels:
severity: warning
annotations:
description: '{{ $labels.node }} has been unready for more than 15 minutes.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodenotready
summary: Node is not ready.
| ok | | 22.305s ago | 304.2us |
| alert: KubeNodeUnreachable
expr: (kube_node_spec_taint{effect="NoSchedule",job="kube-state-metrics",key="node.kubernetes.io/unreachable"}
unless ignoring(key, value) kube_node_spec_taint{job="kube-state-metrics",key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"})
== 1
for: 15m
labels:
severity: warning
annotations:
description: '{{ $labels.node }} is unreachable and some workloads may be rescheduled.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodeunreachable
summary: Node is unreachable.
| ok | | 22.304s ago | 208.4us |
| alert: KubeletTooManyPods
expr: count
by(node) ((kube_pod_status_phase{job="kube-state-metrics",phase="Running"}
== 1) * on(instance, pod, namespace, cluster) group_left(node) topk by(instance,
pod, namespace, cluster) (1, kube_pod_info{job="kube-state-metrics"})) /
max by(node) (kube_node_status_capacity_pods{job="kube-state-metrics"} !=
1) > 0.95
for: 15m
labels:
severity: warning
annotations:
description: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage
}} of its Pod capacity.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubelettoomanypods
summary: Kubelet is running at capacity.
| ok | | 22.304s ago | 4.898ms |
| alert: KubeNodeReadinessFlapping
expr: sum
by(node) (changes(kube_node_status_condition{condition="Ready",status="true"}[15m]))
> 2
for: 15m
labels:
severity: warning
annotations:
description: The readiness status of node {{ $labels.node }} has changed {{ $value
}} times in the last 15 minutes.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodereadinessflapping
summary: Node readiness status is flapping.
| ok | | 22.3s ago | 205.9us |
| alert: KubeletPlegDurationHigh
expr: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"}
>= 10
for: 5m
labels:
severity: warning
annotations:
description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration
of {{ $value }} seconds on node {{ $labels.node }}.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletplegdurationhigh
summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist.
| ok | | 22.3s ago | 171.2us |
| alert: KubeletPodStartUpLatencyHigh
expr: histogram_quantile(0.99,
sum by(instance, le) (rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet",metrics_path="/metrics"}[5m])))
* on(instance) group_left(node) kubelet_node_name{job="kubelet",metrics_path="/metrics"}
> 60
for: 15m
labels:
severity: warning
annotations:
description: Kubelet Pod startup 99th percentile latency is {{ $value }} seconds
on node {{ $labels.node }}.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletpodstartuplatencyhigh
summary: Kubelet Pod startup latency is too high.
| ok | | 22.3s ago | 2.467ms |
| alert: KubeletClientCertificateExpiration
expr: kubelet_certificate_manager_client_ttl_seconds
< 604800
labels:
severity: warning
annotations:
description: Client certificate for Kubelet on node {{ $labels.node }} expires in
{{ $value | humanizeDuration }}.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletclientcertificateexpiration
summary: Kubelet client certificate is about to expire.
| ok | | 22.297s ago | 83.26us |
| alert: KubeletClientCertificateExpiration
expr: kubelet_certificate_manager_client_ttl_seconds
< 86400
labels:
severity: critical
annotations:
description: Client certificate for Kubelet on node {{ $labels.node }} expires in
{{ $value | humanizeDuration }}.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletclientcertificateexpiration
summary: Kubelet client certificate is about to expire.
| ok | | 22.297s ago | 56.48us |
| alert: KubeletServerCertificateExpiration
expr: kubelet_certificate_manager_server_ttl_seconds
< 604800
labels:
severity: warning
annotations:
description: Server certificate for Kubelet on node {{ $labels.node }} expires in
{{ $value | humanizeDuration }}.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletservercertificateexpiration
summary: Kubelet server certificate is about to expire.
| ok | | 22.297s ago | 40.85us |
| alert: KubeletServerCertificateExpiration
expr: kubelet_certificate_manager_server_ttl_seconds
< 86400
labels:
severity: critical
annotations:
description: Server certificate for Kubelet on node {{ $labels.node }} expires in
{{ $value | humanizeDuration }}.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletservercertificateexpiration
summary: Kubelet server certificate is about to expire.
| ok | | 22.297s ago | 31.3us |
| alert: KubeletClientCertificateRenewalErrors
expr: increase(kubelet_certificate_manager_client_expiration_renew_errors[5m])
> 0
for: 15m
labels:
severity: warning
annotations:
description: Kubelet on node {{ $labels.node }} has failed to renew its client certificate
({{ $value | humanize }} errors in the last 5 minutes).
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletclientcertificaterenewalerrors
summary: Kubelet has failed to renew its client certificate.
| ok | | 22.297s ago | 54.07us |
| alert: KubeletServerCertificateRenewalErrors
expr: increase(kubelet_server_expiration_renew_errors[5m])
> 0
for: 15m
labels:
severity: warning
annotations:
description: Kubelet on node {{ $labels.node }} has failed to renew its server certificate
({{ $value | humanize }} errors in the last 5 minutes).
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletservercertificaterenewalerrors
summary: Kubelet has failed to renew its server certificate.
| ok | | 22.297s ago | 58.62us |
| alert: KubeletDown
expr: absent(up{job="kubelet",metrics_path="/metrics"}
== 1)
for: 15m
labels:
severity: critical
annotations:
description: Kubelet has disappeared from Prometheus target discovery.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletdown
summary: Target disappeared from Prometheus target discovery.
| ok | | 22.297s ago | 160.6us |
| 708ms ago | 314us |
| Rule | State | Error | Last Evaluation | Evaluation Time |
| alert: NodeFilesystemSpaceFillingUp
expr: (node_filesystem_avail_bytes{fstype!="",job="node-exporter"}
/ node_filesystem_size_bytes{fstype!="",job="node-exporter"} * 100
< 40 and predict_linear(node_filesystem_avail_bytes{fstype!="",job="node-exporter"}[6h],
24 * 60 * 60) < 0 and node_filesystem_readonly{fstype!="",job="node-exporter"}
== 0)
for: 1h
labels:
severity: warning
annotations:
description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only
{{ printf "%.2f" $value }}% available space left and is filling up.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemspacefillingup
summary: Filesystem is predicted to run out of space within the next 24 hours.
| ok | | 9.601s ago | 2.878ms |
| alert: NodeFilesystemSpaceFillingUp
expr: (node_filesystem_avail_bytes{fstype!="",job="node-exporter"}
/ node_filesystem_size_bytes{fstype!="",job="node-exporter"} * 100
< 15 and predict_linear(node_filesystem_avail_bytes{fstype!="",job="node-exporter"}[6h],
4 * 60 * 60) < 0 and node_filesystem_readonly{fstype!="",job="node-exporter"}
== 0)
for: 1h
labels:
severity: critical
annotations:
description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only
{{ printf "%.2f" $value }}% available space left and is filling up fast.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemspacefillingup
summary: Filesystem is predicted to run out of space within the next 4 hours.
| ok | | 9.598s ago | 2.723ms |
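Both NodeFilesystemSpaceFillingUp rules rely on predict_linear, which fits a linear trend to the last 6h of free-space samples and extrapolates it the given number of seconds ahead; a negative result means the fitted line crosses zero within that horizon. The same expression can be run ad hoc to see which mounts are trending toward full (the mountpoint selector is only an example):

```
predict_linear(node_filesystem_avail_bytes{job="node-exporter",mountpoint="/"}[6h], 4 * 3600)
```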
| alert: NodeFilesystemAlmostOutOfSpace
expr: (node_filesystem_avail_bytes{fstype!="",job="node-exporter"}
/ node_filesystem_size_bytes{fstype!="",job="node-exporter"} * 100
< 5 and node_filesystem_readonly{fstype!="",job="node-exporter"}
== 0)
for: 1h
labels:
severity: warning
annotations:
description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only
{{ printf "%.2f" $value }}% available space left.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemalmostoutofspace
summary: Filesystem has less than 5% space left.
| ok | | 9.595s ago | 453us |
| alert: NodeFilesystemAlmostOutOfSpace
expr: (node_filesystem_avail_bytes{fstype!="",job="node-exporter"}
/ node_filesystem_size_bytes{fstype!="",job="node-exporter"} * 100
< 3 and node_filesystem_readonly{fstype!="",job="node-exporter"}
== 0)
for: 1h
labels:
severity: critical
annotations:
description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only
{{ printf "%.2f" $value }}% available space left.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemalmostoutofspace
summary: Filesystem has less than 3% space left.
| ok | | 9.595s ago | 356.2us |
| alert: NodeFilesystemFilesFillingUp
expr: (node_filesystem_files_free{fstype!="",job="node-exporter"}
/ node_filesystem_files{fstype!="",job="node-exporter"} * 100 <
40 and predict_linear(node_filesystem_files_free{fstype!="",job="node-exporter"}[6h],
24 * 60 * 60) < 0 and node_filesystem_readonly{fstype!="",job="node-exporter"}
== 0)
for: 1h
labels:
severity: warning
annotations:
description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only
{{ printf "%.2f" $value }}% available inodes left and is filling up.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemfilesfillingup
summary: Filesystem is predicted to run out of inodes within the next 24 hours.
| ok | | 9.595s ago | 2.101ms |
| alert: NodeFilesystemFilesFillingUp
expr: (node_filesystem_files_free{fstype!="",job="node-exporter"}
/ node_filesystem_files{fstype!="",job="node-exporter"} * 100 <
20 and predict_linear(node_filesystem_files_free{fstype!="",job="node-exporter"}[6h],
4 * 60 * 60) < 0 and node_filesystem_readonly{fstype!="",job="node-exporter"}
== 0)
for: 1h
labels:
severity: critical
annotations:
description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only
{{ printf "%.2f" $value }}% available inodes left and is filling up fast.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemfilesfillingup
summary: Filesystem is predicted to run out of inodes within the next 4 hours.
|
ok
|
|
9.593s ago
|
2.176ms |
| alert: NodeFilesystemAlmostOutOfFiles
expr: (node_filesystem_files_free{fstype!="",job="node-exporter"}
/ node_filesystem_files{fstype!="",job="node-exporter"} * 100 <
5 and node_filesystem_readonly{fstype!="",job="node-exporter"} ==
0)
for: 1h
labels:
severity: warning
annotations:
description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only
{{ printf "%.2f" $value }}% available inodes left.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemalmostoutoffiles
summary: Filesystem has less than 5% inodes left.
|
ok
|
|
9.591s ago
|
452.1us |
| alert: NodeFilesystemAlmostOutOfFiles
expr: (node_filesystem_files_free{fstype!="",job="node-exporter"}
/ node_filesystem_files{fstype!="",job="node-exporter"} * 100 <
3 and node_filesystem_readonly{fstype!="",job="node-exporter"} ==
0)
for: 1h
labels:
severity: critical
annotations:
description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only
{{ printf "%.2f" $value }}% available inodes left.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemalmostoutoffiles
summary: Filesystem has less than 3% inodes left.
|
ok
|
|
9.59s ago
|
355.8us |
| alert: NodeNetworkReceiveErrs
expr: increase(node_network_receive_errs_total[2m])
> 10
for: 1h
labels:
severity: warning
annotations:
description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered
{{ printf "%.0f" $value }} receive errors in the last two minutes.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodenetworkreceiveerrs
summary: Network interface is reporting many receive errors.
|
ok
|
|
9.59s ago
|
1.306ms |
| alert: NodeNetworkTransmitErrs
expr: increase(node_network_transmit_errs_total[2m])
> 10
for: 1h
labels:
severity: warning
annotations:
description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered
{{ printf "%.0f" $value }} transmit errors in the last two minutes.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodenetworktransmiterrs
summary: Network interface is reporting many transmit errors.
|
ok
|
|
9.589s ago
|
1.239ms |
| alert: NodeHighNumberConntrackEntriesUsed
expr: (node_nf_conntrack_entries
/ node_nf_conntrack_entries_limit) > 0.75
labels:
severity: warning
annotations:
description: '{{ $value | humanizePercentage }} of conntrack entries are used.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodehighnumberconntrackentriesused
summary: Number of conntrack entries is getting close to the limit.
|
ok
|
|
9.588s ago
|
251.4us |
| alert: NodeTextFileCollectorScrapeError
expr: node_textfile_scrape_error{job="node-exporter"}
== 1
labels:
severity: warning
annotations:
description: Node Exporter text file collector failed to scrape.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodetextfilecollectorscrapeerror
summary: Node Exporter text file collector failed to scrape.
|
ok
|
|
9.588s ago
|
117.5us |
| alert: NodeClockSkewDetected
expr: (node_timex_offset_seconds
> 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds
< -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
for: 10m
labels:
severity: warning
annotations:
message: Clock on {{ $labels.instance }} is out of sync by more than 0.05s. Ensure
NTP is configured correctly on this host.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodeclockskewdetected
summary: Clock skew detected.
|
ok
|
|
9.588s ago
|
872.1us |
| alert: NodeClockNotSynchronising
expr: min_over_time(node_timex_sync_status[5m])
== 0
for: 10m
labels:
severity: warning
annotations:
message: Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured
on this host.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodeclocknotsynchronising
summary: Clock not synchronising.
|
ok
|
|
9.587s ago
|
145.4us |
| alert: NodeRAIDDegraded
expr: node_md_disks_required
- ignoring(state) (node_md_disks{state="active"}) > 0
for: 15m
labels:
severity: critical
annotations:
description: RAID array '{{ $labels.device }}' on {{ $labels.instance }} is in
a degraded state due to one or more disk failures. The number of spare drives
is insufficient to fix the issue automatically.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-noderaiddegraded
summary: RAID Array is degraded
|
ok
|
|
9.587s ago
|
55us |
| alert: NodeRAIDDiskFailure
expr: node_md_disks{state="fail"}
> 0
labels:
severity: warning
annotations:
description: At least one device in RAID array on {{ $labels.instance }} failed.
Array '{{ $labels.device }}' needs attention and possibly a disk swap.
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-noderaiddiskfailure
summary: Failed device in RAID array
|
ok
|
|
9.587s ago
|
33.54us |
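The node-exporter filesystem alerts above pair a static free-space (or free-inode) threshold with predict_linear, so they only fire when a filesystem is both low and trending toward zero. As a rough sketch (the mountpoint value is illustrative and not taken from any rule above), the same projection can be run ad hoc in the Prometheus expression browser to see how many bytes a given mount is expected to have left in four hours; a negative result means the linear trend from the last six hours crosses zero inside that window:

    # Projected free bytes in 4 hours, based on the last 6 hours of samples.
    # The mountpoint value "/var" is only an example.
    predict_linear(
      node_filesystem_avail_bytes{job="node-exporter", mountpoint="/var"}[6h],
      4 * 60 * 60
    )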
|
10.375s ago |
1.813ms |
| Rule |
State |
Error |
Last Evaluation |
Evaluation Time |
| alert: PrometheusBadConfig
expr: max_over_time(prometheus_config_last_reload_successful{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
== 0
for: 10m
labels:
severity: critical
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload
its configuration.
summary: Failed Prometheus configuration reload.
|
ok
|
|
20.203s ago
|
353.3us |
| alert: PrometheusNotificationQueueRunningFull
expr: (predict_linear(prometheus_notifications_queue_length{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m],
60 * 30) > min_over_time(prometheus_notifications_queue_capacity{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m]))
for: 15m
labels:
severity: warning
annotations:
description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}}
is running full.
summary: Prometheus alert notification queue predicted to run full in less than
30m.
|
ok
|
|
20.202s ago
|
294.1us |
| alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
expr: (rate(prometheus_notifications_errors_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
/ rate(prometheus_notifications_sent_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m]))
* 100 > 1
for: 15m
labels:
severity: warning
annotations:
description: '{{ printf "%.1f" $value }}% errors while sending alerts from
Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.'
summary: Prometheus has encountered more than 1% errors sending alerts to a specific
Alertmanager.
|
ok
|
|
20.202s ago
|
244.7us |
| alert: PrometheusErrorSendingAlertsToAnyAlertmanager
expr: min
without(alertmanager) (rate(prometheus_notifications_errors_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
/ rate(prometheus_notifications_sent_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m]))
* 100 > 3
for: 15m
labels:
severity: critical
annotations:
description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts
from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.'
summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager.
|
ok
|
|
20.202s ago
|
286us |
| alert: PrometheusNotConnectedToAlertmanagers
expr: max_over_time(prometheus_notifications_alertmanagers_discovered{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
< 1
for: 10m
labels:
severity: warning
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to
any Alertmanagers.
summary: Prometheus is not connected to any Alertmanagers.
|
ok
|
|
20.202s ago
|
155.1us |
| alert: PrometheusTSDBReloadsFailing
expr: increase(prometheus_tsdb_reloads_failures_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[3h])
> 0
for: 4h
labels:
severity: warning
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value
| humanize}} reload failures over the last 3h.
summary: Prometheus has issues reloading blocks from disk.
|
ok
|
|
20.202s ago
|
296.5us |
| alert: PrometheusTSDBCompactionsFailing
expr: increase(prometheus_tsdb_compactions_failed_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[3h])
> 0
for: 4h
labels:
severity: warning
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value
| humanize}} compaction failures over the last 3h.
summary: Prometheus has issues compacting blocks.
|
ok
|
|
20.202s ago
|
239.7us |
| alert: PrometheusNotIngestingSamples
expr: rate(prometheus_tsdb_head_samples_appended_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
<= 0
for: 10m
labels:
severity: warning
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples.
summary: Prometheus is not ingesting samples.
|
ok
|
|
20.202s ago
|
130.2us |
| alert: PrometheusDuplicateTimestamps
expr: rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
> 0
for: 10m
labels:
severity: warning
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf
"%.4g" $value }} samples/s with different values but duplicated timestamp.
summary: Prometheus is dropping samples with duplicate timestamps.
|
ok
|
|
20.201s ago
|
129.8us |
| alert: PrometheusOutOfOrderTimestamps
expr: rate(prometheus_target_scrapes_sample_out_of_order_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
> 0
for: 10m
labels:
severity: warning
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf
"%.4g" $value }} samples/s with timestamps arriving out of order.
summary: Prometheus drops samples with out-of-order timestamps.
|
ok
|
|
20.201s ago
|
118us |
| alert: PrometheusRemoteStorageFailures
expr: (rate(prometheus_remote_storage_failed_samples_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
/ (rate(prometheus_remote_storage_failed_samples_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
+ rate(prometheus_remote_storage_succeeded_samples_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])))
* 100 > 1
for: 15m
labels:
severity: critical
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} failed to send {{
printf "%.1f" $value }}% of the samples to {{ $labels.remote_name}}:{{
$labels.url }}
summary: Prometheus fails to send samples to remote storage.
|
ok
|
|
20.201s ago
|
251.3us |
| alert: PrometheusRemoteWriteBehind
expr: (max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
- on(job, instance) group_right() max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m]))
> 120
for: 15m
labels:
severity: critical
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{
printf "%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url
}}.
summary: Prometheus remote write is behind.
|
ok
|
|
20.201s ago
|
204.5us |
| alert: PrometheusRemoteWriteDesiredShards
expr: (max_over_time(prometheus_remote_storage_shards_desired{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
> max_over_time(prometheus_remote_storage_shards_max{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m]))
for: 15m
labels:
severity: warning
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write desired
shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{
$labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%s",job="kube-prometheus-stack-prometheus",namespace="monitoring"}`
$labels.instance | query | first | value }}.
summary: Prometheus remote write desired shards calculation wants to run more than
configured max shards.
|
ok
|
|
20.201s ago
|
141.2us |
| alert: PrometheusRuleFailures
expr: increase(prometheus_rule_evaluation_failures_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
> 0
for: 15m
labels:
severity: critical
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate
{{ printf "%.0f" $value }} rules in the last 5m.
summary: Prometheus is failing rule evaluations.
|
ok
|
|
20.201s ago
|
478.5us |
| alert: PrometheusMissingRuleEvaluations
expr: increase(prometheus_rule_group_iterations_missed_total{job="kube-prometheus-stack-prometheus",namespace="monitoring"}[5m])
> 0
for: 15m
labels:
severity: warning
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf
"%.0f" $value }} rule group evaluations in the last 5m.
summary: Prometheus is missing rule evaluations due to slow rule group evaluation.
|
ok
|
|
20.201s ago
|
137.5us |
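PrometheusMissingRuleEvaluations typically points at a rule group whose evaluation takes longer than its configured interval, so whole iterations get skipped. As a diagnostic sketch (these are standard Prometheus self-metrics rather than expressions taken from the rules above, and the job/namespace selectors simply mirror the ones used in this group), comparing each group's last evaluation duration with its interval highlights the offending group:

    # Rule groups whose last evaluation ran longer than their evaluation interval;
    # these are the groups most likely to be missing iterations.
    prometheus_rule_group_last_duration_seconds{job="kube-prometheus-stack-prometheus",namespace="monitoring"}
      > prometheus_rule_group_interval_seconds{job="kube-prometheus-stack-prometheus",namespace="monitoring"}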