| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.79.200:9093/metrics | up | endpoint="web" instance="10.233.79.200:9093" job="kube-prometheus-stack-alertmanager" namespace="monitoring" pod="alertmanager-kube-prometheus-stack-alertmanager-0" service="kube-prometheus-stack-alertmanager" | 25.057s ago | 3.515ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| https://192.168.66.12:6443/metrics | up | endpoint="https" instance="192.168.66.12:6443" job="apiserver" namespace="default" service="kubernetes" | 992ms ago | 140.9ms | |
| https://192.168.66.13:6443/metrics | up | endpoint="https" instance="192.168.66.13:6443" job="apiserver" namespace="default" service="kubernetes" | 17.362s ago | 73.64ms | |
| https://192.168.66.14:6443/metrics | up | endpoint="https" instance="192.168.66.14:6443" job="apiserver" namespace="default" service="kubernetes" | 26.138s ago | 91.67ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.108.39:9153/metrics | up | endpoint="http-metrics" instance="10.233.108.39:9153" job="coredns" namespace="kube-system" pod="coredns-74c9d4d795-ttrdx" service="kube-prometheus-stack-coredns" | 9.273s ago | 3.095ms | |
| http://10.233.86.44:9153/metrics | down | endpoint="http-metrics" instance="10.233.86.44:9153" job="coredns" namespace="kube-system" pod="coredns-74c9d4d795-kftdj" service="kube-prometheus-stack-coredns" | 15.455s ago | 10s | Get "http://10.233.86.44:9153/metrics": dial tcp 10.233.86.44:9153: i/o timeout |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.79.87:3000/metrics | up | endpoint="service" instance="10.233.79.87:3000" job="kube-prometheus-stack-grafana" namespace="monitoring" pod="kube-prometheus-stack-grafana-6b7cb99c84-n8rqx" service="kube-prometheus-stack-grafana" | 24.052s ago | 2.868ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.66.12:10252/metrics | up | endpoint="http-metrics" instance="192.168.66.12:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-controller-manager" | 21.008s ago | 2.869ms | |
| http://192.168.66.13:10252/metrics | up | endpoint="http-metrics" instance="192.168.66.13:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-controller-manager" | 8.397s ago | 13ms | |
| http://192.168.66.14:10252/metrics | up | endpoint="http-metrics" instance="192.168.66.14:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-controller-manager" | 28.317s ago | 2.662ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.66.11:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.11:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-rfb9h" service="kube-prometheus-stack-kube-proxy" | 2.392s ago | 600.8us | Get "http://192.168.66.11:10249/metrics": dial tcp 192.168.66.11:10249: connect: connection refused |
| http://192.168.66.12:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.12:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-vgj7t" service="kube-prometheus-stack-kube-proxy" | 29.398s ago | 762.3us | Get "http://192.168.66.12:10249/metrics": dial tcp 192.168.66.12:10249: connect: connection refused |
| http://192.168.66.13:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.13:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-fv5pw" service="kube-prometheus-stack-kube-proxy" | 750ms ago | 678.9us | Get "http://192.168.66.13:10249/metrics": dial tcp 192.168.66.13:10249: connect: connection refused |
| http://192.168.66.14:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.14:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-mn48v" service="kube-prometheus-stack-kube-proxy" | 28.724s ago | 612.1us | Get "http://192.168.66.14:10249/metrics": dial tcp 192.168.66.14:10249: connect: connection refused |
| http://192.168.66.18:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.18:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-fpwhw" service="kube-prometheus-stack-kube-proxy" | 20.824s ago | 754.4us | Get "http://192.168.66.18:10249/metrics": dial tcp 192.168.66.18:10249: connect: connection refused |
| http://192.168.66.21:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.21:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-262mg" service="kube-prometheus-stack-kube-proxy" | 28.864s ago | 821.7us | Get "http://192.168.66.21:10249/metrics": dial tcp 192.168.66.21:10249: connect: connection refused |
| http://192.168.66.22:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.22:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-gwv5n" service="kube-prometheus-stack-kube-proxy" | 10.821s ago | 740.1us | Get "http://192.168.66.22:10249/metrics": dial tcp 192.168.66.22:10249: connect: connection refused |
| http://192.168.66.24:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.24:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-bl779" service="kube-prometheus-stack-kube-proxy" | 6.509s ago | 391.1us | Get "http://192.168.66.24:10249/metrics": dial tcp 192.168.66.24:10249: connect: connection refused |
| http://192.168.66.25:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.25:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-jxv7h" service="kube-prometheus-stack-kube-proxy" | 22.064s ago | 634.7us | Get "http://192.168.66.25:10249/metrics": dial tcp 192.168.66.25:10249: connect: connection refused |
| http://192.168.66.26:10249/metrics | down | endpoint="http-metrics" instance="192.168.66.26:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-77gzh" service="kube-prometheus-stack-kube-proxy" | 9.754s ago | 752.3us | Get "http://192.168.66.26:10249/metrics": dial tcp 192.168.66.26:10249: connect: connection refused |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.66.12:10251/metrics | up | endpoint="http-metrics" instance="192.168.66.12:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-scheduler" | 18.488s ago | 3.367ms | |
| http://192.168.66.13:10251/metrics | up | endpoint="http-metrics" instance="192.168.66.13:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-scheduler" | 7.033s ago | 4.168ms | |
| http://192.168.66.14:10251/metrics | up | endpoint="http-metrics" instance="192.168.66.14:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-scheduler" | 15.331s ago | 3.703ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.79.197:8080/metrics | up | endpoint="http" instance="10.233.79.197:8080" job="kube-state-metrics" namespace="monitoring" pod="kube-prometheus-stack-kube-state-metrics-857d997b65-wcgkq" service="kube-prometheus-stack-kube-state-metrics" | 11.501s ago | 78.61ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| https://192.168.66.11:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.11:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 24.176s ago | 25.67ms | |
| https://192.168.66.12:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.12:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 28.352s ago | 10.48ms | |
| https://192.168.66.13:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.13:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 18.44s ago | 20.03ms | |
| https://192.168.66.14:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.14:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 5.373s ago | 12.71ms | |
| https://192.168.66.18:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.18:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 13.169s ago | 23.92ms | |
| https://192.168.66.21:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.21:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-4.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 2.771s ago | 15.16ms | |
| https://192.168.66.22:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.22:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-5.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 16.859s ago | 23.54ms | |
| https://192.168.66.24:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.24:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-ingress-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 10.042s ago | 9.758ms | |
| https://192.168.66.25:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.25:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-6.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 22.451s ago | 43.39ms | |
| https://192.168.66.26:10250/metrics | up | endpoint="https-metrics" instance="192.168.66.26:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-ingress-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 2.33s ago | 19.62ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| https://192.168.66.11:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.11:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 11.042s ago | 85.67ms | |
| https://192.168.66.12:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.12:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 15.576s ago | 37.72ms | |
| https://192.168.66.13:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.13:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 4.345s ago | 58.34ms | |
| https://192.168.66.14:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.14:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 25.795s ago | 55.57ms | |
| https://192.168.66.18:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.18:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 4.671s ago | 155ms | |
| https://192.168.66.21:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.21:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-4.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 17.806s ago | 245.5ms | |
| https://192.168.66.22:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.22:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-5.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 11.911s ago | 120ms | |
| https://192.168.66.24:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.24:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-ingress-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 7.25s ago | 81.22ms | |
| https://192.168.66.25:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.25:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-6.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 25.757s ago | 90.65ms | |
| https://192.168.66.26:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.66.26:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-ingress-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 25.591s ago | 83ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| https://192.168.66.11:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.11:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 16.861s ago | 1.57ms | |
| https://192.168.66.12:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.12:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 14.937s ago | 1.362ms | |
| https://192.168.66.13:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.13:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 11.99s ago | 1.332ms | |
| https://192.168.66.14:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.14:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 1.879s ago | 3.197ms | |
| https://192.168.66.18:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.18:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 24.228s ago | 2.281ms | |
| https://192.168.66.21:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.21:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-4.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 24.368s ago | 2.155ms | |
| https://192.168.66.22:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.22:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-5.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 25.565s ago | 4.105ms | |
| https://192.168.66.24:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.24:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-ingress-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 10.713s ago | 1.255ms | |
| https://192.168.66.25:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.25:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-6.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 17.345s ago | 13.76ms | |
| https://192.168.66.26:10250/metrics/probes | up | endpoint="https-metrics" instance="192.168.66.26:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-ingress-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 3.375s ago | 1.622ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| https://192.168.66.11:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.11:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 18.225s ago | 20.52ms | |
| https://192.168.66.12:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.12:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 27.663s ago | 1.556ms | |
| https://192.168.66.13:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.13:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 19.8s ago | 1.743ms | |
| https://192.168.66.14:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.14:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 8.221s ago | 8.259ms | |
| https://192.168.66.18:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.18:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 25.533s ago | 3.218ms | |
| https://192.168.66.21:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.21:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-4.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 12.445s ago | 27.9ms | |
| https://192.168.66.22:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.22:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-5.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 13.856s ago | 19.29ms | |
| https://192.168.66.24:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.24:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-ingress-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 20.417s ago | 1.909ms | |
| https://192.168.66.25:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.25:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-6.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 27.523s ago | 18.34ms | |
| https://192.168.66.26:10250/metrics/resource/v1alpha1 | up | endpoint="https-metrics" instance="192.168.66.26:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-ingress-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" | 15.395s ago | 6.795ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.66.11:9100/metrics | up | endpoint="metrics" instance="192.168.66.11:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-5vj9n" service="kube-prometheus-stack-prometheus-node-exporter" | 24.951s ago | 43.99ms | |
| http://192.168.66.12:9100/metrics | up | endpoint="metrics" instance="192.168.66.12:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-5glnz" service="kube-prometheus-stack-prometheus-node-exporter" | 21.726s ago | 18.79ms | |
| http://192.168.66.13:9100/metrics | up | endpoint="metrics" instance="192.168.66.13:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-j5wrx" service="kube-prometheus-stack-prometheus-node-exporter" | 25.005s ago | 21.41ms | |
| http://192.168.66.14:9100/metrics | up | endpoint="metrics" instance="192.168.66.14:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-vgtxj" service="kube-prometheus-stack-prometheus-node-exporter" | 23.199s ago | 15.45ms | |
| http://192.168.66.18:9100/metrics | up | endpoint="metrics" instance="192.168.66.18:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-rknlq" service="kube-prometheus-stack-prometheus-node-exporter" | 22.129s ago | 43.88ms | |
| http://192.168.66.21:9100/metrics | up | endpoint="metrics" instance="192.168.66.21:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-cvnh6" service="kube-prometheus-stack-prometheus-node-exporter" | 7.731s ago | 56.79ms | |
| http://192.168.66.22:9100/metrics | up | endpoint="metrics" instance="192.168.66.22:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-fh7dg" service="kube-prometheus-stack-prometheus-node-exporter" | 24.789s ago | 39.09ms | |
| http://192.168.66.24:9100/metrics | up | endpoint="metrics" instance="192.168.66.24:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-gs8gk" service="kube-prometheus-stack-prometheus-node-exporter" | 25.739s ago | 48.27ms | |
| http://192.168.66.25:9100/metrics | up | endpoint="metrics" instance="192.168.66.25:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-92w7w" service="kube-prometheus-stack-prometheus-node-exporter" | 19.013s ago | 42.91ms | |
| http://192.168.66.26:9100/metrics | up | endpoint="metrics" instance="192.168.66.26:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-7p564" service="kube-prometheus-stack-prometheus-node-exporter" | 28.001s ago | 20.03ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.79.69:8080/metrics | up | endpoint="http" instance="10.233.79.69:8080" job="kube-prometheus-stack-operator" namespace="monitoring" pod="kube-prometheus-stack-operator-5b48747dff-w9tf7" service="kube-prometheus-stack-operator" | 8.619s ago | 2.447ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.108.51:9090/metrics | up | endpoint="web" instance="10.233.108.51:9090" job="kube-prometheus-stack-prometheus" namespace="monitoring" pod="prometheus-kube-prometheus-stack-prometheus-0" service="kube-prometheus-stack-prometheus" | 15.31s ago | 8.398ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.111.10:9913/metrics | down | endpoint="metrics" instance="10.233.111.10:9913" job="pmc-partner-api" namespace="pmc-production" pod="pmc-partner-api-7d4bb7b9c8-kf4d4" service="pmc-partner-api" tier="pmc-metrics-api" | 11.327s ago | 761.1us | Get "http://10.233.111.10:9913/metrics": dial tcp 10.233.111.10:9913: connect: connection refused |
| http://10.233.126.60:9913/metrics | down | endpoint="metrics" instance="10.233.126.60:9913" job="pmc-partner-api" namespace="pmc-production" pod="pmc-partner-api-7d4bb7b9c8-fcllm" service="pmc-partner-api" tier="pmc-metrics-api" | 14.305s ago | 770.2us | Get "http://10.233.126.60:9913/metrics": dial tcp 10.233.126.60:9913: connect: connection refused |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.110.156:15672/api/metrics | down | endpoint="http" instance="10.233.110.156:15672" job="rabbitmq-internal" namespace="default" pod="rabbitmq-1" service="rabbitmq-internal" tier="pmc-rabbit" | 7.453s ago | 756.7us | Get "http://10.233.110.156:15672/api/metrics": dial tcp 10.233.110.156:15672: connect: connection refused |
| http://10.233.115.35:15672/api/metrics | up | endpoint="http" instance="10.233.115.35:15672" job="rabbitmq-internal" namespace="default" pod="rabbitmq-0" service="rabbitmq-internal" tier="pmc-rabbit" | 26.358s ago | 311.7ms | |
| http://10.233.126.166:15672/api/metrics | up | endpoint="http" instance="10.233.126.166:15672" job="rabbitmq-internal" namespace="default" pod="rabbitmq-2" service="rabbitmq-internal" tier="pmc-rabbit" | 15.078s ago | 356.9ms | |

| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.233.126.150:8080/metrics | up | endpoint="http" instance="10.233.126.150:8080" job="pmc-statistic-metrics" namespace="pmc-production" pod="pmc-statistic-metrics-7469b6f694-skvkb" service="pmc-statistic-metrics" tier="pmc-statistic" | 11.238s ago | 44.66ms | |
