Targets


monitoring/kube-prometheus-stack-alertmanager/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.79.238:9093/metrics
up endpoint="web" instance="10.233.79.238:9093" job="kube-prometheus-stack-alertmanager" namespace="monitoring" pod="alertmanager-kube-prometheus-stack-alertmanager-0" service="kube-prometheus-stack-alertmanager" 26.12s ago 4.637ms

monitoring/kube-prometheus-stack-apiserver/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.66.12:6443/metrics
up endpoint="https" instance="192.168.66.12:6443" job="apiserver" namespace="default" service="kubernetes" 19.724s ago 132.1ms

monitoring/kube-prometheus-stack-coredns/0 (2/2 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.108.27:9153/metrics
up endpoint="http-metrics" instance="10.233.108.27:9153" job="coredns" namespace="kube-system" pod="coredns-74c9d4d795-ttrdx" service="kube-prometheus-stack-coredns" 4.783s ago 3.509ms
http://10.233.86.38:9153/metrics
up endpoint="http-metrics" instance="10.233.86.38:9153" job="coredns" namespace="kube-system" pod="coredns-74c9d4d795-27xmv" service="kube-prometheus-stack-coredns" 7.376s ago 5.067ms

monitoring/kube-prometheus-stack-grafana/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.79.73:3000/metrics
up endpoint="service" instance="10.233.79.73:3000" job="kube-prometheus-stack-grafana" namespace="monitoring" pod="kube-prometheus-stack-grafana-6b7cb99c84-n8rqx" service="kube-prometheus-stack-grafana" 11.1s ago 2.898ms

monitoring/kube-prometheus-stack-kube-controller-manager/0 (1/3 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://192.168.66.12:10252/metrics
up endpoint="http-metrics" instance="192.168.66.12:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-controller-manager" 9.74s ago 14.78ms
http://192.168.66.13:10252/metrics
down endpoint="http-metrics" instance="192.168.66.13:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-controller-manager" 27.129s ago 749.5us Get "http://192.168.66.13:10252/metrics": dial tcp 192.168.66.13:10252: connect: connection refused
http://192.168.66.14:10252/metrics
down endpoint="http-metrics" instance="192.168.66.14:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-controller-manager" 17.049s ago 10.16ms Get "http://192.168.66.14:10252/metrics": dial tcp 192.168.66.14:10252: connect: connection refused
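
Only preprod-master-1 (192.168.66.12) answers on 10252 here; the controller-managers on preprod-master-2 and preprod-master-3 refuse the connection, and the kube-scheduler group further down shows the same split on 10251. A minimal probe reproduces what Prometheus reports and helps tell "nothing listening on that address" (connection refused) apart from network filtering (timeout); this is a hypothetical diagnostic sketch that assumes the host running it can reach the node IPs on 192.168.66.0/24:

    import socket

    # (node IP, port) pairs copied from the kube-controller-manager group above.
    ENDPOINTS = [
        ("192.168.66.12", 10252),  # reported up
        ("192.168.66.13", 10252),  # reported down: connection refused
        ("192.168.66.14", 10252),  # reported down: connection refused
    ]

    for host, port in ENDPOINTS:
        try:
            with socket.create_connection((host, port), timeout=2):
                print(f"{host}:{port}  open")
        except ConnectionRefusedError:
            # Nothing is bound to this address:port -- matches the scrape error above.
            print(f"{host}:{port}  connection refused")
        except OSError as exc:  # timeout, no route, etc.
            print(f"{host}:{port}  {exc}")

A refusal usually means the component on that node listens on 127.0.0.1 only, is stopped, or the node itself is unhealthy; a timeout would instead point at something filtering traffic between Prometheus and the node.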

monitoring/kube-prometheus-stack-kube-etcd/0 (0/0 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
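
0/0 is different from a failing scrape: Prometheus discovered no endpoints at all for the etcd ServiceMonitor. The Prometheus Operator builds targets from the Endpoints of a matching Service, so an empty group usually means that Service has no ready addresses or its labels/port do not match. A quick check with the official kubernetes Python client, assuming the chart's default object name kube-prometheus-stack-kube-etcd in kube-system (adjust for your release):

    from kubernetes import client, config

    config.load_kube_config()  # use config.load_incluster_config() inside a pod
    v1 = client.CoreV1Api()

    # Name assumed from kube-prometheus-stack's default naming; a 404 here means
    # the chart never created the etcd Service in this cluster.
    name, namespace = "kube-prometheus-stack-kube-etcd", "kube-system"

    svc = v1.read_namespaced_service(name, namespace)
    print("selector:", svc.spec.selector)
    print("ports:   ", [(p.name, p.port) for p in svc.spec.ports])

    eps = v1.read_namespaced_endpoints(name, namespace)
    print("endpoint subsets:", eps.subsets or "none -- nothing for Prometheus to scrape")

If the Endpoints object is empty, the usual suspects are etcd not exposing a metrics listener (--listen-metrics-urls) or the chart's kubeEtcd values not matching how etcd actually runs in this cluster.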

monitoring/kube-prometheus-stack-kube-proxy/0 (0/11 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://192.168.66.11:10249/metrics
down endpoint="http-metrics" instance="192.168.66.11:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-rfb9h" service="kube-prometheus-stack-kube-proxy" 21.124s ago 821us Get "http://192.168.66.11:10249/metrics": dial tcp 192.168.66.11:10249: connect: connection refused
http://192.168.66.12:10249/metrics
down endpoint="http-metrics" instance="192.168.66.12:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-vgj7t" service="kube-prometheus-stack-kube-proxy" 18.13s ago 724.3us Get "http://192.168.66.12:10249/metrics": dial tcp 192.168.66.12:10249: connect: connection refused
http://192.168.66.13:10249/metrics
down endpoint="http-metrics" instance="192.168.66.13:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-fv5pw" service="kube-prometheus-stack-kube-proxy" 19.471s ago 642.1us Get "http://192.168.66.13:10249/metrics": dial tcp 192.168.66.13:10249: connect: connection refused
http://192.168.66.14:10249/metrics
down endpoint="http-metrics" instance="192.168.66.14:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-mn48v" service="kube-prometheus-stack-kube-proxy" 17.457s ago 670.5us Get "http://192.168.66.14:10249/metrics": dial tcp 192.168.66.14:10249: connect: connection refused
http://192.168.66.16:10249/metrics
down endpoint="http-metrics" instance="192.168.66.16:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-jv9rq" service="kube-prometheus-stack-kube-proxy" 2.142s ago 716us Get "http://192.168.66.16:10249/metrics": dial tcp 192.168.66.16:10249: connect: connection refused
http://192.168.66.18:10249/metrics
down endpoint="http-metrics" instance="192.168.66.18:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-fpwhw" service="kube-prometheus-stack-kube-proxy" 9.556s ago 787.6us Get "http://192.168.66.18:10249/metrics": dial tcp 192.168.66.18:10249: connect: connection refused
http://192.168.66.21:10249/metrics
down endpoint="http-metrics" instance="192.168.66.21:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-262mg" service="kube-prometheus-stack-kube-proxy" 17.597s ago 672us Get "http://192.168.66.21:10249/metrics": dial tcp 192.168.66.21:10249: connect: connection refused
http://192.168.66.22:10249/metrics
down endpoint="http-metrics" instance="192.168.66.22:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-gwv5n" service="kube-prometheus-stack-kube-proxy" 29.573s ago 820.1us Get "http://192.168.66.22:10249/metrics": dial tcp 192.168.66.22:10249: connect: connection refused
http://192.168.66.24:10249/metrics
down endpoint="http-metrics" instance="192.168.66.24:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-bl779" service="kube-prometheus-stack-kube-proxy" 25.242s ago 807.5us Get "http://192.168.66.24:10249/metrics": dial tcp 192.168.66.24:10249: connect: connection refused
http://192.168.66.25:10249/metrics
down endpoint="http-metrics" instance="192.168.66.25:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-jxv7h" service="kube-prometheus-stack-kube-proxy" 10.804s ago 616us Get "http://192.168.66.25:10249/metrics": dial tcp 192.168.66.25:10249: connect: connection refused
http://192.168.66.26:10249/metrics
down endpoint="http-metrics" instance="192.168.66.26:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-77gzh" service="kube-prometheus-stack-kube-proxy" 28.487s ago 528.8us Get "http://192.168.66.26:10249/metrics": dial tcp 192.168.66.26:10249: connect: connection refused
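
All eleven kube-proxy targets are refused on 10249. kube-proxy binds its metrics server to 127.0.0.1:10249 by default, so scrapes from the Prometheus pod are refused even when kube-proxy itself is healthy; the usual remedy is setting metricsBindAddress to 0.0.0.0:10249 and restarting the DaemonSet (which only helps on nodes that are otherwise reachable). The sketch below prints the current setting, assuming a kubeadm-style ConfigMap named kube-proxy with a config.conf key; adjust the names if your cluster stores the configuration differently:

    from kubernetes import client, config

    config.load_kube_config()
    v1 = client.CoreV1Api()

    # Assumed kubeadm-style layout: ConfigMap "kube-proxy" in kube-system holding
    # the KubeProxyConfiguration under the "config.conf" key.
    cm = v1.read_namespaced_config_map("kube-proxy", "kube-system")
    conf = (cm.data or {}).get("config.conf", "")

    for line in conf.splitlines():
        if "metricsBindAddress" in line:
            # An empty value or 127.0.0.1:10249 explains the refused scrapes above.
            print(line.strip())
            break
    else:
        print("metricsBindAddress not found (kube-proxy defaults to 127.0.0.1:10249)")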

monitoring/kube-prometheus-stack-kube-scheduler/0 (1/3 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://192.168.66.12:10251/metrics
up endpoint="http-metrics" instance="192.168.66.12:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-scheduler" 7.221s ago 7.959ms
http://192.168.66.13:10251/metrics
down endpoint="http-metrics" instance="192.168.66.13:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-scheduler" 25.766s ago 684us Get "http://192.168.66.13:10251/metrics": dial tcp 192.168.66.13:10251: connect: connection refused
http://192.168.66.14:10251/metrics
down endpoint="http-metrics" instance="192.168.66.14:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kube-scheduler" 4.055s ago 1.03ms Get "http://192.168.66.14:10251/metrics": dial tcp 192.168.66.14:10251: connect: connection refused

monitoring/kube-prometheus-stack-kube-state-metrics/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.79.108:8080/metrics
up endpoint="http" instance="10.233.79.108:8080" job="kube-state-metrics" namespace="monitoring" pod="kube-prometheus-stack-kube-state-metrics-857d997b65-wcgkq" service="kube-prometheus-stack-kube-state-metrics" 17.295s ago 71.47ms

monitoring/kube-prometheus-stack-kubelet/0 (9/11 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.66.11:10250/metrics
up endpoint="https-metrics" instance="192.168.66.11:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 12.909s ago 1.705s
https://192.168.66.12:10250/metrics
up endpoint="https-metrics" instance="192.168.66.12:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 17.069s ago 11.19ms
https://192.168.66.13:10250/metrics
down endpoint="https-metrics" instance="192.168.66.13:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 7.173s ago 715.1us Get "https://192.168.66.13:10250/metrics": dial tcp 192.168.66.13:10250: connect: connection refused
https://192.168.66.14:10250/metrics
down endpoint="https-metrics" instance="192.168.66.14:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 24.106s ago 674.3us Get "https://192.168.66.14:10250/metrics": dial tcp 192.168.66.14:10250: connect: connection refused
https://192.168.66.16:10250/metrics
up endpoint="https-metrics" instance="192.168.66.16:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 15.894s ago 15.82ms
https://192.168.66.18:10250/metrics
up endpoint="https-metrics" instance="192.168.66.18:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 1.903s ago 11.48ms
https://192.168.66.21:10250/metrics
up endpoint="https-metrics" instance="192.168.66.21:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-4.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 21.504s ago 33.65ms
https://192.168.66.22:10250/metrics
up endpoint="https-metrics" instance="192.168.66.22:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-5.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 5.927s ago 22.92ms
https://192.168.66.24:10250/metrics
up endpoint="https-metrics" instance="192.168.66.24:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-ingress-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 28.776s ago 9.766ms
https://192.168.66.25:10250/metrics
up endpoint="https-metrics" instance="192.168.66.25:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-worker-6.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 11.184s ago 11.85ms
https://192.168.66.26:10250/metrics
up endpoint="https-metrics" instance="192.168.66.26:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="preprod-ingress-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 21.063s ago 9.604ms

monitoring/kube-prometheus-stack-kubelet/1 (9/11 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.66.11:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.11:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 29.781s ago 112.3ms
https://192.168.66.12:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.12:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 4.309s ago 68.64ms
https://192.168.66.13:10250/metrics/cadvisor
down endpoint="https-metrics" instance="192.168.66.13:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 23.072s ago 767.5us Get "https://192.168.66.13:10250/metrics/cadvisor": dial tcp 192.168.66.13:10250: connect: connection refused
https://192.168.66.14:10250/metrics/cadvisor
down endpoint="https-metrics" instance="192.168.66.14:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 14.527s ago 641.3us Get "https://192.168.66.14:10250/metrics/cadvisor": dial tcp 192.168.66.14:10250: connect: connection refused
https://192.168.66.16:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.16:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 14.587s ago 56.3ms
https://192.168.66.18:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.18:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 23.404s ago 140.8ms
https://192.168.66.21:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.21:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-4.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 6.54s ago 233.3ms
https://192.168.66.22:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.22:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-5.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 645ms ago 98.89ms
https://192.168.66.24:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.24:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-ingress-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 25.983s ago 68.64ms
https://192.168.66.25:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.25:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-worker-6.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 14.49s ago 436.8ms
https://192.168.66.26:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.66.26:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="preprod-ingress-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 14.325s ago 76.89ms

monitoring/kube-prometheus-stack-kubelet/2 (9/11 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.66.11:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.11:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 5.781s ago 1.657ms
https://192.168.66.12:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.12:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 3.67s ago 2.758ms
https://192.168.66.13:10250/metrics/probes
down endpoint="https-metrics" instance="192.168.66.13:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 723ms ago 690.8us Get "https://192.168.66.13:10250/metrics/probes": dial tcp 192.168.66.13:10250: connect: connection refused
https://192.168.66.14:10250/metrics/probes
down endpoint="https-metrics" instance="192.168.66.14:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 20.613s ago 823.5us Get "https://192.168.66.14:10250/metrics/probes": dial tcp 192.168.66.14:10250: connect: connection refused
https://192.168.66.16:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.16:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 25.459s ago 1.645ms
https://192.168.66.18:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.18:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 12.962s ago 1.836ms
https://192.168.66.21:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.21:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-4.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 13.101s ago 2.571ms
https://192.168.66.22:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.22:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-5.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 14.3s ago 2ms
https://192.168.66.24:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.24:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-ingress-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 29.449s ago 1.766ms
https://192.168.66.25:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.25:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-worker-6.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 6.066s ago 1.643ms
https://192.168.66.26:10250/metrics/probes
up endpoint="https-metrics" instance="192.168.66.26:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="preprod-ingress-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 22.107s ago 1.187ms

monitoring/kube-prometheus-stack-kubelet/3 (9/11 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.66.11:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.11:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 6.959s ago 3.554s
https://192.168.66.12:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.12:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-master-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 16.407s ago 10.57ms
https://192.168.66.13:10250/metrics/resource/v1alpha1
down endpoint="https-metrics" instance="192.168.66.13:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-master-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 8.534s ago 730.3us Get "https://192.168.66.13:10250/metrics/resource/v1alpha1": dial tcp 192.168.66.13:10250: connect: connection refused
https://192.168.66.14:10250/metrics/resource/v1alpha1
down endpoint="https-metrics" instance="192.168.66.14:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-master-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 26.955s ago 717.7us Get "https://192.168.66.14:10250/metrics/resource/v1alpha1": dial tcp 192.168.66.14:10250: connect: connection refused
https://192.168.66.16:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.16:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 27.463s ago 10.25ms
https://192.168.66.18:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.18:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-3.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 14.266s ago 19.44ms
https://192.168.66.21:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.21:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-4.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 1.178s ago 42.26ms
https://192.168.66.22:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.22:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-5.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 2.59s ago 2.811ms
https://192.168.66.24:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.24:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-ingress-2.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 9.15s ago 2.162ms
https://192.168.66.25:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.25:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-worker-6.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 16.262s ago 2.233ms
https://192.168.66.26:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="192.168.66.26:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="preprod-ingress-1.preprod.pmc.vas-stream.ru" service="kube-prometheus-stack-kubelet" 4.097s ago 12.36ms

monitoring/kube-prometheus-stack-node-exporter/0 (9/11 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://192.168.66.11:9100/metrics
up endpoint="metrics" instance="192.168.66.11:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-5vj9n" service="kube-prometheus-stack-prometheus-node-exporter" 13.685s ago 43.39ms
http://192.168.66.12:9100/metrics
up endpoint="metrics" instance="192.168.66.12:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-5glnz" service="kube-prometheus-stack-prometheus-node-exporter" 10.459s ago 16.85ms
http://192.168.66.13:9100/metrics
down endpoint="metrics" instance="192.168.66.13:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-j5wrx" service="kube-prometheus-stack-prometheus-node-exporter" 13.739s ago 697.5us Get "http://192.168.66.13:9100/metrics": dial tcp 192.168.66.13:9100: connect: connection refused
http://192.168.66.14:9100/metrics
down endpoint="metrics" instance="192.168.66.14:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-vgtxj" service="kube-prometheus-stack-prometheus-node-exporter" 11.933s ago 664.9us Get "http://192.168.66.14:9100/metrics": dial tcp 192.168.66.14:9100: connect: connection refused
http://192.168.66.16:9100/metrics
up endpoint="metrics" instance="192.168.66.16:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-jpqb6" service="kube-prometheus-stack-prometheus-node-exporter" 21.972s ago 31.28ms
http://192.168.66.18:9100/metrics
up endpoint="metrics" instance="192.168.66.18:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-rknlq" service="kube-prometheus-stack-prometheus-node-exporter" 10.864s ago 32.56ms
http://192.168.66.21:9100/metrics
up endpoint="metrics" instance="192.168.66.21:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-cvnh6" service="kube-prometheus-stack-prometheus-node-exporter" 26.466s ago 88.41ms
http://192.168.66.22:9100/metrics
up endpoint="metrics" instance="192.168.66.22:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-fh7dg" service="kube-prometheus-stack-prometheus-node-exporter" 13.525s ago 24.55ms
http://192.168.66.24:9100/metrics
up endpoint="metrics" instance="192.168.66.24:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-gs8gk" service="kube-prometheus-stack-prometheus-node-exporter" 14.473s ago 26.76ms
http://192.168.66.25:9100/metrics
up endpoint="metrics" instance="192.168.66.25:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-92w7w" service="kube-prometheus-stack-prometheus-node-exporter" 7.747s ago 32.49ms
http://192.168.66.26:9100/metrics
up endpoint="metrics" instance="192.168.66.26:9100" job="node-exporter" namespace="monitoring" pod="kube-prometheus-stack-prometheus-node-exporter-7p564" service="kube-prometheus-stack-prometheus-node-exporter" 16.736s ago 30.09ms
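
Across kubelet, node-exporter, kube-controller-manager and kube-scheduler the down instances are always 192.168.66.13 and 192.168.66.14 (preprod-master-2 and preprod-master-3), which suggests those two nodes are down or unreachable rather than each exporter being misconfigured. A small aggregation over the Prometheus query API makes that pattern explicit; it assumes the server is reachable on localhost:9090, e.g. via kubectl -n monitoring port-forward svc/kube-prometheus-stack-prometheus 9090:

    import json
    import urllib.parse
    import urllib.request
    from collections import Counter

    PROM = "http://localhost:9090"  # assumes a port-forward to the Prometheus service

    params = urllib.parse.urlencode({"query": "up == 0"})
    with urllib.request.urlopen(f"{PROM}/api/v1/query?{params}", timeout=10) as resp:
        down = json.load(resp)["data"]["result"]

    # Count down targets per host so node-wide outages stand out from
    # job-specific problems such as the kube-proxy bind address.
    per_host = Counter(m["metric"]["instance"].split(":")[0] for m in down)
    for host, n in per_host.most_common():
        jobs = sorted({m["metric"]["job"] for m in down
                       if m["metric"]["instance"].startswith(host + ":")})
        print(f"{host}: {n} down targets ({', '.join(jobs)})")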

monitoring/kube-prometheus-stack-operator/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.79.71:8080/metrics
up endpoint="http" instance="10.233.79.71:8080" job="kube-prometheus-stack-operator" namespace="monitoring" pod="kube-prometheus-stack-operator-5b48747dff-w9tf7" service="kube-prometheus-stack-operator" 11.776s ago 2.344ms

monitoring/kube-prometheus-stack-prometheus/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.85.36:9090/metrics
up endpoint="web" instance="10.233.85.36:9090" job="kube-prometheus-stack-prometheus" namespace="monitoring" pod="prometheus-kube-prometheus-stack-prometheus-0" service="kube-prometheus-stack-prometheus" 17.416s ago 13.78ms

monitoring/partner-metrics-exporter/0 (0/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.111.183:9913/metrics
down endpoint="metrics" instance="10.233.111.183:9913" job="pmc-partner-api" namespace="pmc-production" pod="pmc-partner-api-69fcbd545d-vvbsr" service="pmc-partner-api" tier="pmc-metrics-api" 13.794s ago 774.6us Get "http://10.233.111.183:9913/metrics": dial tcp 10.233.111.183:9913: connect: connection refused
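
This one is not a node problem: the pmc-partner-api pod refuses connections on 9913, the port its ServiceMonitor scrapes, so either the metrics exporter in that pod is not running or it listens on a different port. Listing the ports the pod declares is a cheap first check (declared ports are informational only, so follow up with the container logs); a sketch with the kubernetes client, using the pod name from the labels above (it changes on the next rollout):

    from kubernetes import client, config

    config.load_kube_config()
    v1 = client.CoreV1Api()

    # Pod name and namespace taken from the target labels above.
    pod = v1.read_namespaced_pod("pmc-partner-api-69fcbd545d-vvbsr", "pmc-production")

    for c in pod.spec.containers:
        ports = [(p.name, p.container_port) for p in (c.ports or [])]
        print(f"container {c.name}: declared ports {ports or 'none'}")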

monitoring/rabbit-metrics-exporter/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.111.81:15672/api/metrics
up endpoint="http" instance="10.233.111.81:15672" job="rabbitmq-internal" namespace="default" pod="rabbitmq-0" service="rabbitmq-internal" tier="pmc-rabbit" 17.58s ago 264.1ms

monitoring/statistic-metrics-exporter/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.233.126.249:8080/metrics
up endpoint="http" instance="10.233.126.249:8080" job="pmc-statistic-metrics" namespace="pmc-production" pod="pmc-statistic-metrics-7469b6f694-skvkb" service="pmc-statistic-metrics" tier="pmc-statistic" 10.314s ago 10.73ms
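
Everything on this page is also available from the Prometheus targets API, which is convenient for checking whether the down counts above improve after a fix. A minimal per-scrape-pool summary, again assuming a port-forward to the Prometheus service on localhost:9090:

    import json
    import urllib.request
    from collections import defaultdict

    PROM = "http://localhost:9090"  # assumes a port-forward to the Prometheus service

    with urllib.request.urlopen(f"{PROM}/api/v1/targets?state=active", timeout=10) as resp:
        targets = json.load(resp)["data"]["activeTargets"]

    pools = defaultdict(lambda: {"up": 0, "total": 0, "errors": []})
    for t in targets:
        pool = pools[t["scrapePool"]]
        pool["total"] += 1
        if t["health"] == "up":
            pool["up"] += 1
        elif t.get("lastError"):
            pool["errors"].append(f'{t["scrapeUrl"]}: {t["lastError"]}')

    for name in sorted(pools):
        p = pools[name]
        print(f'{name}: {p["up"]}/{p["total"]} up')
        for err in p["errors"]:
            print(f"  {err}")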