Prometheus Notes/Prometheus Internal Metrics

Of note: per-job stats are not included in the default exposed metrics below; you have to derive them yourself, for example by aggregating the per-target scrape metrics with PromQL, as sketched next.
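A minimal sketch, assuming the standard per-target synthetic series (up, scrape_samples_scraped, scrape_duration_seconds) are available, which Prometheus attaches to every scraped target by default; aggregate them by the job label to get rough per-job views. The exact queries are illustrative, not the only way to do it:

# targets per job (sum by (job) (up) gives how many are currently up)
count by (job) (up)

# samples pulled per scrape, summed per job
sum by (job) (scrape_samples_scraped)

# average scrape duration per job
avg by (job) (scrape_duration_seconds)

# approximate number of series currently stored per job (expensive on large servers)
count by (job) ({__name__=~".+"})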


Raw /metrics output from a Prometheus 2.18.1 server:

# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 5.6665e-05
go_gc_duration_seconds{quantile="0.25"} 0.000266691
go_gc_duration_seconds{quantile="0.5"} 0.000849635
go_gc_duration_seconds{quantile="0.75"} 0.003956719
go_gc_duration_seconds{quantile="1"} 0.212668408
go_gc_duration_seconds_sum 4.153114455
go_gc_duration_seconds_count 755
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 725
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.14.2"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 4.434670312e+09
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 9.23021287984e+11
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 4.664464e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 3.343492765e+09
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
# TYPE go_memstats_gc_cpu_fraction gauge
go_memstats_gc_cpu_fraction 0.021421316669685626
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 4.52779688e+08
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 4.434670312e+09
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 7.409999872e+09
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 4.72068096e+09
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 2.0041378e+07
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 7.219003392e+09
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 1.2130680832e+10
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.6064884154064028e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 3.363534143e+09
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 13888
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 6.8039304e+07
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 1.21667584e+08
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 4.146525488e+09
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 1.5018616e+07
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 1.55648e+07
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 1.55648e+07
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 1.2740392368e+10
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 15
# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_attempted_total counter
net_conntrack_dialer_conn_attempted_total{dialer_name="alertmanager"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="book"} 126541
net_conntrack_dialer_conn_attempted_total{dialer_name="cache-search"} 2950
net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 35
net_conntrack_dialer_conn_attempted_total{dialer_name="esclient-production"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="grafana_metrics"} 21
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-apiservers"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-cluster-daemonsets"} 232
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-cluster-services"} 52
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-node-exporter"} 22825
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-nodes"} 372
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-nodes-cadvisor"} 97
net_conntrack_dialer_conn_attempted_total{dialer_name="live-search"} 54944
net_conntrack_dialer_conn_attempted_total{dialer_name="logstash"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="postgres-exporter"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 2
net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus-blackbox-exporter"} 24
net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus-pushgateway"} 3
# HELP net_conntrack_dialer_conn_closed_total Total number of connections closed which originated from the dialer of a given name.
# TYPE net_conntrack_dialer_conn_closed_total counter
net_conntrack_dialer_conn_closed_total{dialer_name="alertmanager"} 1
net_conntrack_dialer_conn_closed_total{dialer_name="book"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="cache-search"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="default"} 34
net_conntrack_dialer_conn_closed_total{dialer_name="esclient-production"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="grafana_metrics"} 20
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-apiservers"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-cluster-daemonsets"} 108
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-cluster-services"} 5
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-node-exporter"} 5
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-nodes"} 224
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-nodes-cadvisor"} 66
net_conntrack_dialer_conn_closed_total{dialer_name="live-search"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="logstash"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="postgres-exporter"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="prometheus"} 1
net_conntrack_dialer_conn_closed_total{dialer_name="prometheus-blackbox-exporter"} 10
net_conntrack_dialer_conn_closed_total{dialer_name="prometheus-pushgateway"} 1
# HELP net_conntrack_dialer_conn_established_total Total number of connections successfully established by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_established_total counter
net_conntrack_dialer_conn_established_total{dialer_name="alertmanager"} 1
net_conntrack_dialer_conn_established_total{dialer_name="book"} 0
net_conntrack_dialer_conn_established_total{dialer_name="cache-search"} 0
net_conntrack_dialer_conn_established_total{dialer_name="default"} 35
net_conntrack_dialer_conn_established_total{dialer_name="esclient-production"} 0
net_conntrack_dialer_conn_established_total{dialer_name="grafana_metrics"} 21
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-apiservers"} 1
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-cluster-daemonsets"} 144
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-cluster-services"} 37
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-node-exporter"} 35
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-nodes"} 229
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-nodes-cadvisor"} 71
net_conntrack_dialer_conn_established_total{dialer_name="live-search"} 0
net_conntrack_dialer_conn_established_total{dialer_name="logstash"} 0
net_conntrack_dialer_conn_established_total{dialer_name="postgres-exporter"} 1
net_conntrack_dialer_conn_established_total{dialer_name="prometheus"} 2
net_conntrack_dialer_conn_established_total{dialer_name="prometheus-blackbox-exporter"} 11
net_conntrack_dialer_conn_established_total{dialer_name="prometheus-pushgateway"} 2
# HELP net_conntrack_dialer_conn_failed_total Total number of connections failed to dial by the dialer a given name.
# TYPE net_conntrack_dialer_conn_failed_total counter
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="book",reason="refused"} 126541
net_conntrack_dialer_conn_failed_total{dialer_name="book",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="book",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="book",reason="unknown"} 126541
net_conntrack_dialer_conn_failed_total{dialer_name="cache-search",reason="refused"} 2888
net_conntrack_dialer_conn_failed_total{dialer_name="cache-search",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="cache-search",reason="timeout"} 53
net_conntrack_dialer_conn_failed_total{dialer_name="cache-search",reason="unknown"} 2950
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="esclient-production",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="esclient-production",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="esclient-production",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="esclient-production",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="grafana_metrics",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="grafana_metrics",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="grafana_metrics",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="grafana_metrics",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cluster-daemonsets",reason="refused"} 2
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cluster-daemonsets",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cluster-daemonsets",reason="timeout"} 55
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cluster-daemonsets",reason="unknown"} 88
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cluster-services",reason="refused"} 12
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cluster-services",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cluster-services",reason="timeout"} 2
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cluster-services",reason="unknown"} 15
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-node-exporter",reason="refused"} 22733
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-node-exporter",reason="resolution"} 26
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-node-exporter",reason="timeout"} 30
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-node-exporter",reason="unknown"} 22764
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="unknown"} 143
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes-cadvisor",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes-cadvisor",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes-cadvisor",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes-cadvisor",reason="unknown"} 26
net_conntrack_dialer_conn_failed_total{dialer_name="live-search",reason="refused"} 54694
net_conntrack_dialer_conn_failed_total{dialer_name="live-search",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="live-search",reason="timeout"} 166
net_conntrack_dialer_conn_failed_total{dialer_name="live-search",reason="unknown"} 54944
net_conntrack_dialer_conn_failed_total{dialer_name="logstash",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="logstash",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="logstash",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="logstash",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="postgres-exporter",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="postgres-exporter",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="postgres-exporter",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="postgres-exporter",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-blackbox-exporter",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-blackbox-exporter",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-blackbox-exporter",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-blackbox-exporter",reason="unknown"} 13
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-pushgateway",reason="refused"} 1
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-pushgateway",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-pushgateway",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-pushgateway",reason="unknown"} 1
# HELP net_conntrack_listener_conn_accepted_total Total number of connections opened to the listener of a given name.
# TYPE net_conntrack_listener_conn_accepted_total counter
net_conntrack_listener_conn_accepted_total{listener_name="http"} 140721
# HELP net_conntrack_listener_conn_closed_total Total number of connections closed that were made to the listener of a given name.
# TYPE net_conntrack_listener_conn_closed_total counter
net_conntrack_listener_conn_closed_total{listener_name="http"} 140741
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 17133.11
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 284
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 9.592438784e+09
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.60641235218e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 7.7612429312e+10
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes -1
# HELP prometheus_api_remote_read_queries The current number of remote read queries being executed or waiting.
# TYPE prometheus_api_remote_read_queries gauge
prometheus_api_remote_read_queries 0
# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built.
# TYPE prometheus_build_info gauge
prometheus_build_info{branch="HEAD",goversion="go1.14.2",revision="ecee9c8abfd118f139014cb1b174b08db3f342cf",version="2.18.1"} 1
# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
prometheus_config_last_reload_success_timestamp_seconds 1.606412487716399e+09
# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
# TYPE prometheus_config_last_reload_successful gauge
prometheus_config_last_reload_successful 1
# HELP prometheus_engine_queries The current number of queries being executed or waiting.
# TYPE prometheus_engine_queries gauge
prometheus_engine_queries 0
# HELP prometheus_engine_queries_concurrent_max The max number of concurrent queries.
# TYPE prometheus_engine_queries_concurrent_max gauge
prometheus_engine_queries_concurrent_max 20
# HELP prometheus_engine_query_duration_seconds Query timings
# TYPE prometheus_engine_query_duration_seconds summary
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.5"} 0.001971334
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.9"} 5.804267723
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.99"} 28.806898217
prometheus_engine_query_duration_seconds_sum{slice="inner_eval"} 30351.65754879196
prometheus_engine_query_duration_seconds_count{slice="inner_eval"} 62212
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.5"} 0.000175127
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.9"} 0.020430116
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.99"} 0.501713422
prometheus_engine_query_duration_seconds_sum{slice="prepare_time"} 685.878358523006
prometheus_engine_query_duration_seconds_count{slice="prepare_time"} 63096
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.5"} 1.7494e-05
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.9"} 3.9941856700000002
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.99"} 14.895645189
prometheus_engine_query_duration_seconds_sum{slice="queue_time"} 13508.520414486144
prometheus_engine_query_duration_seconds_count{slice="queue_time"} 63311
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.5"} 8.98e-07
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.9"} 1.549e-06
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.99"} 0.003256707
prometheus_engine_query_duration_seconds_sum{slice="result_sort"} 30.202703958000058
prometheus_engine_query_duration_seconds_count{slice="result_sort"} 20440
# HELP prometheus_engine_query_log_enabled State of the query log.
# TYPE prometheus_engine_query_log_enabled gauge
prometheus_engine_query_log_enabled 0
# HELP prometheus_engine_query_log_failures_total The number of query log failures.
# TYPE prometheus_engine_query_log_failures_total counter
prometheus_engine_query_log_failures_total 0
# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests.
# TYPE prometheus_http_request_duration_seconds histogram
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.1"} 7603
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.2"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.4"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="1"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="3"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="8"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="20"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="60"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="120"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="+Inf"} 7604
prometheus_http_request_duration_seconds_sum{handler="/-/healthy"} 0.22440614199999998
prometheus_http_request_duration_seconds_count{handler="/-/healthy"} 7604
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.1"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.2"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.4"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="1"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="3"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="8"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="20"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="60"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="120"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="+Inf"} 116366
prometheus_http_request_duration_seconds_sum{handler="/-/ready"} 1.99415340899999
prometheus_http_request_duration_seconds_count{handler="/-/ready"} 116366
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.1"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.2"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.4"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="1"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="3"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="8"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="20"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="60"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="120"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 4
prometheus_http_request_duration_seconds_sum{handler="/api/v1/label/:name/values"} 0.129984588
prometheus_http_request_duration_seconds_count{handler="/api/v1/label/:name/values"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.1"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.2"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.4"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="1"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="3"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="8"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="20"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="60"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="120"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="+Inf"} 3
prometheus_http_request_duration_seconds_sum{handler="/api/v1/metadata"} 0.064583264
prometheus_http_request_duration_seconds_count{handler="/api/v1/metadata"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.1"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.2"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.4"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="1"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="3"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="8"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="20"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="60"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="120"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="+Inf"} 3
prometheus_http_request_duration_seconds_sum{handler="/api/v1/query"} 0.020305370000000003
prometheus_http_request_duration_seconds_count{handler="/api/v1/query"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.1"} 11212
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.2"} 13058
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.4"} 13928
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="1"} 14936
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="3"} 16842
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="8"} 19287
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="20"} 21126
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="60"} 21489
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="120"} 21552
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="+Inf"} 21552
prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_range"} 57336.090573764006
prometheus_http_request_duration_seconds_count{handler="/api/v1/query_range"} 21552
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.1"} 25
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.2"} 27
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.4"} 32
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="1"} 37
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="3"} 44
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="8"} 60
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="20"} 65
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="60"} 69
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="120"} 69
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="+Inf"} 69
prometheus_http_request_duration_seconds_sum{handler="/api/v1/series"} 280.731791435
prometheus_http_request_duration_seconds_count{handler="/api/v1/series"} 69
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.1"} 0
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.2"} 0
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.4"} 0
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="1"} 1
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="3"} 1
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="8"} 1
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="20"} 1
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="60"} 1
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="120"} 1
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="+Inf"} 1
prometheus_http_request_duration_seconds_sum{handler="/graph"} 0.503670233
prometheus_http_request_duration_seconds_count{handler="/graph"} 1
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 2309
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 2342
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 2368
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 2390
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 2529
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 2530
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 2531
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 2532
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 2532
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 2532
prometheus_http_request_duration_seconds_sum{handler="/metrics"} 236.70754538300037
prometheus_http_request_duration_seconds_count{handler="/metrics"} 2532
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.1"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.2"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.4"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="1"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="3"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="8"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="20"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="60"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="120"} 4
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="+Inf"} 4
prometheus_http_request_duration_seconds_sum{handler="/static/*filepath"} 0.004195508
prometheus_http_request_duration_seconds_count{handler="/static/*filepath"} 4
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.1"} 0
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.2"} 0
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.4"} 0
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="1"} 1
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="3"} 1
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="8"} 1
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="20"} 1
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="60"} 1
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="120"} 1
prometheus_http_request_duration_seconds_bucket{handler="/targets",le="+Inf"} 1
prometheus_http_request_duration_seconds_sum{handler="/targets"} 0.495659996
prometheus_http_request_duration_seconds_count{handler="/targets"} 1
# HELP prometheus_http_requests_total Counter of HTTP requests.
# TYPE prometheus_http_requests_total counter
prometheus_http_requests_total{code="200",handler="/-/healthy"} 7604
prometheus_http_requests_total{code="200",handler="/-/ready"} 116355
prometheus_http_requests_total{code="200",handler="/api/v1/label/:name/values"} 4
prometheus_http_requests_total{code="200",handler="/api/v1/metadata"} 3
prometheus_http_requests_total{code="200",handler="/api/v1/query"} 3
prometheus_http_requests_total{code="200",handler="/api/v1/query_range"} 20440
prometheus_http_requests_total{code="200",handler="/api/v1/series"} 69
prometheus_http_requests_total{code="200",handler="/graph"} 1
prometheus_http_requests_total{code="200",handler="/metrics"} 2532
prometheus_http_requests_total{code="200",handler="/static/*filepath"} 4
prometheus_http_requests_total{code="200",handler="/targets"} 1
prometheus_http_requests_total{code="422",handler="/api/v1/query_range"} 213
prometheus_http_requests_total{code="503",handler="/-/ready"} 11
prometheus_http_requests_total{code="503",handler="/api/v1/query_range"} 899
# HELP prometheus_http_response_size_bytes Histogram of response size for HTTP requests.
# TYPE prometheus_http_response_size_bytes histogram
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1000"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="10000"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100000"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+06"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+07"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+08"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+09"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="+Inf"} 7604
prometheus_http_response_size_bytes_sum{handler="/-/healthy"} 174892
prometheus_http_response_size_bytes_count{handler="/-/healthy"} 7604
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100"} 116366
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1000"} 116366
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="10000"} 116366
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100000"} 116366
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+06"} 116366
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+07"} 116366
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+08"} 116366
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+09"} 116366
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="+Inf"} 116366
prometheus_http_response_size_bytes_sum{handler="/-/ready"} 2.443664e+06
prometheus_http_response_size_bytes_count{handler="/-/ready"} 116366
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="10000"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100000"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+06"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+07"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+08"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+09"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 4
prometheus_http_response_size_bytes_sum{handler="/api/v1/label/:name/values"} 29406
prometheus_http_response_size_bytes_count{handler="/api/v1/label/:name/values"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="10000"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100000"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+06"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+07"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+08"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+09"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="+Inf"} 3
prometheus_http_response_size_bytes_sum{handler="/api/v1/metadata"} 55369
prometheus_http_response_size_bytes_count{handler="/api/v1/metadata"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1000"} 2
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="10000"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100000"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+06"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+07"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+08"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+09"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="+Inf"} 3
prometheus_http_response_size_bytes_sum{handler="/api/v1/query"} 8395
prometheus_http_response_size_bytes_count{handler="/api/v1/query"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100"} 1579
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1000"} 5512
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="10000"} 20742
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100000"} 21147
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+06"} 21544
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+07"} 21552
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+08"} 21552
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+09"} 21552
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="+Inf"} 21552
prometheus_http_response_size_bytes_sum{handler="/api/v1/query_range"} 2.0997442e+08
prometheus_http_response_size_bytes_count{handler="/api/v1/query_range"} 21552
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1000"} 8
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="10000"} 26
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100000"} 35
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+06"} 54
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+07"} 69
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+08"} 69
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+09"} 69
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="+Inf"} 69
prometheus_http_response_size_bytes_sum{handler="/api/v1/series"} 6.4797057e+07
prometheus_http_response_size_bytes_count{handler="/api/v1/series"} 69
prometheus_http_response_size_bytes_bucket{handler="/graph",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/graph",le="10000"} 1
prometheus_http_response_size_bytes_bucket{handler="/graph",le="100000"} 1
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+06"} 1
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+07"} 1
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+08"} 1
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+09"} 1
prometheus_http_response_size_bytes_bucket{handler="/graph",le="+Inf"} 1
prometheus_http_response_size_bytes_sum{handler="/graph"} 5987
prometheus_http_response_size_bytes_count{handler="/graph"} 1
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="10000"} 2
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100000"} 2532
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+06"} 2532
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+07"} 2532
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+08"} 2532
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+09"} 2532
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="+Inf"} 2532
prometheus_http_response_size_bytes_sum{handler="/metrics"} 3.0447766e+07
prometheus_http_response_size_bytes_count{handler="/metrics"} 2532
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1000"} 1
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="10000"} 3
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="100000"} 3
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+06"} 4
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+07"} 4
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+08"} 4
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+09"} 4
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="+Inf"} 4
prometheus_http_response_size_bytes_sum{handler="/static/*filepath"} 166564
prometheus_http_response_size_bytes_count{handler="/static/*filepath"} 4
prometheus_http_response_size_bytes_bucket{handler="/targets",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/targets",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/targets",le="10000"} 0
prometheus_http_response_size_bytes_bucket{handler="/targets",le="100000"} 0
prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+06"} 0
prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+07"} 1
prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+08"} 1
prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+09"} 1
prometheus_http_response_size_bytes_bucket{handler="/targets",le="+Inf"} 1
prometheus_http_response_size_bytes_sum{handler="/targets"} 1.70295e+06
prometheus_http_response_size_bytes_count{handler="/targets"} 1
# HELP prometheus_notifications_alertmanagers_discovered The number of alertmanagers discovered and active.
# TYPE prometheus_notifications_alertmanagers_discovered gauge
prometheus_notifications_alertmanagers_discovered 1
# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to errors when sending to Alertmanager.
# TYPE prometheus_notifications_dropped_total counter
prometheus_notifications_dropped_total 0
# HELP prometheus_notifications_errors_total Total number of errors sending alert notifications.
# TYPE prometheus_notifications_errors_total counter
prometheus_notifications_errors_total{alertmanager="http://10.132.34.10:9093/api/v1/alerts"} 0
# HELP prometheus_notifications_latency_seconds Latency quantiles for sending alert notifications.
# TYPE prometheus_notifications_latency_seconds summary
prometheus_notifications_latency_seconds{alertmanager="http://10.132.34.10:9093/api/v1/alerts",quantile="0.5"} NaN
prometheus_notifications_latency_seconds{alertmanager="http://10.132.34.10:9093/api/v1/alerts",quantile="0.9"} NaN
prometheus_notifications_latency_seconds{alertmanager="http://10.132.34.10:9093/api/v1/alerts",quantile="0.99"} NaN
prometheus_notifications_latency_seconds_sum{alertmanager="http://10.132.34.10:9093/api/v1/alerts"} 2.6597786200000004
prometheus_notifications_latency_seconds_count{alertmanager="http://10.132.34.10:9093/api/v1/alerts"} 411
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
# TYPE prometheus_notifications_queue_capacity gauge
prometheus_notifications_queue_capacity 10000
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
# TYPE prometheus_notifications_queue_length gauge
prometheus_notifications_queue_length 0
# HELP prometheus_notifications_sent_total Total number of alerts sent.
# TYPE prometheus_notifications_sent_total counter
prometheus_notifications_sent_total{alertmanager="http://10.132.34.10:9093/api/v1/alerts"} 411
# HELP prometheus_remote_storage_highest_timestamp_in_seconds Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.
# TYPE prometheus_remote_storage_highest_timestamp_in_seconds gauge
prometheus_remote_storage_highest_timestamp_in_seconds 1.606488415e+09
# HELP prometheus_remote_storage_samples_in_total Samples in to remote storage, compare to samples out for queue managers.
# TYPE prometheus_remote_storage_samples_in_total counter
prometheus_remote_storage_samples_in_total 1.015171868e+09
# HELP prometheus_remote_storage_string_interner_zero_reference_releases_total The number of times release has been called for strings that are not interned.
# TYPE prometheus_remote_storage_string_interner_zero_reference_releases_total counter
prometheus_remote_storage_string_interner_zero_reference_releases_total 0
# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute.
# TYPE prometheus_rule_evaluation_duration_seconds summary
prometheus_rule_evaluation_duration_seconds{quantile="0.5"} 0.001484062
prometheus_rule_evaluation_duration_seconds{quantile="0.9"} 0.076168735
prometheus_rule_evaluation_duration_seconds{quantile="0.99"} 4.816066888
prometheus_rule_evaluation_duration_seconds_sum 1605.812052829012
prometheus_rule_evaluation_duration_seconds_count 41756
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
# TYPE prometheus_rule_evaluation_failures_total counter
prometheus_rule_evaluation_failures_total{rule_group="/etc/config/alerting_rules.yml;Endpoints Probes"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/config/alerting_rules.yml;elasticsearch-alerts"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/config/alerting_rules.yml;filebeat"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/config/alerting_rules.yml;kubernetes-cluster"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/config/alerting_rules.yml;smartcmo-alerts"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/config/recording_rules.yml;elasticsearch-rules"} 0
# HELP prometheus_rule_evaluations_total The total number of rule evaluations.
# TYPE prometheus_rule_evaluations_total counter
prometheus_rule_evaluations_total{rule_group="/etc/config/alerting_rules.yml;Endpoints Probes"} 7596
prometheus_rule_evaluations_total{rule_group="/etc/config/alerting_rules.yml;elasticsearch-alerts"} 10120
prometheus_rule_evaluations_total{rule_group="/etc/config/alerting_rules.yml;filebeat"} 3798
prometheus_rule_evaluations_total{rule_group="/etc/config/alerting_rules.yml;kubernetes-cluster"} 16445
prometheus_rule_evaluations_total{rule_group="/etc/config/alerting_rules.yml;smartcmo-alerts"} 2532
prometheus_rule_evaluations_total{rule_group="/etc/config/recording_rules.yml;elasticsearch-rules"} 1265
# HELP prometheus_rule_group_duration_seconds The duration of rule group evaluations.
# TYPE prometheus_rule_group_duration_seconds summary
prometheus_rule_group_duration_seconds{quantile="0.01"} 0.001149901
prometheus_rule_group_duration_seconds{quantile="0.05"} 0.001455493
prometheus_rule_group_duration_seconds{quantile="0.5"} 0.009651096
prometheus_rule_group_duration_seconds{quantile="0.9"} 0.267329799
prometheus_rule_group_duration_seconds{quantile="0.99"} 29.003379805
prometheus_rule_group_duration_seconds_sum 1614.0304924660045
prometheus_rule_group_duration_seconds_count 7593
# HELP prometheus_rule_group_interval_seconds The interval of a rule group.
# TYPE prometheus_rule_group_interval_seconds gauge
prometheus_rule_group_interval_seconds{rule_group="/etc/config/alerting_rules.yml;Endpoints Probes"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/config/alerting_rules.yml;elasticsearch-alerts"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/config/alerting_rules.yml;filebeat"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/config/alerting_rules.yml;kubernetes-cluster"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/config/alerting_rules.yml;smartcmo-alerts"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/config/recording_rules.yml;elasticsearch-rules"} 60
# HELP prometheus_rule_group_iterations_missed_total The total number of rule group evaluations missed due to slow rule group evaluation.
# TYPE prometheus_rule_group_iterations_missed_total counter
prometheus_rule_group_iterations_missed_total 1
# HELP prometheus_rule_group_iterations_total The total number of scheduled rule group evaluations, whether executed or missed.
# TYPE prometheus_rule_group_iterations_total counter
prometheus_rule_group_iterations_total 7593
# HELP prometheus_rule_group_last_duration_seconds The duration of the last rule group evaluation.
# TYPE prometheus_rule_group_last_duration_seconds gauge
prometheus_rule_group_last_duration_seconds{rule_group="/etc/config/alerting_rules.yml;Endpoints Probes"} 0.005513828
prometheus_rule_group_last_duration_seconds{rule_group="/etc/config/alerting_rules.yml;elasticsearch-alerts"} 0.065089015
prometheus_rule_group_last_duration_seconds{rule_group="/etc/config/alerting_rules.yml;filebeat"} 0.006070087
prometheus_rule_group_last_duration_seconds{rule_group="/etc/config/alerting_rules.yml;kubernetes-cluster"} 0.08272807
prometheus_rule_group_last_duration_seconds{rule_group="/etc/config/alerting_rules.yml;smartcmo-alerts"} 0.02859971
prometheus_rule_group_last_duration_seconds{rule_group="/etc/config/recording_rules.yml;elasticsearch-rules"} 0.008305625
# HELP prometheus_rule_group_last_evaluation_timestamp_seconds The timestamp of the last rule group evaluation in seconds.
# TYPE prometheus_rule_group_last_evaluation_timestamp_seconds gauge
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/config/alerting_rules.yml;Endpoints Probes"} 1.6064884108026094e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/config/alerting_rules.yml;elasticsearch-alerts"} 1.6064883629268441e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/config/alerting_rules.yml;filebeat"} 1.606488403906394e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/config/alerting_rules.yml;kubernetes-cluster"} 1.6064883835366383e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/config/alerting_rules.yml;smartcmo-alerts"} 1.6064883925764208e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/config/recording_rules.yml;elasticsearch-rules"} 1.60648838039807e+09
# HELP prometheus_rule_group_rules The number of rules.
# TYPE prometheus_rule_group_rules gauge
prometheus_rule_group_rules{rule_group="/etc/config/alerting_rules.yml;Endpoints Probes"} 6
prometheus_rule_group_rules{rule_group="/etc/config/alerting_rules.yml;elasticsearch-alerts"} 8
prometheus_rule_group_rules{rule_group="/etc/config/alerting_rules.yml;filebeat"} 3
prometheus_rule_group_rules{rule_group="/etc/config/alerting_rules.yml;kubernetes-cluster"} 13
prometheus_rule_group_rules{rule_group="/etc/config/alerting_rules.yml;smartcmo-alerts"} 2
prometheus_rule_group_rules{rule_group="/etc/config/recording_rules.yml;elasticsearch-rules"} 1
# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
# TYPE prometheus_sd_consul_rpc_duration_seconds summary
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
# TYPE prometheus_sd_consul_rpc_failures_total counter
prometheus_sd_consul_rpc_failures_total 0
# HELP prometheus_sd_discovered_targets Current number of discovered targets.
# TYPE prometheus_sd_discovered_targets gauge
prometheus_sd_discovered_targets{config="book",name="scrape"} 0
prometheus_sd_discovered_targets{config="cache-search",name="scrape"} 31
prometheus_sd_discovered_targets{config="config-0",name="notify"} 844
prometheus_sd_discovered_targets{config="esclient-production",name="scrape"} 32
prometheus_sd_discovered_targets{config="grafana_metrics",name="scrape"} 1
prometheus_sd_discovered_targets{config="kubernetes-apiservers",name="scrape"} 488
prometheus_sd_discovered_targets{config="kubernetes-cluster-daemonsets",name="scrape"} 368
prometheus_sd_discovered_targets{config="kubernetes-cluster-services",name="scrape"} 62
prometheus_sd_discovered_targets{config="kubernetes-node-exporter",name="scrape"} 33
prometheus_sd_discovered_targets{config="kubernetes-nodes",name="scrape"} 33
prometheus_sd_discovered_targets{config="kubernetes-nodes-cadvisor",name="scrape"} 33
prometheus_sd_discovered_targets{config="live-search",name="scrape"} 4
prometheus_sd_discovered_targets{config="logstash",name="scrape"} 31
prometheus_sd_discovered_targets{config="postgres-exporter",name="scrape"} 1
prometheus_sd_discovered_targets{config="prometheus",name="scrape"} 1
prometheus_sd_discovered_targets{config="prometheus-blackbox-exporter",name="scrape"} 5
prometheus_sd_discovered_targets{config="prometheus-pushgateway",name="scrape"} 45
# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
# TYPE prometheus_sd_dns_lookup_failures_total counter
prometheus_sd_dns_lookup_failures_total 0
# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
# TYPE prometheus_sd_dns_lookups_total counter
prometheus_sd_dns_lookups_total 0
# HELP prometheus_sd_failed_configs Current number of service discovery configurations that failed to load.
# TYPE prometheus_sd_failed_configs gauge
prometheus_sd_failed_configs{name="notify"} 0
prometheus_sd_failed_configs{name="scrape"} 0
# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
# TYPE prometheus_sd_file_read_errors_total counter
prometheus_sd_file_read_errors_total 0
# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
# TYPE prometheus_sd_file_scan_duration_seconds summary
prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
prometheus_sd_file_scan_duration_seconds_sum 0
prometheus_sd_file_scan_duration_seconds_count 0
# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
# TYPE prometheus_sd_kubernetes_events_total counter
prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 67
prometheus_sd_kubernetes_events_total{event="add",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="add",role="node"} 39
prometheus_sd_kubernetes_events_total{event="add",role="pod"} 6690
prometheus_sd_kubernetes_events_total{event="add",role="service"} 93
prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="node"} 6
prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 5807
prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 508708
prometheus_sd_kubernetes_events_total{event="update",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="update",role="node"} 54780
prometheus_sd_kubernetes_events_total{event="update",role="pod"} 151121
prometheus_sd_kubernetes_events_total{event="update",role="service"} 11739
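# Of note: a high update-event rate here (endpoints especially) is the usual source of Kubernetes SD churn; per-role/event rates as a PromQL sketch:
#   sum by (role, event) (rate(prometheus_sd_kubernetes_events_total[5m]))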
# HELP prometheus_sd_kubernetes_http_request_duration_seconds Summary of latencies for HTTP requests to the Kubernetes API by endpoint.
# TYPE prometheus_sd_kubernetes_http_request_duration_seconds summary
prometheus_sd_kubernetes_http_request_duration_seconds_sum{endpoint="/%7Bprefix%7D"} 2.025012762
prometheus_sd_kubernetes_http_request_duration_seconds_count{endpoint="/%7Bprefix%7D"} 17
# HELP prometheus_sd_kubernetes_http_request_total Total number of HTTP requests to the Kubernetes API by status code.
# TYPE prometheus_sd_kubernetes_http_request_total counter
prometheus_sd_kubernetes_http_request_total{status_code="200"} 2901
# HELP prometheus_sd_kubernetes_workqueue_depth Current depth of the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_depth gauge
prometheus_sd_kubernetes_workqueue_depth{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="pod"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="service"} 0
# HELP prometheus_sd_kubernetes_workqueue_items_total Total number of items added to the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_items_total counter
prometheus_sd_kubernetes_workqueue_items_total{queue_name="endpoints"} 514790
prometheus_sd_kubernetes_workqueue_items_total{queue_name="node"} 54821
prometheus_sd_kubernetes_workqueue_items_total{queue_name="pod"} 162808
prometheus_sd_kubernetes_workqueue_items_total{queue_name="service"} 5090
# HELP prometheus_sd_kubernetes_workqueue_latency_seconds How long an item stays in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_latency_seconds summary
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="endpoints"} 487.24194068106493
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="endpoints"} 514790
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="node"} 25.35504356600008
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="node"} 54821
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="pod"} 2741.597879431052
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="pod"} 162808
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="service"} 9.83863182800004
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="service"} 5090
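# Of note: this is a summary with only _sum and _count, so the usable number is the mean; average time an item sits in each work queue, as a PromQL sketch:
#   rate(prometheus_sd_kubernetes_workqueue_latency_seconds_sum[5m]) / rate(prometheus_sd_kubernetes_workqueue_latency_seconds_count[5m])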
# HELP prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds Duration of the longest running processor in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds gauge
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="pod"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="service"} 0
# HELP prometheus_sd_kubernetes_workqueue_unfinished_work_seconds How long an item has remained unfinished in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_unfinished_work_seconds gauge
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="pod"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="service"} 0
# HELP prometheus_sd_kubernetes_workqueue_work_duration_seconds How long processing an item from the work queue takes.
# TYPE prometheus_sd_kubernetes_workqueue_work_duration_seconds summary
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="endpoints"} 87.92303003300033
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="endpoints"} 514790
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="node"} 13.112350405999967
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="node"} 54821
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="pod"} 14.400603794999835
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="pod"} 162808
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="service"} 0.47729717499999874
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="service"} 5090
# HELP prometheus_sd_received_updates_total Total number of update events received from the SD providers.
# TYPE prometheus_sd_received_updates_total counter
prometheus_sd_received_updates_total{name="notify"} 134939
prometheus_sd_received_updates_total{name="scrape"} 608908
# HELP prometheus_sd_refresh_duration_seconds The duration of a refresh in seconds for the given SD mechanism.
# TYPE prometheus_sd_refresh_duration_seconds summary
prometheus_sd_refresh_duration_seconds{mechanism="gce",quantile="0.5"} 1.47e-07
prometheus_sd_refresh_duration_seconds{mechanism="gce",quantile="0.9"} 1.95e-07
prometheus_sd_refresh_duration_seconds{mechanism="gce",quantile="0.99"} 4.95e-07
prometheus_sd_refresh_duration_seconds_sum{mechanism="gce"} 0.0009773629999999987
prometheus_sd_refresh_duration_seconds_count{mechanism="gce"} 6330
# HELP prometheus_sd_refresh_failures_total Number of refresh failures for the given SD mechanism.
# TYPE prometheus_sd_refresh_failures_total counter
prometheus_sd_refresh_failures_total{mechanism="gce"} 0
# HELP prometheus_sd_updates_delayed_total Total number of update events that couldn't be sent immediately.
# TYPE prometheus_sd_updates_delayed_total counter
prometheus_sd_updates_delayed_total{name="notify"} 1
prometheus_sd_updates_delayed_total{name="scrape"} 4
# HELP prometheus_sd_updates_total Total number of update events sent to the SD consumers.
# TYPE prometheus_sd_updates_total counter
prometheus_sd_updates_total{name="notify"} 6281
prometheus_sd_updates_total{name="scrape"} 15028
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="10s",quantile="0.01"} 5.2902147809999995
prometheus_target_interval_length_seconds{interval="10s",quantile="0.05"} 9.93834948
prometheus_target_interval_length_seconds{interval="10s",quantile="0.5"} 10.000023666
prometheus_target_interval_length_seconds{interval="10s",quantile="0.9"} 10.021159446
prometheus_target_interval_length_seconds{interval="10s",quantile="0.99"} 15.597237559
prometheus_target_interval_length_seconds_sum{interval="10s"} 2.539060797041389e+06
prometheus_target_interval_length_seconds_count{interval="10s"} 253546
prometheus_target_interval_length_seconds{interval="30s",quantile="0.01"} 23.973047444
prometheus_target_interval_length_seconds{interval="30s",quantile="0.05"} 29.909995672
prometheus_target_interval_length_seconds{interval="30s",quantile="0.5"} 30.000028874
prometheus_target_interval_length_seconds{interval="30s",quantile="0.9"} 30.055214378
prometheus_target_interval_length_seconds{interval="30s",quantile="0.99"} 36.60300635
prometheus_target_interval_length_seconds_sum{interval="30s"} 1.8898699253074072e+07
prometheus_target_interval_length_seconds_count{interval="30s"} 629930
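# Of note: this summary shows real scrape-interval jitter; the 0.99 quantile drifting well past the configured interval (15.6s vs 10s above) means scrapes are being delayed. PromQL sketch:
#   prometheus_target_interval_length_seconds{quantile="0.99"}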
# HELP prometheus_target_metadata_cache_bytes The number of bytes that are currently used for storing metric metadata in the cache
# TYPE prometheus_target_metadata_cache_bytes gauge
prometheus_target_metadata_cache_bytes{scrape_job="book"} 0
prometheus_target_metadata_cache_bytes{scrape_job="cache-search"} 0
prometheus_target_metadata_cache_bytes{scrape_job="esclient-production"} 0
prometheus_target_metadata_cache_bytes{scrape_job="grafana_metrics"} 3825
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-apiservers"} 7036
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-cluster-daemonsets"} 55770
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-cluster-services"} 351860
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-node-exporter"} 333360
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-nodes"} 193960
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-nodes-cadvisor"} 96855
prometheus_target_metadata_cache_bytes{scrape_job="live-search"} 0
prometheus_target_metadata_cache_bytes{scrape_job="logstash"} 0
prometheus_target_metadata_cache_bytes{scrape_job="postgres-exporter"} 6167
prometheus_target_metadata_cache_bytes{scrape_job="prometheus"} 9753
prometheus_target_metadata_cache_bytes{scrape_job="prometheus-blackbox-exporter"} 2953
prometheus_target_metadata_cache_bytes{scrape_job="prometheus-pushgateway"} 1953
# HELP prometheus_target_metadata_cache_entries Total number of metric metadata entries in the cache
# TYPE prometheus_target_metadata_cache_entries gauge
prometheus_target_metadata_cache_entries{scrape_job="book"} 0
prometheus_target_metadata_cache_entries{scrape_job="cache-search"} 0
prometheus_target_metadata_cache_entries{scrape_job="esclient-production"} 0
prometheus_target_metadata_cache_entries{scrape_job="grafana_metrics"} 84
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-apiservers"} 88
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-cluster-daemonsets"} 1914
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-cluster-services"} 7697
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-node-exporter"} 7320
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-nodes"} 2803
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-nodes-cadvisor"} 1947
prometheus_target_metadata_cache_entries{scrape_job="live-search"} 0
prometheus_target_metadata_cache_entries{scrape_job="logstash"} 0
prometheus_target_metadata_cache_entries{scrape_job="postgres-exporter"} 88
prometheus_target_metadata_cache_entries{scrape_job="prometheus"} 169
prometheus_target_metadata_cache_entries{scrape_job="prometheus-blackbox-exporter"} 62
prometheus_target_metadata_cache_entries{scrape_job="prometheus-pushgateway"} 37
# HELP prometheus_target_scrape_pool_reloads_failed_total Total number of failed scrape loop reloads.
# TYPE prometheus_target_scrape_pool_reloads_failed_total counter
prometheus_target_scrape_pool_reloads_failed_total 0
# HELP prometheus_target_scrape_pool_reloads_total Total number of scrape loop reloads.
# TYPE prometheus_target_scrape_pool_reloads_total counter
prometheus_target_scrape_pool_reloads_total 0
# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
# TYPE prometheus_target_scrape_pool_sync_total counter
prometheus_target_scrape_pool_sync_total{scrape_job="book"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="cache-search"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="esclient-production"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="grafana_metrics"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-apiservers"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-cluster-daemonsets"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-cluster-services"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-node-exporter"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-nodes"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-nodes-cadvisor"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="live-search"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="logstash"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="postgres-exporter"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus-blackbox-exporter"} 14967
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus-pushgateway"} 14967
# HELP prometheus_target_scrape_pools_failed_total Total number of scrape pool creations that failed.
# TYPE prometheus_target_scrape_pools_failed_total counter
prometheus_target_scrape_pools_failed_total 0
# HELP prometheus_target_scrape_pools_total Total number of scrape pool creation attempts.
# TYPE prometheus_target_scrape_pools_total counter
prometheus_target_scrape_pools_total 16
# HELP prometheus_target_scrapes_cache_flush_forced_total How many times a scrape cache was flushed due to getting big while scrapes are failing.
# TYPE prometheus_target_scrapes_cache_flush_forced_total counter
prometheus_target_scrapes_cache_flush_forced_total 0
# HELP prometheus_target_scrapes_exceeded_sample_limit_total Total number of scrapes that hit the sample limit and were rejected.
# TYPE prometheus_target_scrapes_exceeded_sample_limit_total counter
prometheus_target_scrapes_exceeded_sample_limit_total 0
# HELP prometheus_target_scrapes_sample_duplicate_timestamp_total Total number of samples rejected due to duplicate timestamps but different values
# TYPE prometheus_target_scrapes_sample_duplicate_timestamp_total counter
prometheus_target_scrapes_sample_duplicate_timestamp_total 0
# HELP prometheus_target_scrapes_sample_out_of_bounds_total Total number of samples rejected due to timestamp falling outside of the time bounds
# TYPE prometheus_target_scrapes_sample_out_of_bounds_total counter
prometheus_target_scrapes_sample_out_of_bounds_total 0
# HELP prometheus_target_scrapes_sample_out_of_order_total Total number of samples rejected due to not being in the expected order
# TYPE prometheus_target_scrapes_sample_out_of_order_total counter
prometheus_target_scrapes_sample_out_of_order_total 0
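# Of note: any growth in these rejection counters (sample limit, duplicate timestamp, out of bounds, out of order) is worth alerting on; PromQL sketch (the > 0 threshold is an assumption):
#   rate(prometheus_target_scrapes_sample_out_of_order_total[5m]) > 0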
# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
# TYPE prometheus_target_sync_length_seconds summary
prometheus_target_sync_length_seconds{scrape_job="book",quantile="0.01"} 0.005360413
prometheus_target_sync_length_seconds{scrape_job="book",quantile="0.05"} 0.006137737
prometheus_target_sync_length_seconds{scrape_job="book",quantile="0.5"} 0.007331509
prometheus_target_sync_length_seconds{scrape_job="book",quantile="0.9"} 0.091395125
prometheus_target_sync_length_seconds{scrape_job="book",quantile="0.99"} 1.214300125
prometheus_target_sync_length_seconds_sum{scrape_job="book"} 359.93792800000125
prometheus_target_sync_length_seconds_count{scrape_job="book"} 14967
prometheus_target_sync_length_seconds{scrape_job="cache-search",quantile="0.01"} 0.002148038
prometheus_target_sync_length_seconds{scrape_job="cache-search",quantile="0.05"} 0.002241791
prometheus_target_sync_length_seconds{scrape_job="cache-search",quantile="0.5"} 0.002517652
prometheus_target_sync_length_seconds{scrape_job="cache-search",quantile="0.9"} 0.018806541
prometheus_target_sync_length_seconds{scrape_job="cache-search",quantile="0.99"} 0.603255594
prometheus_target_sync_length_seconds_sum{scrape_job="cache-search"} 196.83041010600064
prometheus_target_sync_length_seconds_count{scrape_job="cache-search"} 14967
prometheus_target_sync_length_seconds{scrape_job="esclient-production",quantile="0.01"} 0.001754809
prometheus_target_sync_length_seconds{scrape_job="esclient-production",quantile="0.05"} 0.002093115
prometheus_target_sync_length_seconds{scrape_job="esclient-production",quantile="0.5"} 0.002402936
prometheus_target_sync_length_seconds{scrape_job="esclient-production",quantile="0.9"} 0.020397932
prometheus_target_sync_length_seconds{scrape_job="esclient-production",quantile="0.99"} 0.601140971
prometheus_target_sync_length_seconds_sum{scrape_job="esclient-production"} 161.74192390100077
prometheus_target_sync_length_seconds_count{scrape_job="esclient-production"} 14967
prometheus_target_sync_length_seconds{scrape_job="grafana_metrics",quantile="0.01"} 1.9802e-05
prometheus_target_sync_length_seconds{scrape_job="grafana_metrics",quantile="0.05"} 2.1532e-05
prometheus_target_sync_length_seconds{scrape_job="grafana_metrics",quantile="0.5"} 4.1358e-05
prometheus_target_sync_length_seconds{scrape_job="grafana_metrics",quantile="0.9"} 9.403e-05
prometheus_target_sync_length_seconds{scrape_job="grafana_metrics",quantile="0.99"} 0.092944183
prometheus_target_sync_length_seconds_sum{scrape_job="grafana_metrics"} 14.82530588000007
prometheus_target_sync_length_seconds_count{scrape_job="grafana_metrics"} 14967
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.01"} 0.012142077
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.05"} 0.012514053
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.5"} 0.017412349
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.9"} 0.298282347
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.99"} 2.109972828
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-apiservers"} 749.3448683000012
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-apiservers"} 14967
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-daemonsets",quantile="0.01"} 0.011786912
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-daemonsets",quantile="0.05"} 0.013201658
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-daemonsets",quantile="0.5"} 0.016155395
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-daemonsets",quantile="0.9"} 0.384920741
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-daemonsets",quantile="0.99"} 2.800594658
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-cluster-daemonsets"} 727.3244067850005
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-cluster-daemonsets"} 14967
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-services",quantile="0.01"} 0.006199614
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-services",quantile="0.05"} 0.00725669
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-services",quantile="0.5"} 0.00895596
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-services",quantile="0.9"} 0.109412939
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cluster-services",quantile="0.99"} 1.693866997
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-cluster-services"} 395.9414035249988
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-cluster-services"} 14967
prometheus_target_sync_length_seconds{scrape_job="kubernetes-node-exporter",quantile="0.01"} 0.00354043
prometheus_target_sync_length_seconds{scrape_job="kubernetes-node-exporter",quantile="0.05"} 0.004365264
prometheus_target_sync_length_seconds{scrape_job="kubernetes-node-exporter",quantile="0.5"} 0.004893739
prometheus_target_sync_length_seconds{scrape_job="kubernetes-node-exporter",quantile="0.9"} 0.091627417
prometheus_target_sync_length_seconds{scrape_job="kubernetes-node-exporter",quantile="0.99"} 1.098200251
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-node-exporter"} 216.3825843319998
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-node-exporter"} 14967
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.01"} 0.003949539
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.05"} 0.004949619
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.5"} 0.00565313
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.9"} 0.093201023
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.99"} 0.702137003
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-nodes"} 256.34800567500025
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-nodes"} 14967
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.01"} 0.004351145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.05"} 0.004429057
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.5"} 0.005674505
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.9"} 0.082155085
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.99"} 0.998102727
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-nodes-cadvisor"} 246.9668598780003
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-nodes-cadvisor"} 14967
prometheus_target_sync_length_seconds{scrape_job="live-search",quantile="0.01"} 0.002438372
prometheus_target_sync_length_seconds{scrape_job="live-search",quantile="0.05"} 0.002965215
prometheus_target_sync_length_seconds{scrape_job="live-search",quantile="0.5"} 0.00357374
prometheus_target_sync_length_seconds{scrape_job="live-search",quantile="0.9"} 0.014866712
prometheus_target_sync_length_seconds{scrape_job="live-search",quantile="0.99"} 1.001300327
prometheus_target_sync_length_seconds_sum{scrape_job="live-search"} 264.82836405600034
prometheus_target_sync_length_seconds_count{scrape_job="live-search"} 14967
prometheus_target_sync_length_seconds{scrape_job="logstash",quantile="0.01"} 0.00175951
prometheus_target_sync_length_seconds{scrape_job="logstash",quantile="0.05"} 0.001892125
prometheus_target_sync_length_seconds{scrape_job="logstash",quantile="0.5"} 0.00240945
prometheus_target_sync_length_seconds{scrape_job="logstash",quantile="0.9"} 0.01841961
prometheus_target_sync_length_seconds{scrape_job="logstash",quantile="0.99"} 0.317992454
prometheus_target_sync_length_seconds_sum{scrape_job="logstash"} 188.4597575319997
prometheus_target_sync_length_seconds_count{scrape_job="logstash"} 14967
prometheus_target_sync_length_seconds{scrape_job="postgres-exporter",quantile="0.01"} 2.0567e-05
prometheus_target_sync_length_seconds{scrape_job="postgres-exporter",quantile="0.05"} 2.3119e-05
prometheus_target_sync_length_seconds{scrape_job="postgres-exporter",quantile="0.5"} 4.1852e-05
prometheus_target_sync_length_seconds{scrape_job="postgres-exporter",quantile="0.9"} 8.1707e-05
prometheus_target_sync_length_seconds{scrape_job="postgres-exporter",quantile="0.99"} 0.089587614
prometheus_target_sync_length_seconds_sum{scrape_job="postgres-exporter"} 15.952402503000004
prometheus_target_sync_length_seconds_count{scrape_job="postgres-exporter"} 14967
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 1.4766e-05
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 1.6559e-05
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 2.9624e-05
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 6.5435e-05
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.000182183
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 11.560746412999999
prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 14967
prometheus_target_sync_length_seconds{scrape_job="prometheus-blackbox-exporter",quantile="0.01"} 9.2539e-05
prometheus_target_sync_length_seconds{scrape_job="prometheus-blackbox-exporter",quantile="0.05"} 0.000128484
prometheus_target_sync_length_seconds{scrape_job="prometheus-blackbox-exporter",quantile="0.5"} 0.000170654
prometheus_target_sync_length_seconds{scrape_job="prometheus-blackbox-exporter",quantile="0.9"} 0.000287494
prometheus_target_sync_length_seconds{scrape_job="prometheus-blackbox-exporter",quantile="0.99"} 0.018435659
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus-blackbox-exporter"} 19.839499531999973
prometheus_target_sync_length_seconds_count{scrape_job="prometheus-blackbox-exporter"} 14967
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.01"} 0.000597565
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.05"} 0.000623621
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.5"} 0.000711078
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.9"} 0.006961932
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.99"} 0.395509339
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus-pushgateway"} 53.96700571700007
prometheus_target_sync_length_seconds_count{scrape_job="prometheus-pushgateway"} 14967
# HELP prometheus_template_text_expansion_failures_total The total number of template text expansion failures.
# TYPE prometheus_template_text_expansion_failures_total counter
prometheus_template_text_expansion_failures_total 0
# HELP prometheus_template_text_expansions_total The total number of template text expansions.
# TYPE prometheus_template_text_expansions_total counter
prometheus_template_text_expansions_total 4119
# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
# TYPE prometheus_treecache_watcher_goroutines gauge
prometheus_treecache_watcher_goroutines 0
# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
# TYPE prometheus_treecache_zookeeper_failures_total counter
prometheus_treecache_zookeeper_failures_total 0
# HELP prometheus_tsdb_blocks_loaded Number of currently loaded data blocks
# TYPE prometheus_tsdb_blocks_loaded gauge
prometheus_tsdb_blocks_loaded 31
# HELP prometheus_tsdb_checkpoint_creations_failed_total Total number of checkpoint creations that failed.
# TYPE prometheus_tsdb_checkpoint_creations_failed_total counter
prometheus_tsdb_checkpoint_creations_failed_total 0
# HELP prometheus_tsdb_checkpoint_creations_total Total number of checkpoint creations attempted.
# TYPE prometheus_tsdb_checkpoint_creations_total counter
prometheus_tsdb_checkpoint_creations_total 10
# HELP prometheus_tsdb_checkpoint_deletions_failed_total Total number of checkpoint deletions that failed.
# TYPE prometheus_tsdb_checkpoint_deletions_failed_total counter
prometheus_tsdb_checkpoint_deletions_failed_total 0
# HELP prometheus_tsdb_checkpoint_deletions_total Total number of checkpoint deletions attempted.
# TYPE prometheus_tsdb_checkpoint_deletions_total counter
prometheus_tsdb_checkpoint_deletions_total 10
# HELP prometheus_tsdb_compaction_chunk_range_seconds Final time range of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_range_seconds histogram
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="100"} 19010
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="400"} 19010
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1600"} 19010
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6400"} 19082
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="25600"} 36641
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="102400"} 58504
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="409600"} 525153
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1.6384e+06"} 2.856671e+06
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6.5536e+06"} 8.898209e+06
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="2.62144e+07"} 8.926037e+06
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="+Inf"} 8.926037e+06
prometheus_tsdb_compaction_chunk_range_seconds_sum 2.4185885457713e+13
prometheus_tsdb_compaction_chunk_range_seconds_count 8.926037e+06
# HELP prometheus_tsdb_compaction_chunk_samples Final number of samples on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_samples histogram
prometheus_tsdb_compaction_chunk_samples_bucket{le="4"} 56734
prometheus_tsdb_compaction_chunk_samples_bucket{le="6"} 61489
prometheus_tsdb_compaction_chunk_samples_bucket{le="9"} 133830
prometheus_tsdb_compaction_chunk_samples_bucket{le="13.5"} 373862
prometheus_tsdb_compaction_chunk_samples_bucket{le="20.25"} 902370
prometheus_tsdb_compaction_chunk_samples_bucket{le="30.375"} 938361
prometheus_tsdb_compaction_chunk_samples_bucket{le="45.5625"} 1.056455e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="68.34375"} 1.173685e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="102.515625"} 1.266357e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="153.7734375"} 8.848017e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="230.66015625"} 8.902476e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="345.990234375"} 8.926037e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="+Inf"} 8.926037e+06
prometheus_tsdb_compaction_chunk_samples_sum 9.52801517e+08
prometheus_tsdb_compaction_chunk_samples_count 8.926037e+06
# HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 126502
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 598739
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 1.017374e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 2.603345e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 4.582063e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 6.15123e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 8.04679e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 8.448104e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 8.747772e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 8.921516e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 8.923602e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 8.926037e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 8.926037e+06
prometheus_tsdb_compaction_chunk_size_bytes_sum 1.877714644e+09
prometheus_tsdb_compaction_chunk_size_bytes_count 8.926037e+06
# HELP prometheus_tsdb_compaction_duration_seconds Duration of compaction runs
# TYPE prometheus_tsdb_compaction_duration_seconds histogram
prometheus_tsdb_compaction_duration_seconds_bucket{le="1"} 0
prometheus_tsdb_compaction_duration_seconds_bucket{le="2"} 0
prometheus_tsdb_compaction_duration_seconds_bucket{le="4"} 0
prometheus_tsdb_compaction_duration_seconds_bucket{le="8"} 0
prometheus_tsdb_compaction_duration_seconds_bucket{le="16"} 0
prometheus_tsdb_compaction_duration_seconds_bucket{le="32"} 0
prometheus_tsdb_compaction_duration_seconds_bucket{le="64"} 8
prometheus_tsdb_compaction_duration_seconds_bucket{le="128"} 13
prometheus_tsdb_compaction_duration_seconds_bucket{le="256"} 14
prometheus_tsdb_compaction_duration_seconds_bucket{le="512"} 14
prometheus_tsdb_compaction_duration_seconds_bucket{le="+Inf"} 14
prometheus_tsdb_compaction_duration_seconds_sum 991.0450132020001
prometheus_tsdb_compaction_duration_seconds_count 14
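# Of note: this one is a histogram, so histogram_quantile() applies; e.g. 90th-percentile compaction time over the last day, as a sketch:
#   histogram_quantile(0.9, rate(prometheus_tsdb_compaction_duration_seconds_bucket[1d]))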
# HELP prometheus_tsdb_compaction_populating_block Set to 1 when a block is currently being written to the disk.
# TYPE prometheus_tsdb_compaction_populating_block gauge
prometheus_tsdb_compaction_populating_block 0
# HELP prometheus_tsdb_compactions_failed_total Total number of compactions that failed for the partition.
# TYPE prometheus_tsdb_compactions_failed_total counter
prometheus_tsdb_compactions_failed_total 0
# HELP prometheus_tsdb_compactions_skipped_total Total number of skipped compactions due to disabled auto compaction.
# TYPE prometheus_tsdb_compactions_skipped_total counter
prometheus_tsdb_compactions_skipped_total 0
# HELP prometheus_tsdb_compactions_total Total number of compactions that were executed for the partition.
# TYPE prometheus_tsdb_compactions_total counter
prometheus_tsdb_compactions_total 14
# HELP prometheus_tsdb_compactions_triggered_total Total number of triggered compactions for the partition.
# TYPE prometheus_tsdb_compactions_triggered_total counter
prometheus_tsdb_compactions_triggered_total 1254
# HELP prometheus_tsdb_head_active_appenders Number of currently active appender transactions
# TYPE prometheus_tsdb_head_active_appenders gauge
prometheus_tsdb_head_active_appenders 1
# HELP prometheus_tsdb_head_chunks Total number of chunks in the head block.
# TYPE prometheus_tsdb_head_chunks gauge
prometheus_tsdb_head_chunks 1.428779e+06
# HELP prometheus_tsdb_head_chunks_created_total Total number of chunks created in the head
# TYPE prometheus_tsdb_head_chunks_created_total counter
prometheus_tsdb_head_chunks_created_total 1.0354816e+07
# HELP prometheus_tsdb_head_chunks_removed_total Total number of chunks removed in the head
# TYPE prometheus_tsdb_head_chunks_removed_total counter
prometheus_tsdb_head_chunks_removed_total 8.926037e+06
# HELP prometheus_tsdb_head_gc_duration_seconds Runtime of garbage collection in the head block.
# TYPE prometheus_tsdb_head_gc_duration_seconds summary
prometheus_tsdb_head_gc_duration_seconds_sum 19.277296577
prometheus_tsdb_head_gc_duration_seconds_count 10
# HELP prometheus_tsdb_head_max_time Maximum timestamp of the head block. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_head_max_time gauge
prometheus_tsdb_head_max_time 1.606488415391e+12
# HELP prometheus_tsdb_head_max_time_seconds Maximum timestamp of the head block.
# TYPE prometheus_tsdb_head_max_time_seconds gauge
prometheus_tsdb_head_max_time_seconds 1.606488415e+09
# HELP prometheus_tsdb_head_min_time Minimum time bound of the head block. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_head_min_time gauge
prometheus_tsdb_head_min_time 1.6064784e+12
# HELP prometheus_tsdb_head_min_time_seconds Minimum time bound of the head block.
# TYPE prometheus_tsdb_head_min_time_seconds gauge
prometheus_tsdb_head_min_time_seconds 1.6064784e+09
# HELP prometheus_tsdb_head_samples_appended_total Total number of appended samples.
# TYPE prometheus_tsdb_head_samples_appended_total counter
prometheus_tsdb_head_samples_appended_total 1.015142315e+09
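# Of note: this counter gives the overall ingestion rate; samples ingested per second, as a PromQL sketch:
#   rate(prometheus_tsdb_head_samples_appended_total[5m])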
# HELP prometheus_tsdb_head_series Total number of series in the head block.
# TYPE prometheus_tsdb_head_series gauge
prometheus_tsdb_head_series 550317
# HELP prometheus_tsdb_head_series_created_total Total number of series created in the head
# TYPE prometheus_tsdb_head_series_created_total counter
prometheus_tsdb_head_series_created_total 2.224364e+06
# HELP prometheus_tsdb_head_series_not_found_total Total number of requests for series that were not found.
# TYPE prometheus_tsdb_head_series_not_found_total counter
prometheus_tsdb_head_series_not_found_total 0
# HELP prometheus_tsdb_head_series_removed_total Total number of series removed in the head
# TYPE prometheus_tsdb_head_series_removed_total counter
prometheus_tsdb_head_series_removed_total 1.674047e+06
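# Of note: created vs removed tracks series churn in the head block; a high creation rate relative to prometheus_tsdb_head_series usually points at labels with unbounded values. PromQL sketch:
#   rate(prometheus_tsdb_head_series_created_total[1h])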
# HELP prometheus_tsdb_head_truncations_failed_total Total number of head truncations that failed.
# TYPE prometheus_tsdb_head_truncations_failed_total counter
prometheus_tsdb_head_truncations_failed_total 0
# HELP prometheus_tsdb_head_truncations_total Total number of head truncations attempted.
# TYPE prometheus_tsdb_head_truncations_total counter
prometheus_tsdb_head_truncations_total 10
# HELP prometheus_tsdb_isolation_high_watermark The highest TSDB append ID that has been given out.
# TYPE prometheus_tsdb_isolation_high_watermark gauge
prometheus_tsdb_isolation_high_watermark 1.810121e+06
# HELP prometheus_tsdb_isolation_low_watermark The lowest TSDB append ID that is still referenced.
# TYPE prometheus_tsdb_isolation_low_watermark gauge
prometheus_tsdb_isolation_low_watermark 1.810084e+06
# HELP prometheus_tsdb_lowest_timestamp Lowest timestamp value stored in the database. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_lowest_timestamp gauge
prometheus_tsdb_lowest_timestamp 1.6046424e+12
# HELP prometheus_tsdb_lowest_timestamp_seconds Lowest timestamp value stored in the database.
# TYPE prometheus_tsdb_lowest_timestamp_seconds gauge
prometheus_tsdb_lowest_timestamp_seconds 1.6046424e+09
# HELP prometheus_tsdb_reloads_failures_total Number of times the database failed to reload block data from disk.
# TYPE prometheus_tsdb_reloads_failures_total counter
prometheus_tsdb_reloads_failures_total 0
# HELP prometheus_tsdb_reloads_total Number of times the database reloaded block data from disk.
# TYPE prometheus_tsdb_reloads_total counter
prometheus_tsdb_reloads_total 15
# HELP prometheus_tsdb_retention_limit_bytes Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled
# TYPE prometheus_tsdb_retention_limit_bytes gauge
prometheus_tsdb_retention_limit_bytes 0
# HELP prometheus_tsdb_size_retentions_total The number of times that blocks were deleted because the maximum number of bytes was exceeded.
# TYPE prometheus_tsdb_size_retentions_total counter
prometheus_tsdb_size_retentions_total 0
# HELP prometheus_tsdb_storage_blocks_bytes The number of bytes that are currently used for local storage by all blocks.
# TYPE prometheus_tsdb_storage_blocks_bytes gauge
prometheus_tsdb_storage_blocks_bytes 6.4159049919e+10
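# Of note: handy for checking on-disk block usage against the retention settings (prometheus_tsdb_retention_limit_bytes is 0 here, i.e. size-based retention disabled); usage in GiB, as a sketch:
#   prometheus_tsdb_storage_blocks_bytes / 1024 / 1024 / 1024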
# HELP prometheus_tsdb_symbol_table_size_bytes Size of symbol table on disk (in bytes)
# TYPE prometheus_tsdb_symbol_table_size_bytes gauge
prometheus_tsdb_symbol_table_size_bytes 499208
# HELP prometheus_tsdb_time_retentions_total The number of times that blocks were deleted because the maximum time limit was exceeded.
# TYPE prometheus_tsdb_time_retentions_total counter
prometheus_tsdb_time_retentions_total 1
# HELP prometheus_tsdb_tombstone_cleanup_seconds The time taken to recompact blocks to remove tombstones.
# TYPE prometheus_tsdb_tombstone_cleanup_seconds histogram
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.005"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.01"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.025"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.05"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.1"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.25"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="1"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="2.5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="10"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="+Inf"} 0
prometheus_tsdb_tombstone_cleanup_seconds_sum 0
prometheus_tsdb_tombstone_cleanup_seconds_count 0
# HELP prometheus_tsdb_vertical_compactions_total Total number of compactions done on overlapping blocks.
# TYPE prometheus_tsdb_vertical_compactions_total counter
prometheus_tsdb_vertical_compactions_total 0
# HELP prometheus_tsdb_wal_completed_pages_total Total number of completed pages.
# TYPE prometheus_tsdb_wal_completed_pages_total counter
prometheus_tsdb_wal_completed_pages_total 204090
# HELP prometheus_tsdb_wal_corruptions_total Total number of WAL corruptions.
# TYPE prometheus_tsdb_wal_corruptions_total counter
prometheus_tsdb_wal_corruptions_total 0
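# Of note: this should stay at 0; any increase means the write-ahead log was found damaged on replay. Alert-style PromQL sketch:
#   increase(prometheus_tsdb_wal_corruptions_total[1h]) > 0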
# HELP prometheus_tsdb_wal_fsync_duration_seconds Duration of WAL fsync.
# TYPE prometheus_tsdb_wal_fsync_duration_seconds summary
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} NaN
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} NaN
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} NaN
prometheus_tsdb_wal_fsync_duration_seconds_sum 1.4927938339999995
prometheus_tsdb_wal_fsync_duration_seconds_count 53
# HELP prometheus_tsdb_wal_page_flushes_total Total number of page flushes.
# TYPE prometheus_tsdb_wal_page_flushes_total counter
prometheus_tsdb_wal_page_flushes_total 1.785278e+06
# HELP prometheus_tsdb_wal_segment_current WAL segment index that TSDB is currently writing to.
# TYPE prometheus_tsdb_wal_segment_current gauge
prometheus_tsdb_wal_segment_current 10419
# HELP prometheus_tsdb_wal_truncate_duration_seconds Duration of WAL truncation.
# TYPE prometheus_tsdb_wal_truncate_duration_seconds summary
prometheus_tsdb_wal_truncate_duration_seconds_sum 176.54260449699996
prometheus_tsdb_wal_truncate_duration_seconds_count 10
# HELP prometheus_tsdb_wal_truncations_failed_total Total number of WAL truncations that failed.
# TYPE prometheus_tsdb_wal_truncations_failed_total counter
prometheus_tsdb_wal_truncations_failed_total 0
# HELP prometheus_tsdb_wal_truncations_total Total number of WAL truncations attempted.
# TYPE prometheus_tsdb_wal_truncations_total counter
prometheus_tsdb_wal_truncations_total 10
# HELP prometheus_tsdb_wal_writes_failed_total Total number of WAL writes that failed.
# TYPE prometheus_tsdb_wal_writes_failed_total counter
prometheus_tsdb_wal_writes_failed_total 0
# HELP prometheus_web_federation_errors_total Total number of errors that occurred while sending federation responses.
# TYPE prometheus_web_federation_errors_total counter
prometheus_web_federation_errors_total 0
# HELP prometheus_web_federation_warnings_total Total number of warnings that occurred while sending federation responses.
# TYPE prometheus_web_federation_warnings_total counter
prometheus_web_federation_warnings_total 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 2532
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
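# Of note: the handler's own status-code counters are an easy self-check that /metrics itself is healthy; non-200 responses as a PromQL sketch:
#   rate(promhttp_metric_handler_requests_total{code!="200"}[5m]) > 0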