scrape: Add metrics to track bytes and entries in the metadata cache (#6675)
Signed-off-by: gotjosh <josue@grafana.com>
parent 9adad8ad30, commit 8b49c9285d

@@ -27,12 +27,79 @@ import (
     "github.com/go-kit/kit/log/level"
     "github.com/pkg/errors"
 
+    "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/prometheus/config"
     "github.com/prometheus/prometheus/discovery/targetgroup"
     "github.com/prometheus/prometheus/pkg/labels"
     "github.com/prometheus/prometheus/storage"
 )
 
+var targetMetadataCache = newMetadataMetricsCollector()
+
+// MetadataMetricsCollector is a Custom Collector for the metadata cache metrics.
+type MetadataMetricsCollector struct {
+    CacheEntries *prometheus.Desc
+    CacheBytes   *prometheus.Desc
+
+    scrapeManager *Manager
+}
+
+func newMetadataMetricsCollector() *MetadataMetricsCollector {
+    return &MetadataMetricsCollector{
+        CacheEntries: prometheus.NewDesc(
+            "prometheus_target_metadata_cache_entries",
+            "Total number of metric metadata entries in the cache",
+            []string{"scrape_job"},
+            nil,
+        ),
+        CacheBytes: prometheus.NewDesc(
+            "prometheus_target_metadata_cache_bytes",
+            "The number of bytes that are currently used for storing metric metadata in the cache",
+            []string{"scrape_job"},
+            nil,
+        ),
+    }
+}
+
+func (mc *MetadataMetricsCollector) registerManager(m *Manager) {
+    mc.scrapeManager = m
+}
+
+// Describe sends the metrics descriptions to the channel.
+func (mc *MetadataMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
+    ch <- mc.CacheEntries
+    ch <- mc.CacheBytes
+}
+
+// Collect creates and sends the metrics for the metadata cache.
+func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) {
+    if mc.scrapeManager == nil {
+        return
+    }
+
+    for tset, targets := range mc.scrapeManager.TargetsActive() {
+        var size, length int
+        for _, t := range targets {
+            size += t.MetadataSize()
+            length += t.MetadataLength()
+        }
+
+        ch <- prometheus.MustNewConstMetric(
+            mc.CacheEntries,
+            prometheus.GaugeValue,
+            float64(length),
+            tset,
+        )
+
+        ch <- prometheus.MustNewConstMetric(
+            mc.CacheBytes,
+            prometheus.GaugeValue,
+            float64(size),
+            tset,
+        )
+    }
+}
+
 // Appendable returns an Appender.
 type Appendable interface {
     Appender() (storage.Appender, error)
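
The collector is registered via prometheus.MustRegister in the init() hunk further down in this diff, before any Manager exists, which is why Collect bails out while scrapeManager is still nil; NewManager binds itself later through registerManager, as the next hunk shows. A minimal, self-contained sketch of that late-binding pattern, using an invented fakeManager, cacheCollector and example_cache_entries metric rather than the real types, could look roughly like this:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// fakeManager stands in for *scrape.Manager; it only knows entry counts per job.
type fakeManager struct{ entries map[string]int }

type cacheCollector struct {
    desc *prometheus.Desc
    mgr  *fakeManager // nil until bound, like scrapeManager above
}

func (c *cacheCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *cacheCollector) Collect(ch chan<- prometheus.Metric) {
    if c.mgr == nil { // registered but not yet bound: emit nothing
        return
    }
    for job, n := range c.mgr.entries {
        ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), job)
    }
}

func main() {
    col := &cacheCollector{desc: prometheus.NewDesc(
        "example_cache_entries", "Entries per job.", []string{"scrape_job"}, nil)}

    reg := prometheus.NewRegistry()
    reg.MustRegister(col) // registration succeeds even though nothing is bound yet

    mfs, _ := reg.Gather()
    fmt.Println(len(mfs)) // 0: Collect returns early while mgr is nil

    col.mgr = &fakeManager{entries: map[string]int{"node": 42}} // late binding
    mfs, _ = reg.Gather()
    fmt.Println(len(mfs)) // 1: the gauge now appears with scrape_job="node"
}
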
@@ -43,7 +110,7 @@ func NewManager(logger log.Logger, app Appendable) *Manager {
     if logger == nil {
         logger = log.NewNopLogger()
     }
-    return &Manager{
+    m := &Manager{
         append:        app,
         logger:        logger,
         scrapeConfigs: make(map[string]*config.ScrapeConfig),
@@ -51,6 +118,9 @@ func NewManager(logger log.Logger, app Appendable) *Manager {
         graceShut:     make(chan struct{}),
         triggerReload: make(chan struct{}, 1),
     }
+    targetMetadataCache.registerManager(m)
+
+    return m
 }
 
 // Manager maintains a set of scrape pools and manages start/stop cycles

@@ -136,19 +136,22 @@ var (
 )
 
 func init() {
-    prometheus.MustRegister(targetIntervalLength)
-    prometheus.MustRegister(targetReloadIntervalLength)
-    prometheus.MustRegister(targetScrapePools)
-    prometheus.MustRegister(targetScrapePoolsFailed)
-    prometheus.MustRegister(targetScrapePoolReloads)
-    prometheus.MustRegister(targetScrapePoolReloadsFailed)
-    prometheus.MustRegister(targetSyncIntervalLength)
-    prometheus.MustRegister(targetScrapePoolSyncsCounter)
-    prometheus.MustRegister(targetScrapeSampleLimit)
-    prometheus.MustRegister(targetScrapeSampleDuplicate)
-    prometheus.MustRegister(targetScrapeSampleOutOfOrder)
-    prometheus.MustRegister(targetScrapeSampleOutOfBounds)
-    prometheus.MustRegister(targetScrapeCacheFlushForced)
+    prometheus.MustRegister(
+        targetIntervalLength,
+        targetReloadIntervalLength,
+        targetScrapePools,
+        targetScrapePoolsFailed,
+        targetScrapePoolReloads,
+        targetScrapePoolReloadsFailed,
+        targetSyncIntervalLength,
+        targetScrapePoolSyncsCounter,
+        targetScrapeSampleLimit,
+        targetScrapeSampleDuplicate,
+        targetScrapeSampleOutOfOrder,
+        targetScrapeSampleOutOfBounds,
+        targetScrapeCacheFlushForced,
+        targetMetadataCache,
+    )
 }
 
 // scrapePool manages scrapes for sets of targets.
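
Because the collector is registered with the default registry, the two new series are exposed on Prometheus's own /metrics endpoint, one sample per scrape job. An illustrative exposition fragment (the job name and the values are made up) would read:

# HELP prometheus_target_metadata_cache_entries Total number of metric metadata entries in the cache
# TYPE prometheus_target_metadata_cache_entries gauge
prometheus_target_metadata_cache_entries{scrape_job="node"} 42
# HELP prometheus_target_metadata_cache_bytes The number of bytes that are currently used for storing metric metadata in the cache
# TYPE prometheus_target_metadata_cache_bytes gauge
prometheus_target_metadata_cache_bytes{scrape_job="node"} 3144
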
@@ -658,6 +661,11 @@ type metaEntry struct {
     unit string
 }
 
+func (m *metaEntry) size() int {
+    // The lastIter attribute, although part of the struct, is not metadata.
+    return len(m.help) + len(m.unit) + len(m.typ)
+}
+
 func newScrapeCache() *scrapeCache {
     return &scrapeCache{
         series: map[string]*cacheEntry{},
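
The per-job byte figure is therefore just the sum of the help, unit and type strings of every cached entry; map keys (the metric names) and struct overhead are not counted. A small stand-alone sketch, using a simplified copy of metaEntry (the real struct also carries lastIter and uses textparse.MetricType for typ), makes the accounting concrete:

package main

import "fmt"

// metaEntry is a simplified stand-in for the cache entry in the hunk above.
type metaEntry struct {
    typ, help, unit string
}

func (m *metaEntry) size() int {
    return len(m.help) + len(m.unit) + len(m.typ)
}

func main() {
    e := &metaEntry{
        typ:  "counter",
        help: "Total number of HTTP requests",
        unit: "",
    }
    // Only the metadata strings count: 29 (help) + 0 (unit) + 7 (typ) = 36 bytes.
    fmt.Println(e.size())
}
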
@@ -842,6 +850,25 @@ func (c *scrapeCache) ListMetadata() []MetricMetadata {
     return res
 }
 
+// SizeMetadata returns the size of the metadata cache.
+func (c *scrapeCache) SizeMetadata() (s int) {
+    c.metaMtx.Lock()
+    defer c.metaMtx.Unlock()
+    for _, e := range c.metadata {
+        s += e.size()
+    }
+
+    return s
+}
+
+// LengthMetadata returns the number of metadata entries in the cache.
+func (c *scrapeCache) LengthMetadata() int {
+    c.metaMtx.Lock()
+    defer c.metaMtx.Unlock()
+
+    return len(c.metadata)
+}
+
 func newScrapeLoop(ctx context.Context,
     sc scraper,
     l log.Logger,

@@ -78,6 +78,8 @@ func (t *Target) String() string {
 type MetricMetadataStore interface {
     ListMetadata() []MetricMetadata
     GetMetadata(metric string) (MetricMetadata, bool)
+    SizeMetadata() int
+    LengthMetadata() int
 }
 
 // MetricMetadata is a piece of metadata for a metric.
@@ -98,6 +100,28 @@ func (t *Target) MetadataList() []MetricMetadata {
     return t.metadata.ListMetadata()
 }
 
+func (t *Target) MetadataSize() int {
+    t.mtx.RLock()
+    defer t.mtx.RUnlock()
+
+    if t.metadata == nil {
+        return 0
+    }
+
+    return t.metadata.SizeMetadata()
+}
+
+func (t *Target) MetadataLength() int {
+    t.mtx.RLock()
+    defer t.mtx.RUnlock()
+
+    if t.metadata == nil {
+        return 0
+    }
+
+    return t.metadata.LengthMetadata()
+}
+
 // Metadata returns type and help metadata for the given metric.
 func (t *Target) Metadata(metric string) (MetricMetadata, bool) {
     t.mtx.RLock()

@@ -77,6 +77,9 @@ func (s *testMetaStore) GetMetadata(metric string) (scrape.MetricMetadata, bool)
     return scrape.MetricMetadata{}, false
 }
 
+func (s *testMetaStore) SizeMetadata() int   { return 0 }
+func (s *testMetaStore) LengthMetadata() int { return 0 }
+
 // testTargetRetriever represents a list of targets to scrape.
 // It is used to represent targets as part of test cases.
 type testTargetRetriever struct {
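
A quick sanity check of the stub's new methods could look like the test sketch below (the test function name is an assumption, and the code is meant to live in the same test file as testMetaStore):

func TestMetaStoreMetadataAccounting(t *testing.T) {
    s := &testMetaStore{}

    if got := s.SizeMetadata(); got != 0 {
        t.Fatalf("expected an empty store to report 0 bytes, got %d", got)
    }
    if got := s.LengthMetadata(); got != 0 {
        t.Fatalf("expected an empty store to report 0 entries, got %d", got)
    }
}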