Merge pull request #4698 from prometheus/beorn7/vendoring

Update prometheus/client_golang vendoring
Björn Rabenstein 2018-10-05 14:10:44 +02:00 committed by GitHub
commit 8dbd84bb9f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 64 additions and 45 deletions


@@ -40,7 +40,8 @@ type Collector interface {
// Collector may yield any Metric it sees fit in its Collect method.
//
// This method idempotently sends the same descriptors throughout the
// lifetime of the Collector.
// lifetime of the Collector. It may be called concurrently and
// therefore must be implemented in a concurrency safe way.
//
// If a Collector encounters an error while executing this method, it
// must send an invalid descriptor (created with NewInvalidDesc) to
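For context, a minimal sketch of what the expanded contract implies (a hypothetical example, not part of this change): a Collector that builds its Desc once and only ever sends that same value satisfies both the idempotency and the concurrency-safety requirement without any locking.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// myCollector is a hypothetical collector. Its descriptor is created once and
// never mutated, so Describe is idempotent and safe for concurrent calls.
type myCollector struct {
	desc *prometheus.Desc
}

func newMyCollector() *myCollector {
	return &myCollector{
		desc: prometheus.NewDesc("my_metric_total", "An example metric.", nil, nil),
	}
}

// Describe sends the same descriptor on every invocation.
func (c *myCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect is only here to satisfy the prometheus.Collector interface.
func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.CounterValue, 1)
}

func main() {
	prometheus.MustRegister(newMyCollector())
}
```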


@@ -187,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -223,6 +224,21 @@ type histogramCounts struct {
}
type histogram struct {
// countAndHotIdx is a complicated one. For lock-free yet atomic
// observations, we need to save the total count of observations again,
// combined with the index of the currently-hot counts struct, so that
// we can perform the operation on both values atomically. The least
// significant bit defines the hot counts struct. The remaining 63 bits
// represent the total count of observations. This happens under the
// assumption that the 63-bit count will never overflow. Rationale: An
// observation takes about 30ns. Let's assume it could happen in
// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
// which is about 3000 years.
//
// This has to be first in the struct for 64bit alignment. See
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
countAndHotIdx uint64
selfCollector
desc *Desc
writeMtx sync.Mutex // Only used in the Write method.
@@ -230,23 +246,12 @@ type histogram struct {
upperBounds []float64
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric.
counts [2]histogramCounts
// "cold" for writing out a dto.Metric. It has to be an array of
// pointers to guarantee 64bit alignment of the histogramCounts, see
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*histogramCounts
hotIdx int // Index of currently-hot counts. Only used within Write.
// This is a complicated one. For lock-free yet atomic observations, we
// need to save the total count of observations again, combined with the
// index of the currently-hot counts struct, so that we can perform the
// operation on both values atomically. The least significant bit
// defines the hot counts struct. The remaining 63 bits represent the
// total count of observations. This happens under the assumption that
// the 63bit count will never overflow. Rationale: An observations takes
// about 30ns. Let's assume it could happen in 10ns. Overflowing the
// counter will then take at least (2^63)*10ns, which is about 3000
// years.
countAndHotIdx uint64
labelPairs []*dto.LabelPair
}
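A standalone sketch (not library code) of the bit layout the comment above describes: each observation atomically adds 2, which increments the count stored in the upper 63 bits while leaving the hot-index bit alone.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var countAndHotIdx uint64

	// One observation: add 2, i.e. increment the upper 63 bits by 1 and keep
	// the least significant (hot-index) bit untouched.
	n := atomic.AddUint64(&countAndHotIdx, 2)

	count := n >> 1            // total observations so far
	hotIdx := n & 1            // which of the two histogramCounts is hot (0 or 1)
	fmt.Println(count, hotIdx) // prints: 1 0
}
```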
@@ -270,7 +275,7 @@ func (h *histogram) Observe(v float64) {
// 63 bits gets incremented by 1. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 2)
hotCounts := &h.counts[n%2]
hotCounts := h.counts[n%2]
if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
@@ -322,13 +327,13 @@ func (h *histogram) Write(out *dto.Metric) error {
if h.hotIdx == 0 {
count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
h.hotIdx = 1
hotCounts = &h.counts[1]
coldCounts = &h.counts[0]
hotCounts = h.counts[1]
coldCounts = h.counts[0]
} else {
count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
h.hotIdx = 0
hotCounts = &h.counts[0]
coldCounts = &h.counts[1]
hotCounts = h.counts[0]
coldCounts = h.counts[1]
}
// Now we have to wait for the now-declared-cold counts to actually cool
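The swap performed in Write can be pictured with the same packed value (an illustrative sketch only): adding 1 sets the hot-index bit, and adding ^uint64(0) (an atomic decrement) clears it again, in both cases without touching the observation count held in the upper 63 bits.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Pretend 3 observations have happened and counts[0] is hot:
	// count=3 in the upper 63 bits, hot-index bit = 0.
	state := uint64(3) << 1

	// Declare counts[1] hot: adding 1 flips the LSB to 1; the returned value
	// still carries the unchanged count in its upper 63 bits.
	n := atomic.AddUint64(&state, 1)
	fmt.Println(n>>1, n&1) // prints: 3 1

	// Switch back to counts[0]: adding ^uint64(0) is an atomic decrement and
	// clears the LSB while leaving the count alone.
	n = atomic.AddUint64(&state, ^uint64(0))
	fmt.Println(n>>1, n&1) // prints: 3 0
}
```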


@@ -107,9 +107,6 @@ type Registerer interface {
// Collector, and for providing a Collector that will not cause
// inconsistent metrics on collection. (This would lead to scrape
// errors.)
//
// It is in general not safe to register the same Collector multiple
// times concurrently.
Register(Collector) error
// MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an
@@ -273,7 +270,12 @@ func (r *Registry) Register(c Collector) error {
close(descChan)
}()
r.mtx.Lock()
defer r.mtx.Unlock()
defer func() {
// Drain channel in case of premature return to not leak a goroutine.
for range descChan {
}
r.mtx.Unlock()
}()
// Conduct various tests...
for desc := range descChan {
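The deferred drain added here follows a general pattern: once a producer goroutine writes to an unbuffered channel, every early return on the consuming side must keep reading until the channel is closed, or the producer blocks forever. A generic sketch of that pattern with hypothetical names (not the registry code itself):

```go
package main

import "fmt"

func consume() error {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; i < 5; i++ {
			ch <- i // blocks until somebody reads
		}
	}()

	defer func() {
		// Drain the channel so the producer goroutine can finish and exit
		// even when we return early below.
		for range ch {
		}
	}()

	for v := range ch {
		if v == 2 {
			// Without the deferred drain, the producer would be stuck on
			// its next send forever after this return.
			return fmt.Errorf("premature return at %d", v)
		}
	}
	return nil
}

func main() {
	fmt.Println(consume())
}
```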
@@ -785,6 +787,8 @@ func checkMetricConsistency(
dtoMetric *dto.Metric,
metricHashes map[uint64]struct{},
) error {
name := metricFamily.GetName()
// Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
@@ -793,33 +797,42 @@ func checkMetricConsistency(
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf(
"collected metric %q { %s} is not a %s",
metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
name, dtoMetric, metricFamily.GetType(),
)
}
previousLabelName := ""
for _, labelPair := range dtoMetric.GetLabel() {
if !checkLabelName(labelPair.GetName()) {
labelName := labelPair.GetName()
if labelName == previousLabelName {
return fmt.Errorf(
"collected metric %q { %s} has a label with an invalid name: %s",
metricFamily.GetName(), dtoMetric, labelPair.GetName(),
"collected metric %q { %s} has two or more labels with the same name: %s",
name, dtoMetric, labelName,
)
}
if dtoMetric.Summary != nil && labelPair.GetName() == quantileLabel {
if !checkLabelName(labelName) {
return fmt.Errorf(
"collected metric %q { %s} has a label with an invalid name: %s",
name, dtoMetric, labelName,
)
}
if dtoMetric.Summary != nil && labelName == quantileLabel {
return fmt.Errorf(
"collected metric %q { %s} must not have an explicit %q label",
metricFamily.GetName(), dtoMetric, quantileLabel,
name, dtoMetric, quantileLabel,
)
}
if !utf8.ValidString(labelPair.GetValue()) {
return fmt.Errorf(
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
metricFamily.GetName(), dtoMetric, labelPair.GetName(), labelPair.GetValue())
name, dtoMetric, labelName, labelPair.GetValue())
}
previousLabelName = labelName
}
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew()
h = hashAdd(h, metricFamily.GetName())
h = hashAdd(h, name)
h = hashAddByte(h, separatorByte)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
@@ -833,7 +846,7 @@ func checkMetricConsistency(
if _, exists := metricHashes[h]; exists {
return fmt.Errorf(
"collected metric %q { %s} was collected before with the same name and label values",
metricFamily.GetName(), dtoMetric,
name, dtoMetric,
)
}
metricHashes[h] = struct{}{}
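The new duplicate check compares each label name with the one seen immediately before it, so it flags repeated names that appear next to each other, as they do in sorted label pairs (the hash computation above relies on sorted label pairs as well). A standalone sketch of the same adjacent-comparison idea on a plain string slice:

```go
package main

import (
	"fmt"
	"sort"
)

// hasDuplicateName reports whether the same name occurs twice, by sorting and
// then comparing each element with its predecessor, i.e. the same
// adjacent-comparison idea as the previousLabelName check above.
func hasDuplicateName(names []string) bool {
	sort.Strings(names)
	for i := 1; i < len(names); i++ {
		if names[i] == names[i-1] {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasDuplicateName([]string{"code", "method", "code"})) // true
	fmt.Println(hasDuplicateName([]string{"code", "method"}))         // false
}
```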

vendor/vendor.json vendored

@@ -759,32 +759,32 @@
{
"checksumSHA1": "qeRNcE15+58PvlfKwBA2n96Qa5I=",
"path": "github.com/prometheus/client_golang/api",
"revision": "e637cec7d9c8990247098639ebc6d43dd34ddd49",
"revisionTime": "2018-09-17T10:21:22Z"
"revision": "0a8115f42e037a6e327f9a269a26ff6603fb8472",
"revisionTime": "2018-10-01T17:40:01Z"
},
{
"checksumSHA1": "83pGB3cRG5uqx9O5d+7MCB+TFT4=",
"path": "github.com/prometheus/client_golang/api/prometheus/v1",
"revision": "e637cec7d9c8990247098639ebc6d43dd34ddd49",
"revisionTime": "2018-09-17T10:21:22Z"
"revision": "0a8115f42e037a6e327f9a269a26ff6603fb8472",
"revisionTime": "2018-10-01T17:40:01Z"
},
{
"checksumSHA1": "lLvg5TpUtFbkyAoh+aI5T/nnpWw=",
"checksumSHA1": "frS661rlSEZWE9CezHhnFioQK/I=",
"path": "github.com/prometheus/client_golang/prometheus",
"revision": "e637cec7d9c8990247098639ebc6d43dd34ddd49",
"revisionTime": "2018-09-17T10:21:22Z"
"revision": "0a8115f42e037a6e327f9a269a26ff6603fb8472",
"revisionTime": "2018-10-01T17:40:01Z"
},
{
"checksumSHA1": "UBqhkyjCz47+S19MVTigxJ2VjVQ=",
"path": "github.com/prometheus/client_golang/prometheus/internal",
"revision": "e637cec7d9c8990247098639ebc6d43dd34ddd49",
"revisionTime": "2018-09-17T10:21:22Z"
"revision": "0a8115f42e037a6e327f9a269a26ff6603fb8472",
"revisionTime": "2018-10-01T17:40:01Z"
},
{
"checksumSHA1": "d5BiEvD8MrgpWQ6PQJUvawJsMak=",
"path": "github.com/prometheus/client_golang/prometheus/promhttp",
"revision": "e637cec7d9c8990247098639ebc6d43dd34ddd49",
"revisionTime": "2018-09-17T10:21:22Z"
"revision": "0a8115f42e037a6e327f9a269a26ff6603fb8472",
"revisionTime": "2018-10-01T17:40:01Z"
},
{
"checksumSHA1": "V8xkqgmP66sq2ZW4QO5wi9a4oZE=",