// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package local

import (
	"container/list"
	"fmt"
	"io"
	"sync"
	"sync/atomic"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/storage/metric"
)

// The DefaultChunkEncoding can be changed via a flag.
var DefaultChunkEncoding = doubleDelta

type chunkEncoding byte

// String implements flag.Value.
func (ce chunkEncoding) String() string {
	return fmt.Sprintf("%d", ce)
}

// Set implements flag.Value.
func (ce *chunkEncoding) Set(s string) error {
	switch s {
	case "0":
		*ce = delta
	case "1":
		*ce = doubleDelta
	default:
		return fmt.Errorf("invalid chunk encoding: %s", s)
	}
	return nil
}

const (
	delta chunkEncoding = iota
	doubleDelta
)
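
// Because *chunkEncoding implements flag.Value via the String and Set methods
// above, the default encoding can be selected on the command line. A minimal
// sketch, not part of the original file (the flag name is illustrative, and
// the standard flag package would have to be imported where this runs):
//
//	flag.Var(&DefaultChunkEncoding, "storage.local.chunk-encoding",
//		"Chunk encoding for newly created chunks: 0 (delta) or 1 (double delta).")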

// chunkDesc contains meta-data for a chunk. Pay special attention to the
// documented requirements for calling its methods (WRT pinning and locking).
// The doc comments spell out the requirements for each method, but here is an
// overview and general explanation:
//
// Everything that changes the pinning of the underlying chunk or deals with its
// eviction is protected by a mutex. This affects the following methods: pin,
// unpin, refCount, isEvicted, maybeEvict. These methods can be called at any
// time without further prerequisites.
//
// Another group of methods acts on (or sets) the underlying chunk. These
// methods involve no locking. They may only be called if the caller has pinned
// the chunk (to guarantee the chunk is not evicted concurrently). Also, the
// caller must make sure nobody else will call these methods concurrently,
// either by holding the sole reference to the chunkDesc (usually during loading
// or creation) or by locking the fingerprint of the series the chunkDesc
// belongs to. The affected methods are: add, lastTime, maybePopulateLastTime,
// lastSamplePair, setChunk.
//
// Finally, there is the firstTime method. It merely returns the immutable
// chunkFirstTime member variable. It's arguably not needed and only there for
// consistency with lastTime. It can be called at any time and doesn't involve
// locking.
type chunkDesc struct {
	sync.Mutex                // Protects pinning.
	c              chunk      // nil if chunk is evicted.
	rCnt           int
	chunkFirstTime model.Time // Populated at creation. Immutable.
	chunkLastTime  model.Time // Populated on closing of the chunk, model.Earliest if unset.

	// evictListElement is nil if the chunk is not in the evict list.
	// evictListElement is _not_ protected by the chunkDesc mutex.
	// It must only be touched by the evict list handler in memorySeriesStorage.
	evictListElement *list.Element
}
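
// exampleChunkDescPinnedRead is an illustrative sketch, not part of the
// original file: it shows the calling contract documented above. The chunkDesc
// is pinned first so the underlying chunk cannot be evicted, then the
// chunk-accessing method is called, then the pin is released. The caller is
// additionally assumed to hold the lock for the fingerprint of the owning
// series, which this snippet cannot express.
func exampleChunkDescPinnedRead(cd *chunkDesc, evictRequests chan<- evictRequest) model.Time {
	cd.pin(evictRequests) // Requests removal from the evict list if previously unpinned.
	defer cd.unpin(evictRequests)

	return cd.lastTime() // Safe: the chunk cannot be evicted while pinned.
}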

// newChunkDesc creates a new chunkDesc pointing to the provided chunk. The
// provided chunk is assumed to be not persisted yet. Therefore, the refCount of
// the new chunkDesc is 1 (preventing eviction prior to persisting).
func newChunkDesc(c chunk, firstTime model.Time) *chunkDesc {
	chunkOps.WithLabelValues(createAndPin).Inc()
	atomic.AddInt64(&numMemChunks, 1)
	numMemChunkDescs.Inc()
	return &chunkDesc{
		c:              c,
		rCnt:           1,
		chunkFirstTime: firstTime,
		chunkLastTime:  model.Earliest,
	}
}
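
// exampleFirstSample is an illustrative sketch, not part of the original file:
// it shows how a freshly cut chunk would typically be wrapped. The first
// sample's timestamp becomes chunkFirstTime, and the descriptor starts out
// pinned (refCount 1), so it cannot be evicted before it has been persisted.
// newChunk is assumed to exist elsewhere in this package and to return a chunk
// using DefaultChunkEncoding.
func exampleFirstSample(s model.SamplePair) *chunkDesc {
	cd := newChunkDesc(newChunk(), s.Timestamp)
	cd.add(&s) // Overflow chunks returned by add are ignored in this sketch.
	return cd
}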

// add adds a sample pair to the underlying chunk. The chunk must be pinned, and
// the caller must have locked the fingerprint of the series.
func (cd *chunkDesc) add(s *model.SamplePair) []chunk {
	return cd.c.add(s)
}

// pin increments the refCount by one. Upon increment from 0 to 1, this
// chunkDesc is removed from the evict list. To enable the latter, the
// evictRequests channel has to be provided. This method can be called
// concurrently at any time.
func (cd *chunkDesc) pin(evictRequests chan<- evictRequest) {
	cd.Lock()
	defer cd.Unlock()

	if cd.rCnt == 0 {
		// Remove ourselves from the evict list.
		evictRequests <- evictRequest{cd, false}
	}
	cd.rCnt++
}

// unpin decrements the refCount by one. Upon decrement from 1 to 0, this
// chunkDesc is added to the evict list. To enable the latter, the evictRequests
// channel has to be provided. This method can be called concurrently at any
// time.
func (cd *chunkDesc) unpin(evictRequests chan<- evictRequest) {
	cd.Lock()
	defer cd.Unlock()

	if cd.rCnt == 0 {
		panic("cannot unpin already unpinned chunk")
	}
	cd.rCnt--
	if cd.rCnt == 0 {
		// Add ourselves to the back of the evict list.
		evictRequests <- evictRequest{cd, true}
	}
}
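
// exampleNestedPinning is an illustrative sketch, not part of the original
// file: pins nest via the reference count, and the chunkDesc is only handed
// back to the evict list once every pin has been matched by an unpin. cd is
// assumed to start out unpinned here.
func exampleNestedPinning(cd *chunkDesc, evictRequests chan<- evictRequest) {
	cd.pin(evictRequests) // refCount 0 -> 1: removal from the evict list is requested.
	cd.pin(evictRequests) // refCount 1 -> 2: no evict-list traffic.

	cd.unpin(evictRequests) // refCount 2 -> 1: still pinned.
	cd.unpin(evictRequests) // refCount 1 -> 0: queued for the evict list again.
}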

// refCount returns the number of pins. This method can be called concurrently
// at any time.
func (cd *chunkDesc) refCount() int {
	cd.Lock()
	defer cd.Unlock()

	return cd.rCnt
}

// firstTime returns the timestamp of the first sample in the chunk. This method
// can be called concurrently at any time. It only returns the immutable
// cd.chunkFirstTime without any locking. Arguably, this method is
// useless. However, it provides consistency with the lastTime method.
func (cd *chunkDesc) firstTime() model.Time {
	return cd.chunkFirstTime
}

// lastTime returns the timestamp of the last sample in the chunk. It must not
// be called concurrently with maybePopulateLastTime. If the chunkDesc is part
// of a memory series, this method requires the chunk to be pinned and the
// fingerprint of the time series to be locked.
func (cd *chunkDesc) lastTime() model.Time {
	if cd.chunkLastTime != model.Earliest || cd.c == nil {
		return cd.chunkLastTime
	}
	return cd.c.newIterator().lastTimestamp()
}

// maybePopulateLastTime populates the chunkLastTime from the underlying chunk
// if it has not yet happened. The chunk must be pinned, and the caller must
// have locked the fingerprint of the series. This method must not be called
// concurrently with lastTime.
func (cd *chunkDesc) maybePopulateLastTime() {
	if cd.chunkLastTime == model.Earliest && cd.c != nil {
		cd.chunkLastTime = cd.c.newIterator().lastTimestamp()
	}
}
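
// exampleCloseChunk is an illustrative sketch, not part of the original file:
// once a chunk is complete, the caller caches its last timestamp via
// maybePopulateLastTime so that lastTime keeps working even after the chunk
// itself has been evicted from memory. The caller is assumed to hold a pin on
// the chunk and the lock on the series fingerprint.
func exampleCloseChunk(cd *chunkDesc) model.Time {
	cd.maybePopulateLastTime() // Caches chunkLastTime while cd.c is still present.
	return cd.lastTime()       // Now answered from chunkLastTime without touching cd.c.
}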

// lastSamplePair returns the last sample pair of the underlying chunk. The
// chunk must be pinned.
func (cd *chunkDesc) lastSamplePair() *model.SamplePair {
	if cd.c == nil {
		return nil
	}
	it := cd.c.newIterator()
	return &model.SamplePair{
		Timestamp: it.lastTimestamp(),
		Value:     it.lastSampleValue(),
	}
}

// isEvicted returns whether the chunk is evicted. The caller must have locked
// the fingerprint of the series.
func (cd *chunkDesc) isEvicted() bool {
	// Locking required here because we do not want the caller to force
	// pinning the chunk first, so it could be evicted while this method is
	// called.
	cd.Lock()
	defer cd.Unlock()

	return cd.c == nil
}
|
|
|
|
|

// setChunk sets the underlying chunk. The caller must have locked the
// fingerprint of the series and must have "pre-pinned" the chunk (i.e. first
// call pin and then set the chunk).
func (cd *chunkDesc) setChunk(c chunk) {
	if cd.c != nil {
		panic("chunk already set")
	}
	cd.c = c
}
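
// The following is an illustrative sketch, not part of the original file,
// showing the calling contract of setChunk described above. The names
// fpLocker, fp, and evictRequests are assumptions standing in for the
// locking and pinning machinery of the surrounding series code.
//
//	s.fpLocker.Lock(fp)       // lock the fingerprint of the series
//	cd.pin(evictRequests)     // "pre-pin" so the chunk cannot be evicted
//	cd.setChunk(newChunk())   // only now hand the chunk to the descriptor
//	s.fpLocker.Unlock(fp)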

// maybeEvict evicts the chunk if the refCount is 0. It returns whether the chunk
// is now evicted, which includes the case that the chunk was evicted even
// before this method was called. It can be called concurrently at any time.
func (cd *chunkDesc) maybeEvict() bool {
	cd.Lock()
	defer cd.Unlock()

	if cd.c == nil {
		return true
	}
	if cd.rCnt != 0 {
		return false
	}
	// Last opportunity to populate chunkLastTime. This is a safety
	// guard. Normally, chunkLastTime should be populated upon completion
	// of a chunk, before persistence can kick in to unpin it (and thereby
	// make it evictable in the first place).
	if cd.chunkLastTime == model.Earliest {
		cd.chunkLastTime = cd.c.newIterator().lastTimestamp()
	}
	cd.c = nil
	chunkOps.WithLabelValues(evict).Inc()
	atomic.AddInt64(&numMemChunks, -1)
	return true
}
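
// exampleEvictUnpinned is an illustrative sketch and not part of the original
// file. It assumes a caller-maintained slice of chunkDescs and lets each of
// them evict its chunk if it is no longer pinned. Since maybeEvict can be
// called concurrently at any time, no additional locking is required here.
func exampleEvictUnpinned(cds []*chunkDesc) (evicted int) {
	for _, cd := range cds {
		if cd.maybeEvict() {
			evicted++
		}
	}
	return evicted
}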

// chunk is the interface for all chunks. Chunks are generally not
// goroutine-safe.
type chunk interface {
	// add adds a SamplePair to the chunk, performs any necessary
	// re-encoding, and adds any necessary overflow chunks. It returns the
	// new version of the original chunk, followed by overflow chunks, if
	// any. The first chunk returned might be the same as the original one
	// or a newly allocated version. In any case, take the returned chunk as
	// the relevant one and discard the original chunk.
	add(sample *model.SamplePair) []chunk
	clone() chunk
	firstTime() model.Time
	newIterator() chunkIterator
	marshal(io.Writer) error
	marshalToBuf([]byte) error
	unmarshal(io.Reader) error
	unmarshalFromBuf([]byte)
	encoding() chunkEncoding
}
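
// exampleAppendSamples is an illustrative sketch and not part of the original
// file. It demonstrates the add contract described above: always continue
// with the last chunk returned by add and treat all preceding chunks as
// completed overflow chunks. The same pattern is used by transcodeAndAdd
// further below.
func exampleAppendSamples(c chunk, samples []*model.SamplePair) []chunk {
	completed := []chunk{}
	head := c
	for _, s := range samples {
		newChunks := head.add(s)
		// Everything but the last returned chunk is an overflow chunk
		// that will not receive further samples.
		completed = append(completed, newChunks[:len(newChunks)-1]...)
		head = newChunks[len(newChunks)-1]
	}
	return append(completed, head)
}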

// A chunkIterator enables efficient access to the content of a chunk. It is
// generally not safe to use a chunkIterator concurrently with or after chunk
// mutation.
type chunkIterator interface {
	// length returns the number of samples in the chunk.
	length() int
	// Gets the timestamp of the n-th sample in the chunk.
	timestampAtIndex(int) model.Time
	// Gets the last timestamp in the chunk.
	lastTimestamp() model.Time
	// Gets the sample value of the n-th sample in the chunk.
	sampleValueAtIndex(int) model.SampleValue
	// Gets the last sample value in the chunk.
	lastSampleValue() model.SampleValue
	// Gets the value that is closest before the given time. If a value
	// exists at precisely the given time, that value is returned. If no
	// applicable value exists, a SamplePair with timestamp model.Earliest
	// and value 0.0 is returned.
	valueAtOrBeforeTime(model.Time) model.SamplePair
	// Gets all values contained within a given interval.
	rangeValues(metric.Interval) []model.SamplePair
	// Whether a given timestamp is contained between the first and last
	// value in the chunk.
	contains(model.Time) bool
	// values returns a channel from which all sample values in the chunk
	// can be received in order. The channel is closed after the last
	// one. It is generally not safe to mutate the chunk while the channel
	// is still open.
	values() <-chan *model.SamplePair
}
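
// exampleSumInInterval is an illustrative sketch and not part of the original
// file. It uses only the chunkIterator methods declared above to sum all
// sample values of a chunk that fall into the given interval.
func exampleSumInInterval(c chunk, in metric.Interval) model.SampleValue {
	var sum model.SampleValue
	for _, sp := range c.newIterator().rangeValues(in) {
		sum += sp.Value
	}
	return sum
}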

// transcodeAndAdd re-encodes all samples of src into dst, then adds the
// sample s, and returns the resulting chunks, following the same contract as
// the add method of the chunk interface.
func transcodeAndAdd(dst chunk, src chunk, s *model.SamplePair) []chunk {
	chunkOps.WithLabelValues(transcode).Inc()

	head := dst
	body := []chunk{}
	for v := range src.newIterator().values() {
		newChunks := head.add(v)
		body = append(body, newChunks[:len(newChunks)-1]...)
		head = newChunks[len(newChunks)-1]
	}
	newChunks := head.add(s)
	return append(body, newChunks...)
}
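
// exampleTranscode is an illustrative sketch and not part of the original
// file. It shows how transcodeAndAdd might be used to switch an existing
// chunk to double-delta encoding while adding one more sample; the old chunk
// must be discarded afterwards, as with add.
func exampleTranscode(old chunk, s *model.SamplePair) []chunk {
	return transcodeAndAdd(newChunkForEncoding(doubleDelta), old, s)
}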

// newChunk creates a new chunk according to the encoding set by the
// DefaultChunkEncoding flag.
func newChunk() chunk {
	return newChunkForEncoding(DefaultChunkEncoding)
}

func newChunkForEncoding(encoding chunkEncoding) chunk {
	switch encoding {
	case delta:
		return newDeltaEncodedChunk(d1, d0, true, chunkLen)
	case doubleDelta:
		return newDoubleDeltaEncodedChunk(d1, d0, true, chunkLen)
	default:
		panic(fmt.Errorf("unknown chunk encoding: %v", encoding))
	}
}
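
// exampleChunkCreation is an illustrative sketch and not part of the original
// file. It creates one empty chunk per known encoding via newChunkForEncoding
// and reports the encoding each chunk ends up with.
func exampleChunkCreation() {
	for _, enc := range []chunkEncoding{delta, doubleDelta} {
		c := newChunkForEncoding(enc)
		fmt.Printf("new chunk uses encoding %v\n", c.encoding())
	}
}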