Make bufPool a member of the persistence struct.

beorn7 2015-04-14 10:43:09 +02:00
parent b02d900e61
commit 66fc61f9b7
2 changed files with 12 additions and 7 deletions
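The change itself is small: the package-level byteBufPool variable becomes a bufPool field on persistence, initialized in newPersistence and used through the receiver in loadChunks. Below is a minimal, self-contained sketch of that pattern; the trimmed-down struct, minBufCap, and loadSomething are illustrative stand-ins, not the actual Prometheus code.

package main

import (
	"fmt"
	"sync"
)

const minBufCap = 3 * 1024 // stand-in for 3*chunkLenWithHeader

type persistence struct {
	bufPool sync.Pool // pool of []byte scratch buffers, owned by the struct
}

func newPersistence() *persistence {
	return &persistence{
		// New guarantees that Get never hands out a buffer below the minimum capacity.
		bufPool: sync.Pool{New: func() interface{} { return make([]byte, 0, minBufCap) }},
	}
}

func (p *persistence) loadSomething() {
	buf := p.bufPool.Get().([]byte) // reuse a pooled buffer instead of allocating
	defer p.bufPool.Put(buf)        // hand it back for later calls
	fmt.Println("scratch buffer capacity:", cap(buf))
}

func main() {
	newPersistence().loadSomething()
}

Tying the pool to the struct keeps buffer reuse scoped to one persistence instance and lets tests construct their own pool, which is exactly what the benchmark changes in the second file below do.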

View File

@@ -71,11 +71,7 @@ const (
 	indexingQueueCapacity = 1024 * 16
 )
 
-var (
-	fpLen = len(clientmodel.Fingerprint(0).String()) // Length of a fingerprint as string.
-
-	byteBufPool = sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }}
-)
+var fpLen = len(clientmodel.Fingerprint(0).String()) // Length of a fingerprint as string.
 
 const (
 	flagHeadChunkPersisted byte = 1 << iota
@@ -128,6 +124,8 @@ type persistence struct {
 	fLock flock.Releaser // The file lock to protect against concurrent usage.
 
 	shouldSync syncStrategy
+
+	bufPool sync.Pool
 }
 
 // newPersistence returns a newly allocated persistence backed by local disk storage, ready to use.
@@ -239,6 +237,10 @@ func newPersistence(basePath string, dirty, pedanticChecks bool, shouldSync sync
 		dirtyFileName: dirtyPath,
 		fLock:         fLock,
 		shouldSync:    shouldSync,
+		// Create buffers of length 3*chunkLenWithHeader by default because that is still reasonably small
+		// and at the same time enough for many uses. The contract is to never return buffers smaller than
+		// that to the pool so that callers can rely on a minimum buffer size.
+		bufPool: sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }},
 	}
 
 	if p.dirty {
@@ -383,9 +385,9 @@ func (p *persistence) loadChunks(fp clientmodel.Fingerprint, indexes []int, inde
 	defer f.Close()
 
 	chunks := make([]chunk, 0, len(indexes))
-	buf := byteBufPool.Get().([]byte)
+	buf := p.bufPool.Get().([]byte)
 	defer func() {
-		byteBufPool.Put(buf)
+		p.bufPool.Put(buf)
 	}()
 
 	for i := 0; i < len(indexes); i++ {
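The constructor comment above promises that buffers returned to the pool never have a capacity below 3*chunkLenWithHeader, so callers like loadChunks can skip re-checking for small reads. Here is a hedged sketch of what such a contract looks like in practice; minCap and useScratch are simplified stand-ins, not the loadChunks implementation.

package main

import (
	"fmt"
	"sync"
)

const minCap = 3 * 1024 // stand-in for 3*chunkLenWithHeader

// useScratch borrows a buffer from pool, grows it if the request exceeds the
// pooled minimum, and only returns slices that still honor minCap.
func useScratch(pool *sync.Pool, n int) {
	buf := pool.Get().([]byte)
	defer func() {
		if cap(buf) >= minCap { // keep the pool's minimum-capacity invariant
			pool.Put(buf[:0])
		}
	}()
	if n > cap(buf) {
		buf = make([]byte, 0, n) // grow beyond the pooled minimum for large reads
	}
	buf = buf[:n]
	fmt.Printf("working with len=%d cap=%d\n", len(buf), cap(buf))
}

func main() {
	pool := &sync.Pool{New: func() interface{} { return make([]byte, 0, minCap) }}
	useScratch(pool, 100)      // served by a pooled buffer
	useScratch(pool, 5*minCap) // grows, and the larger slice flows back into the pool
}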

View File

@@ -15,6 +15,7 @@ package local
 
 import (
 	"reflect"
+	"sync"
 	"testing"
 
 	clientmodel "github.com/prometheus/client_golang/model"
@@ -894,6 +895,7 @@ var fpStrings = []string{
 func BenchmarkLoadChunksSequentially(b *testing.B) {
 	p := persistence{
 		basePath: "fixtures",
+		bufPool:  sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }},
 	}
 	sequentialIndexes := make([]int, 47)
 	for i := range sequentialIndexes {
@@ -918,6 +920,7 @@ func BenchmarkLoadChunksSequentially(b *testing.B) {
 func BenchmarkLoadChunksRandomly(b *testing.B) {
 	p := persistence{
 		basePath: "fixtures",
+		bufPool:  sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }},
 	}
 
 	randomIndexes := []int{1, 5, 6, 8, 11, 14, 18, 23, 29, 33, 42, 46}
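A note on why both benchmarks now construct the pool explicitly: the zero value of sync.Pool has no New function, so Get returns a nil interface{}, and the .([]byte) type assertion in loadChunks would panic. A small standalone illustration of that behaviour (an assumed example, not test code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var empty sync.Pool
	fmt.Println(empty.Get() == nil) // true: nothing pooled and no New to fall back on

	ready := sync.Pool{New: func() interface{} { return make([]byte, 0, 64) }}
	buf := ready.Get().([]byte) // safe: New always supplies a non-nil []byte
	fmt.Println(cap(buf))       // 64
}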