Merge pull request #3835 from krasi-georgiev/pool-package-generalize

Pool package generalize
This commit is contained in:
Fabian Reinartz 2018-02-28 14:30:46 +01:00 committed by GitHub
commit ef567ceb7d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 34 additions and 21 deletions

View File

@ -13,17 +13,23 @@
package pool
import "sync"
import (
"fmt"
"reflect"
"sync"
)
// BytesPool is a bucketed pool for variably sized byte slices.
type BytesPool struct {
// Pool is a bucketed pool for variably sized byte slices.
type Pool struct {
	// buckets holds one sync.Pool per size class; buckets[i] stores
	// slices whose capacity fits sizes[i].
	buckets []sync.Pool
	// sizes are the bucket capacities, in increasing order.
	sizes []int
	// make is the function used to create an empty slice when none exist yet.
	make func(int) interface{}
}
// NewBytesPool returns a new BytesPool with size buckets for minSize to maxSize
// New returns a new Pool with bucket sizes ranging from minSize to maxSize,
// increasing by the given factor.
func NewBytesPool(minSize, maxSize int, factor float64) *BytesPool {
func New(minSize, maxSize int, factor float64, makeFunc func(int) interface{}) *Pool {
if minSize < 1 {
panic("invalid minimum pool size")
}
@ -40,36 +46,42 @@ func NewBytesPool(minSize, maxSize int, factor float64) *BytesPool {
sizes = append(sizes, s)
}
p := &BytesPool{
p := &Pool{
buckets: make([]sync.Pool, len(sizes)),
sizes: sizes,
make: makeFunc,
}
return p
}
// Get returns a new slice that fits the given size.
func (p *BytesPool) Get(sz int) []byte {
func (p *Pool) Get(sz int) interface{} {
for i, bktSize := range p.sizes {
if sz > bktSize {
continue
}
b, ok := p.buckets[i].Get().([]byte)
if !ok {
b = make([]byte, 0, bktSize)
b := p.buckets[i].Get()
if b == nil {
b = p.make(bktSize)
}
return b
}
return make([]byte, 0, sz)
return p.make(sz)
}
// Put returns a byte slice to the right bucket in the pool.
func (p *BytesPool) Put(b []byte) {
for i, bktSize := range p.sizes {
if cap(b) > bktSize {
// Put adds a slice to the right bucket in the pool.
func (p *Pool) Put(s interface{}) {
slice := reflect.ValueOf(s)
if slice.Kind() != reflect.Slice {
panic(fmt.Sprintf("%+v is not a slice", slice))
}
for i, size := range p.sizes {
if slice.Cap() > size {
continue
}
p.buckets[i].Put(b[:0])
p.buckets[i].Put(slice.Slice(0, 0).Interface())
return
}
}

View File

@ -148,7 +148,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger)
level.Error(logger).Log("msg", "Error creating HTTP client", "err", err)
}
buffers := pool.NewBytesPool(163, 100e6, 3)
buffers := pool.New(163, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
ctx, cancel := context.WithCancel(context.Background())
sp := &scrapePool{
@ -487,7 +487,7 @@ type scrapeLoop struct {
l log.Logger
cache *scrapeCache
lastScrapeSize int
buffers *pool.BytesPool
buffers *pool.Pool
appender func() storage.Appender
sampleMutator labelsMutator
@ -602,7 +602,7 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
func newScrapeLoop(ctx context.Context,
sc scraper,
l log.Logger,
buffers *pool.BytesPool,
buffers *pool.Pool,
sampleMutator labelsMutator,
reportSampleMutator labelsMutator,
appender func() storage.Appender,
@ -611,7 +611,7 @@ func newScrapeLoop(ctx context.Context,
l = log.NewNopLogger()
}
if buffers == nil {
buffers = pool.NewBytesPool(1e3, 1e6, 3)
buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
}
sl := &scrapeLoop{
scraper: sc,
@ -668,7 +668,8 @@ mainLoop:
time.Since(last).Seconds(),
)
}
b := sl.buffers.Get(sl.lastScrapeSize)
b := sl.buffers.Get(sl.lastScrapeSize).([]byte)
buf := bytes.NewBuffer(b)
scrapeErr := sl.scraper.scrape(scrapeCtx, buf)