vendor: update tsdb and go-kit/log
commit a2e7b0b934
parent 397f001ac5
@@ -15,7 +15,7 @@ import (
 )

 const (
-	// MagicChunks 4 bytes at the head of series file.
+	// MagicChunks is 4 bytes at the head of series file.
 	MagicChunks = 0x85BD40DD
 )
@@ -141,11 +141,11 @@ func (c *compactor) match(bs []*BlockMeta) bool {
 	return uint64(bs[len(bs)-1].MaxTime-bs[0].MinTime) <= c.opts.maxBlockRange
 }

-var entropy = rand.New(rand.NewSource(time.Now().UnixNano()))
-
 func mergeBlockMetas(blocks ...Block) (res BlockMeta) {
 	m0 := blocks[0].Meta()

+	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
+
 	res.Sequence = m0.Sequence
 	res.MinTime = m0.MinTime
 	res.MaxTime = blocks[len(blocks)-1].Meta().MaxTime
@@ -258,10 +258,6 @@ func (c *compactor) populate(blocks []Block, indexw IndexWriter, chunkw ChunkWri
 		if err != nil {
 			return nil, err
 		}
-		// TODO(fabxc): find more transparent way of handling this.
-		if hb, ok := b.(*headBlock); ok {
-			all = hb.remapPostings(all)
-		}
 		s := newCompactionSeriesSet(b.Index(), b.Chunks(), all)

 		if i == 0 {
@@ -8,6 +8,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"runtime"
 	"strconv"
 	"strings"
 	"sync"
@@ -98,6 +99,7 @@ type DB struct {

 	// Mutex that must be held when modifying just the head blocks
 	// or the general layout.
+	// Must never be held when acquiring a blocks's mutex!
 	headmtx sync.RWMutex
 	heads   []HeadBlock
@@ -154,7 +156,7 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db
 	if l == nil {
 		l = log.NewLogfmtLogger(os.Stdout)
-		l = log.NewContext(l).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+		l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
 	}

 	if opts == nil {
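The change above mirrors go-kit/log's move away from the Context type to plain package functions. A minimal sketch of the new call style (the output comment is illustrative):

```go
package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	// log.With replaces the old log.NewContext(logger).With(...) chain.
	logger := log.NewLogfmtLogger(os.Stdout)
	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	logger.Log("msg", "opening storage")
	// ts=<RFC3339Nano> caller=main.go:14 msg="opening storage"
}
```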
@@ -232,16 +234,17 @@ func (db *DB) retentionCutoff() (bool, error) {
 	db.mtx.RLock()
 	defer db.mtx.RUnlock()

+	// We only consider the already persisted blocks. Head blocks generally
+	// only account for a fraction of the total data.
 	db.headmtx.RLock()
-	defer db.headmtx.RUnlock()
+	lenp := len(db.blocks) - len(db.heads)
+	db.headmtx.RUnlock()

-	// We don't count the span covered by head blocks towards the
-	// retention time as it generally makes up a fraction of it.
-	if len(db.blocks)-len(db.heads) == 0 {
+	if lenp == 0 {
 		return false, nil
 	}

-	last := db.blocks[len(db.blocks)-len(db.heads)-1]
+	last := db.blocks[lenp-1]
 	mint := last.Meta().MaxTime - int64(db.opts.RetentionDuration)

 	return retentionCutoff(db.dir, mint)
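The rewritten retentionCutoff no longer holds headmtx via defer; it copies the one value it needs and unlocks immediately. A small sketch of that pattern, with illustrative field names rather than the real tsdb types:

```go
package main

import (
	"fmt"
	"sync"
)

// store stands in for DB; the field names are illustrative only.
type store struct {
	mtx    sync.RWMutex
	blocks []string
	heads  []string
}

// persistedCount copies what it needs under the read lock and releases the
// lock right away, instead of holding it with defer for the whole function.
func (s *store) persistedCount() int {
	s.mtx.RLock()
	n := len(s.blocks) - len(s.heads)
	s.mtx.RUnlock()
	return n
}

func main() {
	s := &store{blocks: []string{"b1", "b2", "b3"}, heads: []string{"h1"}}
	fmt.Println(s.persistedCount()) // 2
}
```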
@@ -283,6 +286,7 @@ func (db *DB) compact() (changes bool, err error) {
 			return changes, errors.Wrap(err, "persist head block")
 		}
 		changes = true
+		runtime.GC()
 	}

 	// Check for compactions of multiple blocks.
@@ -291,6 +295,9 @@ func (db *DB) compact() (changes bool, err error) {
 		if err != nil {
 			return changes, errors.Wrap(err, "plan compaction")
 		}
+		if len(plans) == 0 {
+			break
+		}

 		select {
 		case <-db.stopc:
@@ -307,10 +314,7 @@ func (db *DB) compact() (changes bool, err error) {
 				return changes, errors.Wrapf(err, "compact %s", p)
 			}
 			changes = true
-		}
-		// If we didn't compact anything, there's nothing left to do.
-		if len(plans) == 0 {
-			break
+			runtime.GC()
 		}
 	}
@@ -362,7 +366,7 @@ func (db *DB) seqBlock(i int) (Block, bool) {

 func (db *DB) reloadBlocks() error {
 	var cs []io.Closer
-	defer closeAll(cs...)
+	defer func() { closeAll(cs...) }()

 	db.mtx.Lock()
 	defer db.mtx.Unlock()
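The closure around closeAll matters because the arguments of a deferred call are evaluated when the defer statement runs, not when the function returns, and reloadBlocks keeps appending to cs afterwards. A minimal illustration of the difference:

```go
package main

import "fmt"

func main() {
	xs := []int{1}

	// The argument is evaluated here, so this call sees the one-element slice.
	defer fmt.Println("plain defer:", xs)

	// The closure reads xs when it runs at return time, so it sees later appends.
	defer func() { fmt.Println("closure defer:", xs) }()

	xs = append(xs, 2, 3)
}

// Output (deferred calls run last-in, first-out):
// closure defer: [1 2 3]
// plain defer: [1]
```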
@@ -419,7 +423,7 @@ func (db *DB) reloadBlocks() error {
 	// Close all blocks that we no longer need. They are closed after returning all
 	// locks to avoid questionable locking order.
 	for _, b := range db.blocks {
-		if nb := seqBlocks[b.Meta().Sequence]; nb != b {
+		if nb, ok := seqBlocks[b.Meta().Sequence]; !ok || nb != b {
 			cs = append(cs, b)
 		}
 	}
@@ -670,7 +674,7 @@ func (db *DB) cut(mint int64) (HeadBlock, error) {
 		return nil, err
 	}

-	db.blocks = append(db.blocks, newHead)
+	db.blocks = append(db.blocks, newHead) // TODO(fabxc): this is a race!
 	db.heads = append(db.heads, newHead)

 	select {
@@ -17,9 +17,13 @@ import (
 	"github.com/pkg/errors"
 )

+// func init() {
+// 	deadlock.Opts.OnPotentialDeadlock = func() { fmt.Println("found deadlock") }
+// }
+
 var (
 	// ErrNotFound is returned if a looked up resource was not found.
-	ErrNotFound = fmt.Errorf("not found")
+	ErrNotFound = errors.Errorf("not found")

 	// ErrOutOfOrderSample is returned if an appended sample has a
 	// timestamp larger than the most recent sample.
@@ -53,8 +57,7 @@ type headBlock struct {
 	values   map[string]stringset // label names to possible values
 	postings *memPostings         // postings lists for terms

-	metamtx sync.RWMutex
-	meta    BlockMeta
+	meta BlockMeta
 }

 func createHeadBlock(dir string, seq int, l log.Logger, mint, maxt int64) (*headBlock, error) {
@@ -64,6 +67,9 @@ func createHeadBlock(dir string, seq int, l log.Logger, mint, maxt int64) (*head
 	if err := os.MkdirAll(tmp, 0777); err != nil {
 		return nil, err
 	}
+
+	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
+
 	ulid, err := ulid.New(ulid.Now(), entropy)
 	if err != nil {
 		return nil, err
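Creating the entropy source inside createHeadBlock avoids sharing one *rand.Rand across goroutines, which is not safe without extra locking. A sketch of ULID creation with a per-call source; the github.com/oklog/ulid import path is an assumption about which ulid package tsdb uses here:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/oklog/ulid"
)

// newULID builds a fresh entropy source per call; a shared *rand.Rand is not
// safe for concurrent use, which is what the change above avoids.
func newULID() (ulid.ULID, error) {
	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
	return ulid.New(ulid.Now(), entropy)
}

func main() {
	id, err := newULID()
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}
```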
@@ -85,7 +91,7 @@ func createHeadBlock(dir string, seq int, l log.Logger, mint, maxt int64) (*head

 // openHeadBlock creates a new empty head block.
 func openHeadBlock(dir string, l log.Logger) (*headBlock, error) {
-	wal, err := OpenWAL(dir, log.NewContext(l).With("component", "wal"), 5*time.Second)
+	wal, err := OpenWAL(dir, log.With(l, "component", "wal"), 5*time.Second)
 	if err != nil {
 		return nil, err
 	}
@@ -106,6 +112,7 @@ func openHeadBlock(dir string, l log.Logger) (*headBlock, error) {

 	r := wal.Reader()

+Outer:
 	for r.Next() {
 		series, samples := r.At()

@@ -114,6 +121,10 @@ func openHeadBlock(dir string, l log.Logger) (*headBlock, error) {
 			h.meta.Stats.NumSeries++
 		}
 		for _, s := range samples {
+			if int(s.ref) >= len(h.series) {
+				l.Log("msg", "unknown series reference, abort WAL restore", "got", s.ref, "max", len(h.series)-1)
+				break Outer
+			}
 			h.series[s.ref].append(s.t, s.v)

 			if !h.inBounds(s.t) {
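The new guard uses a labeled break to abandon the whole WAL restore from inside the inner loop. A small illustration of the construct (the sample data is made up):

```go
package main

import "fmt"

func main() {
	batches := [][]int{{1, 2}, {3, -1, 4}}

	// break Outer leaves both loops at once, like the WAL-restore guard above.
Outer:
	for _, batch := range batches {
		for _, v := range batch {
			if v < 0 {
				fmt.Println("bad entry, aborting at", v)
				break Outer
			}
			fmt.Println("restored", v)
		}
	}
}
```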
@@ -165,10 +176,19 @@ func (h *headBlock) Close() error {
 }

 func (h *headBlock) Meta() BlockMeta {
-	h.metamtx.RLock()
-	defer h.metamtx.RUnlock()
+	m := BlockMeta{
+		ULID:       h.meta.ULID,
+		Sequence:   h.meta.Sequence,
+		MinTime:    h.meta.MinTime,
+		MaxTime:    h.meta.MaxTime,
+		Compaction: h.meta.Compaction,
+	}

-	return h.meta
+	m.Stats.NumChunks = atomic.LoadUint64(&h.meta.Stats.NumChunks)
+	m.Stats.NumSeries = atomic.LoadUint64(&h.meta.Stats.NumSeries)
+	m.Stats.NumSamples = atomic.LoadUint64(&h.meta.Stats.NumSamples)

+	return m
 }

 func (h *headBlock) Dir() string { return h.dir }
@@ -183,12 +203,35 @@ func (h *headBlock) Querier(mint, maxt int64) Querier {
 	if h.closed {
 		panic(fmt.Sprintf("block %s already closed", h.dir))
 	}

+	// Reference on the original slice to use for postings mapping.
+	series := h.series[:]
+
 	return &blockQuerier{
-		mint:           mint,
-		maxt:           maxt,
-		index:          h.Index(),
-		chunks:         h.Chunks(),
-		postingsMapper: h.remapPostings,
+		mint:   mint,
+		maxt:   maxt,
+		index:  h.Index(),
+		chunks: h.Chunks(),
+		postingsMapper: func(p Postings) Postings {
+			ep := make([]uint32, 0, 64)
+
+			for p.Next() {
+				// Skip posting entries that include series added after we
+				// instantiated the querier.
+				if int(p.At()) >= len(series) {
+					break
+				}
+				ep = append(ep, p.At())
+			}
+			if err := p.Err(); err != nil {
+				return errPostings{err: errors.Wrap(err, "expand postings")}
+			}
+
+			sort.Slice(ep, func(i, j int) bool {
+				return labels.Compare(series[ep[i]].lset, series[ep[j]].lset) < 0
+			})
+			return newListPostings(ep)
+		},
 	}
 }
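Capturing series := h.series[:] fixes the view of the head block that the returned querier works against: postings that point past the captured length are skipped. A tiny illustration of how a captured slice header keeps its length even when the original slice grows (example values are illustrative):

```go
package main

import "fmt"

func main() {
	series := []string{"a", "b"}

	// snapshot fixes the length of the view right now, much like
	// series := h.series[:] in the querier above.
	snapshot := series[:]
	view := func() []string { return snapshot }

	series = append(series, "c") // grows the original, not the snapshot

	fmt.Println(view())  // [a b]
	fmt.Println(series)  // [a b c]
}
```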
@@ -392,11 +435,8 @@ func (a *headAppender) Commit() error {

 	a.mtx.RUnlock()

-	a.metamtx.Lock()
-	defer a.metamtx.Unlock()
-
-	a.meta.Stats.NumSamples += total
-	a.meta.Stats.NumSeries += uint64(len(a.newSeries))
+	atomic.AddUint64(&a.meta.Stats.NumSamples, total)
+	atomic.AddUint64(&a.meta.Stats.NumSeries, uint64(len(a.newSeries)))

 	return nil
 }
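With metamtx gone, the head block's stats are maintained through sync/atomic: appenders add with atomic.AddUint64 and Meta() copies the counters out with atomic.LoadUint64. A self-contained sketch of that pattern (not the actual tsdb types):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// blockStats stands in for the head block's stats; names are illustrative.
type blockStats struct {
	numSeries  uint64
	numSamples uint64
}

// record is what an appender would do on Commit.
func (s *blockStats) record(series, samples uint64) {
	atomic.AddUint64(&s.numSeries, series)
	atomic.AddUint64(&s.numSamples, samples)
}

// snapshot is what Meta() does: read each counter atomically into a copy.
func (s *blockStats) snapshot() (series, samples uint64) {
	return atomic.LoadUint64(&s.numSeries), atomic.LoadUint64(&s.numSamples)
}

func main() {
	var s blockStats
	s.record(1, 3)
	s.record(0, 2)
	series, samples := s.snapshot()
	fmt.Println(series, samples) // 1 5
}
```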
@@ -551,28 +591,6 @@ func (h *headBlock) create(hash uint64, lset labels.Labels) *memSeries {
 	return s
 }

-// remapPostings changes the order of the postings from their ID to the ordering
-// of the series they reference.
-// Returned postings have no longer monotonic IDs and MUST NOT be used for regular
-// postings set operations, i.e. intersect and merge.
-func (h *headBlock) remapPostings(p Postings) Postings {
-	// Expand the postings but only up until the point where the mapper
-	// covers existing metrics.
-	ep := make([]uint32, 0, 64)
-
-	for p.Next() {
-		ep = append(ep, p.At())
-	}
-	if err := p.Err(); err != nil {
-		return errPostings{err: errors.Wrap(err, "expand postings")}
-	}
-
-	sort.Slice(ep, func(i, j int) bool {
-		return labels.Compare(h.series[i].lset, h.series[j].lset) < 0
-	})
-	return newListPostings(ep)
-}
-
 type memSeries struct {
 	mtx sync.RWMutex
@@ -603,6 +621,9 @@ func (s *memSeries) cut() *memChunk {
 }

 func (s *memSeries) append(t int64, v float64) bool {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
 	var c *memChunk

 	if s.app == nil || s.head().samples > 2000 {
@@ -461,7 +461,7 @@ type SeriesIterator interface {
 	// If there's no value exactly at ts, it advances to the last value
 	// before tt.
 	Seek(t int64) bool
-	// Values returns the current timestamp/value pair.
+	// At returns the current timestamp/value pair.
 	At() (t int64, v float64)
 	// Next advances the iterator by one.
 	Next() bool
@@ -674,7 +674,7 @@ func (b *BufferedSeriesIterator) Next() bool {
 	return ok
 }

-// Values returns the current element of the iterator.
+// At returns the current element of the iterator.
 func (b *BufferedSeriesIterator) At() (int64, float64) {
 	return b.it.At()
 }
@@ -61,6 +61,15 @@ const (
 	walSegmentSizeBytes = 256 * 1024 * 1024 // 256 MB
 )

+// The table gets initialized with sync.Once but may still cause a race
+// with any other use of the crc32 package anywhere. Thus we initialize it
+// before.
+var castagnoliTable *crc32.Table
+
+func init() {
+	castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+}
+
 // OpenWAL opens or creates a write ahead log in the given directory.
 // The WAL must be read completely before new data is written.
 func OpenWAL(dir string, logger log.Logger, flushInterval time.Duration) (*WAL, error) {
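Pre-building the Castagnoli table sidesteps the lazily initialized, sync.Once-guarded table inside hash/crc32 that the comment mentions. A minimal example that reuses one table for every hasher; using a package-level var initializer here instead of init() is an equivalent variant:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// One shared, precomputed table; every WAL-style writer can reuse it.
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)

func main() {
	h := crc32.New(castagnoliTable)
	h.Write([]byte("wal record"))
	fmt.Printf("%08x\n", h.Sum32())
}
```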
@@ -84,7 +93,7 @@ func OpenWAL(dir string, logger log.Logger, flushInterval time.Duration) (*WAL,
 		donec:       make(chan struct{}),
 		stopc:       make(chan struct{}),
 		segmentSize: walSegmentSizeBytes,
-		crc32:       crc32.New(crc32.MakeTable(crc32.Castagnoli)),
+		crc32:       crc32.New(castagnoliTable),
 	}
 	if err := w.initSegments(); err != nil {
 		return nil, err
@@ -214,6 +223,7 @@ func (w *WAL) tail() *os.File {
 	return w.files[len(w.files)-1]
 }

+// Sync flushes the changes to disk.
 func (w *WAL) Sync() error {
 	w.mtx.Lock()
 	defer w.mtx.Unlock()
@@ -1,20 +1,22 @@
 # package log

 `package log` provides a minimal interface for structured logging in services.
-It may be wrapped to encode conventions, enforce type-safety, provide leveled logging, and so on.
-It can be used for both typical application log events, and log-structured data streams.
+It may be wrapped to encode conventions, enforce type-safety, provide leveled
+logging, and so on. It can be used for both typical application log events,
+and log-structured data streams.

 ## Structured logging

-Structured logging is, basically, conceding to the reality that logs are _data_,
-and warrant some level of schematic rigor.
-Using a stricter, key/value-oriented message format for our logs,
-containing contextual and semantic information,
-makes it much easier to get insight into the operational activity of the systems we build.
-Consequently, `package log` is of the strong belief that
-"[the benefits of structured logging outweigh the minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".
+Structured logging is, basically, conceding to the reality that logs are
+_data_, and warrant some level of schematic rigor. Using a stricter,
+key/value-oriented message format for our logs, containing contextual and
+semantic information, makes it much easier to get insight into the
+operational activity of the systems we build. Consequently, `package log` is
+of the strong belief that "[the benefits of structured logging outweigh the
+minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".

-Migrating from unstructured to structured logging is probably a lot easier than you'd expect.
+Migrating from unstructured to structured logging is probably a lot easier
+than you'd expect.

 ```go
 // Unstructured
@@ -37,17 +39,17 @@ logger.Log("question", "what is the meaning of life?", "answer", 42)
 // question="what is the meaning of life?" answer=42
 ```

-### Log contexts
+### Contextual Loggers

 ```go
 func main() {
 	var logger log.Logger
 	logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
-	logger = log.NewContext(logger).With("instance_id", 123)
+	logger = log.With(logger, "instance_id", 123)

 	logger.Log("msg", "starting")
-	NewWorker(log.NewContext(logger).With("component", "worker")).Run()
-	NewSlacker(log.NewContext(logger).With("component", "slacker")).Run()
+	NewWorker(log.With(logger, "component", "worker")).Run()
+	NewSlacker(log.With(logger, "component", "slacker")).Run()
 }

 // Output:
@@ -77,9 +79,8 @@ func main() {
 // {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
 ```

-Or, if, for legacy reasons,
-you need to pipe all of your logging through the stdlib log package,
-you can redirect Go kit logger to the stdlib logger.
+Or, if, for legacy reasons, you need to pipe all of your logging through the
+stdlib log package, you can redirect Go kit logger to the stdlib logger.

 ```go
 logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
@@ -94,7 +95,7 @@ logger.Log("legacy", true, "msg", "at least it's something")
 ```go
 var logger log.Logger
 logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
-logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

 logger.Log("msg", "hello")
 ```
@@ -104,7 +105,7 @@ logger.Log("msg", "hello")

 ## Supported output formats

-- [Logfmt](https://brandur.org/logfmt)
+- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
 - JSON

 ## Enhancements
@@ -117,27 +118,25 @@ type Logger interface {
 }
 ```

-This interface, and its supporting code like [log.Context](https://godoc.org/github.com/go-kit/kit/log#Context),
-is the product of much iteration and evaluation.
-For more details on the evolution of the Logger interface,
-see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
-a talk by [Chris Hines](https://github.com/ChrisHines).
+This interface, and its supporting code like is the product of much iteration
+and evaluation. For more details on the evolution of the Logger interface,
+see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
+a talk by [Chris Hines](https://github.com/ChrisHines).
 Also, please see
-[#63](https://github.com/go-kit/kit/issues/63),
-[#76](https://github.com/go-kit/kit/pull/76),
-[#131](https://github.com/go-kit/kit/issues/131),
-[#157](https://github.com/go-kit/kit/pull/157),
-[#164](https://github.com/go-kit/kit/issues/164), and
-[#252](https://github.com/go-kit/kit/pull/252)
-to review historical conversations about package log and the Logger interface.
+[#63](https://github.com/go-kit/kit/issues/63),
+[#76](https://github.com/go-kit/kit/pull/76),
+[#131](https://github.com/go-kit/kit/issues/131),
+[#157](https://github.com/go-kit/kit/pull/157),
+[#164](https://github.com/go-kit/kit/issues/164), and
+[#252](https://github.com/go-kit/kit/pull/252)
+to review historical conversations about package log and the Logger interface.

 Value-add packages and suggestions,
-like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/levels),
-are of course welcome.
-Good proposals should
+like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
+are of course welcome. Good proposals should

-- Be composable with [log.Context](https://godoc.org/github.com/go-kit/kit/log#Context),
-- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped context, and
+- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
+- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
 - Be friendly to packages that accept only an unadorned log.Logger.

 ## Benchmarks & comparisons
@@ -35,14 +35,15 @@
 // idea to log simple values without formatting them. This practice allows
 // the chosen logger to encode values in the most appropriate way.
 //
-// Log Context
+// Contextual Loggers
 //
-// A log context stores keyvals that it includes in all log events. Building
-// appropriate log contexts reduces repetition and aids consistency in the
-// resulting log output. We can use a context to improve the RunTask example.
+// A contextual logger stores keyvals that it includes in all log events.
+// Building appropriate contextual loggers reduces repetition and aids
+// consistency in the resulting log output. With and WithPrefix add context to
+// a logger. We can use With to improve the RunTask example.
 //
 //    func RunTask(task Task, logger log.Logger) string {
-//        logger = log.NewContext(logger).With("taskID", task.ID)
+//        logger = log.With(logger, "taskID", task.ID)
 //        logger.Log("event", "starting task")
 //        ...
 //        taskHelper(task.Cmd, logger)
@@ -51,19 +52,18 @@
 //    }
 //
 // The improved version emits the same log events as the original for the
-// first and last calls to Log. The call to taskHelper highlights that a
-// context may be passed as a logger to other functions. Each log event
-// created by the called function will include the task.ID even though the
-// function does not have access to that value. Using log contexts this way
-// simplifies producing log output that enables tracing the life cycle of
-// individual tasks. (See the Context example for the full code of the
-// above snippet.)
+// first and last calls to Log. Passing the contextual logger to taskHelper
+// enables each log event created by taskHelper to include the task.ID even
+// though taskHelper does not have access to that value. Using contextual
+// loggers this way simplifies producing log output that enables tracing the
+// life cycle of individual tasks. (See the Contextual example for the full
+// code of the above snippet.)
 //
-// Dynamic Context Values
+// Dynamic Contextual Values
 //
-// A Valuer function stored in a log context generates a new value each time
-// the context logs an event. The Valuer example demonstrates how this
-// feature works.
+// A Valuer function stored in a contextual logger generates a new value each
+// time an event is logged. The Valuer example demonstrates how this feature
+// works.
 //
 // Valuers provide the basis for consistently logging timestamps and source
 // code location. The log package defines several valuers for that purpose.
@@ -72,7 +72,7 @@
 // entries contain a timestamp and source location looks like this:
 //
 //    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
-//    logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+//    logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
 //
 // Concurrent Safety
 //
@@ -90,4 +90,27 @@
 // handled atomically within the wrapped logger, but it typically serializes
 // both the formatting and output logic. Use a SyncLogger if the formatting
 // logger may perform multiple writes per log event.
+//
+// Error Handling
+//
+// This package relies on the practice of wrapping or decorating loggers with
+// other loggers to provide composable pieces of functionality. It also means
+// that Logger.Log must return an error because some
+// implementations—especially those that output log data to an io.Writer—may
+// encounter errors that cannot be handled locally. This in turn means that
+// Loggers that wrap other loggers should return errors from the wrapped
+// logger up the stack.
+//
+// Fortunately, the decorator pattern also provides a way to avoid the
+// necessity to check for errors every time an application calls Logger.Log.
+// An application required to panic whenever its Logger encounters
+// an error could initialize its logger as follows.
+//
+//    fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
+//    logger := log.LoggerFunc(func(keyvals ...interface{}) error {
+//        if err := fmtlogger.Log(keyvals...); err != nil {
+//            panic(err)
+//        }
+//        return nil
+//    })
 package log
@@ -6,7 +6,7 @@ import "errors"
 // log event from keyvals, a variadic sequence of alternating keys and values.
 // Implementations must be safe for concurrent use by multiple goroutines. In
 // particular, any implementation of Logger that appends to keyvals or
-// modifies any of its elements must make a copy first.
+// modifies or retains any of its elements must make a copy first.
 type Logger interface {
 	Log(keyvals ...interface{}) error
 }
@@ -15,62 +15,100 @@ type Logger interface {
 // the missing value.
 var ErrMissingValue = errors.New("(MISSING)")

-// NewContext returns a new Context that logs to logger.
-func NewContext(logger Logger) *Context {
-	if c, ok := logger.(*Context); ok {
-		return c
+// With returns a new contextual logger with keyvals prepended to those passed
+// to calls to Log. If logger is also a contextual logger created by With or
+// WithPrefix, keyvals is appended to the existing context.
+//
+// The returned Logger replaces all value elements (odd indexes) containing a
+// Valuer with their generated value for each call to its Log method.
+func With(logger Logger, keyvals ...interface{}) Logger {
+	if len(keyvals) == 0 {
+		return logger
 	}
-	return &Context{logger: logger}
+	l := newContext(logger)
+	kvs := append(l.keyvals, keyvals...)
+	if len(kvs)%2 != 0 {
+		kvs = append(kvs, ErrMissingValue)
+	}
+	return &context{
+		logger: l.logger,
+		// Limiting the capacity of the stored keyvals ensures that a new
+		// backing array is created if the slice must grow in Log or With.
+		// Using the extra capacity without copying risks a data race that
+		// would violate the Logger interface contract.
+		keyvals:   kvs[:len(kvs):len(kvs)],
+		hasValuer: l.hasValuer || containsValuer(keyvals),
+	}
 }

-// Context must always have the same number of stack frames between calls to
+// WithPrefix returns a new contextual logger with keyvals prepended to those
+// passed to calls to Log. If logger is also a contextual logger created by
+// With or WithPrefix, keyvals is prepended to the existing context.
+//
+// The returned Logger replaces all value elements (odd indexes) containing a
+// Valuer with their generated value for each call to its Log method.
+func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
+	if len(keyvals) == 0 {
+		return logger
+	}
+	l := newContext(logger)
+	// Limiting the capacity of the stored keyvals ensures that a new
+	// backing array is created if the slice must grow in Log or With.
+	// Using the extra capacity without copying risks a data race that
+	// would violate the Logger interface contract.
+	n := len(l.keyvals) + len(keyvals)
+	if len(keyvals)%2 != 0 {
+		n++
+	}
+	kvs := make([]interface{}, 0, n)
+	kvs = append(kvs, keyvals...)
+	if len(kvs)%2 != 0 {
+		kvs = append(kvs, ErrMissingValue)
+	}
+	kvs = append(kvs, l.keyvals...)
+	return &context{
+		logger:    l.logger,
+		keyvals:   kvs,
+		hasValuer: l.hasValuer || containsValuer(keyvals),
+	}
+}
+
+// context is the Logger implementation returned by With and WithPrefix. It
+// wraps a Logger and holds keyvals that it includes in all log events. Its
+// Log method calls bindValues to generate values for each Valuer in the
+// context keyvals.
+//
+// A context must always have the same number of stack frames between calls to
 // its Log method and the eventual binding of Valuers to their value. This
 // requirement comes from the functional requirement to allow a context to
-// resolve application call site information for a log.Caller stored in the
+// resolve application call site information for a Caller stored in the
 // context. To do this we must be able to predict the number of logging
 // functions on the stack when bindValues is called.
 //
-// Three implementation details provide the needed stack depth consistency.
-// The first two of these details also result in better amortized performance,
-// and thus make sense even without the requirements regarding stack depth.
-// The third detail, however, is subtle and tied to the implementation of the
-// Go compiler.
+// Two implementation details provide the needed stack depth consistency.
 //
-//    1. NewContext avoids introducing an additional layer when asked to
-//       wrap another Context.
-//    2. With avoids introducing an additional layer by returning a newly
-//       constructed Context with a merged keyvals rather than simply
-//       wrapping the existing Context.
-//    3. All of Context's methods take pointer receivers even though they
-//       do not mutate the Context.
-//
-// Before explaining the last detail, first some background. The Go compiler
-// generates wrapper methods to implement the auto dereferencing behavior when
-// calling a value method through a pointer variable. These wrapper methods
-// are also used when calling a value method through an interface variable
-// because interfaces store a pointer to the underlying concrete value.
-// Calling a pointer receiver through an interface does not require generating
-// an additional function.
-//
-// If Context had value methods then calling Context.Log through a variable
-// with type Logger would have an extra stack frame compared to calling
-// Context.Log through a variable with type Context. Using pointer receivers
-// avoids this problem.
-
-// A Context wraps a Logger and holds keyvals that it includes in all log
-// events. When logging, a Context replaces all value elements (odd indexes)
-// containing a Valuer with their generated value for each call to its Log
-// method.
-type Context struct {
+//    1. newContext avoids introducing an additional layer when asked to
+//       wrap another context.
+//    2. With and WithPrefix avoid introducing an additional layer by
+//       returning a newly constructed context with a merged keyvals rather
+//       than simply wrapping the existing context.
+type context struct {
 	logger    Logger
 	keyvals   []interface{}
 	hasValuer bool
 }

+func newContext(logger Logger) *context {
+	if c, ok := logger.(*context); ok {
+		return c
+	}
+	return &context{logger: logger}
+}
+
 // Log replaces all value elements (odd indexes) containing a Valuer in the
 // stored context with their generated value, appends keyvals, and passes the
 // result to the wrapped Logger.
-func (l *Context) Log(keyvals ...interface{}) error {
+func (l *context) Log(keyvals ...interface{}) error {
 	kvs := append(l.keyvals, keyvals...)
 	if len(kvs)%2 != 0 {
 		kvs = append(kvs, ErrMissingValue)
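With and WithPrefix differ only in where the new keyvals land relative to the existing context. A short usage sketch; the logfmt output in the comments follows from the documented append/prepend behavior:

```go
package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	base := log.NewLogfmtLogger(os.Stdout)
	base = log.With(base, "instance_id", 123)

	log.With(base, "component", "worker").Log("msg", "hi")
	// instance_id=123 component=worker msg=hi

	log.WithPrefix(base, "component", "worker").Log("msg", "hi")
	// component=worker instance_id=123 msg=hi
}
```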
@@ -86,53 +124,6 @@ func (l *Context) Log(keyvals ...interface{}) error {
 	return l.logger.Log(kvs...)
 }

-// With returns a new Context with keyvals appended to those of the receiver.
-func (l *Context) With(keyvals ...interface{}) *Context {
-	if len(keyvals) == 0 {
-		return l
-	}
-	kvs := append(l.keyvals, keyvals...)
-	if len(kvs)%2 != 0 {
-		kvs = append(kvs, ErrMissingValue)
-	}
-	return &Context{
-		logger: l.logger,
-		// Limiting the capacity of the stored keyvals ensures that a new
-		// backing array is created if the slice must grow in Log or With.
-		// Using the extra capacity without copying risks a data race that
-		// would violate the Logger interface contract.
-		keyvals:   kvs[:len(kvs):len(kvs)],
-		hasValuer: l.hasValuer || containsValuer(keyvals),
-	}
-}
-
-// WithPrefix returns a new Context with keyvals prepended to those of the
-// receiver.
-func (l *Context) WithPrefix(keyvals ...interface{}) *Context {
-	if len(keyvals) == 0 {
-		return l
-	}
-	// Limiting the capacity of the stored keyvals ensures that a new
-	// backing array is created if the slice must grow in Log or With.
-	// Using the extra capacity without copying risks a data race that
-	// would violate the Logger interface contract.
-	n := len(l.keyvals) + len(keyvals)
-	if len(keyvals)%2 != 0 {
-		n++
-	}
-	kvs := make([]interface{}, 0, n)
-	kvs = append(kvs, keyvals...)
-	if len(kvs)%2 != 0 {
-		kvs = append(kvs, ErrMissingValue)
-	}
-	kvs = append(kvs, l.keyvals...)
-	return &Context{
-		logger:    l.logger,
-		keyvals:   kvs,
-		hasValuer: l.hasValuer || containsValuer(keyvals),
-	}
-}
-
 // LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
 // f is a function with the appropriate signature, LoggerFunc(f) is a Logger
 // object that calls f.
@@ -39,7 +39,7 @@ func TimestampKey(key string) StdlibAdapterOption {
 	return func(a *StdlibAdapter) { a.timestampKey = key }
 }

-// FileKey sets the key for the file and line field. By default, it's "file".
+// FileKey sets the key for the file and line field. By default, it's "caller".
 func FileKey(key string) StdlibAdapterOption {
 	return func(a *StdlibAdapter) { a.fileKey = key }
 }
@@ -55,7 +55,7 @@ func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
 	a := StdlibAdapter{
 		Logger:       logger,
 		timestampKey: "ts",
-		fileKey:      "file",
+		fileKey:      "caller",
 		messageKey:   "msg",
 	}
 	for _, option := range options {
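The fileKey default switching from "file" to "caller" affects records routed through the stdlib adapter. A usage sketch under the assumption that Lshortfile is enabled so the standard logger actually emits a file/line prefix for the adapter to pick up:

```go
package main

import (
	stdlog "log"
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	logger := kitlog.NewLogfmtLogger(os.Stdout)

	// Route the standard library logger through go-kit. With Lshortfile set,
	// the file:line prefix is re-emitted under the adapter's "caller" key.
	stdlog.SetFlags(stdlog.LstdFlags | stdlog.Lshortfile)
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))

	stdlog.Println("hello from the stdlib")
}
```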
@@ -6,9 +6,9 @@ import (
 	"github.com/go-stack/stack"
 )

-// A Valuer generates a log value. When passed to Context.With in a value
-// element (odd indexes), it represents a dynamic value which is re-evaluated
-// with each log event.
+// A Valuer generates a log value. When passed to With or WithPrefix in a
+// value element (odd indexes), it represents a dynamic value which is re-
+// evaluated with each log event.
 type Valuer func() interface{}

 // bindValues replaces all value elements (odd indexes) containing a Valuer
@@ -39,16 +39,6 @@ func Timestamp(t func() time.Time) Valuer {
 	return func() interface{} { return t() }
 }

-var (
-	// DefaultTimestamp is a Valuer that returns the current wallclock time,
-	// respecting time zones, when bound.
-	DefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339) }
-
-	// DefaultTimestampUTC is a Valuer that returns the current time in UTC
-	// when bound.
-	DefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339) }
-)
-
 // Caller returns a Valuer that returns a file and line from a specified depth
 // in the callstack. Users will probably want to use DefaultCaller.
 func Caller(depth int) Valuer {
@@ -56,6 +46,18 @@ func Caller(depth int) Valuer {
 }

 var (
+	// DefaultTimestamp is a Valuer that returns the current wallclock time,
+	// respecting time zones, when bound.
+	DefaultTimestamp = Valuer(func() interface{} {
+		return time.Now().Format(time.RFC3339Nano)
+	})
+
+	// DefaultTimestampUTC is a Valuer that returns the current time in UTC
+	// when bound.
+	DefaultTimestampUTC = Valuer(func() interface{} {
+		return time.Now().UTC().Format(time.RFC3339Nano)
+	})
+
 	// DefaultCaller is a Valuer that returns the file and line where the Log
 	// method was invoked. It can only be used with log.With.
 	DefaultCaller = Caller(3)
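DefaultTimestamp and DefaultTimestampUTC are ordinary Valuers, so custom Valuers compose the same way and are re-evaluated on every Log call. An illustrative sketch with a hypothetical sequence counter:

```go
package main

import (
	"os"
	"sync/atomic"

	"github.com/go-kit/kit/log"
)

func main() {
	// A Valuer is re-evaluated on each Log call; this one numbers events.
	var n uint64
	seq := log.Valuer(func() interface{} { return atomic.AddUint64(&n, 1) })

	logger := log.NewLogfmtLogger(os.Stdout)
	logger = log.With(logger, "seq", seq, "ts", log.DefaultTimestampUTC)

	logger.Log("msg", "first")  // seq=1 ts=... msg=first
	logger.Log("msg", "second") // seq=2 ts=... msg=second
}
```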
@@ -368,10 +368,10 @@
 			"revisionTime": "2016-09-30T00:14:02Z"
 		},
 		{
-			"checksumSHA1": "8wTICzej/k4pCcYtSw+fmD6oZZE=",
+			"checksumSHA1": "L4bnnz4uVgDueUvcTKkZJ+IqPbw=",
 			"path": "github.com/fabxc/tsdb",
-			"revision": "2ef3682560a31bd03f0ba70eb6ec509512ad0de8",
-			"revisionTime": "2017-03-20T10:37:06Z"
+			"revision": "70909ca8ad875aa6f01e1801d185b3f3bab2fda3",
+			"revisionTime": "2017-03-21T11:21:02Z"
 		},
 		{
 			"checksumSHA1": "uVzWuLvF646YjiKomsc2CR1ua58=",
@@ -399,10 +399,10 @@
 			"revisionTime": "2016-08-27T06:11:18Z"
 		},
 		{
-			"checksumSHA1": "7Ta13bcuAlF1esPxSPljmpsSUwc=",
+			"checksumSHA1": "ZSRtnAM/vH1sl0jxr4HztYI/4vs=",
 			"path": "github.com/go-kit/kit/log",
-			"revision": "3db037d8577b40174b37b73b62469aa9363020f6",
-			"revisionTime": "2017-01-10T08:27:22Z"
+			"revision": "04dd4f741c6e76cc170a4d7913f4c625952e6f58",
+			"revisionTime": "2017-03-20T09:05:36Z"
 		},
 		{
 			"checksumSHA1": "KxX/Drph+byPXBFIXaCZaCOAnrU=",