Fix staticcheck issues.
This commit is contained in:
parent 0fcfe3209f
commit 59aca4138b
@@ -229,7 +229,7 @@ func Main() int {
 	webHandler.Ready()
 	log.Info("Server is Ready to receive requests.")
 
-	term := make(chan os.Signal)
+	term := make(chan os.Signal, 1)
 	signal.Notify(term, os.Interrupt, syscall.SIGTERM)
 	select {
 	case <-term:
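staticcheck flags unbuffered channels passed to signal.Notify: the signal package delivers without blocking, so a signal arriving before the receiver is ready would simply be dropped. A minimal, self-contained sketch of the buffered pattern (standalone program, names illustrative only, not the Prometheus code):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// signal.Notify never blocks when delivering a signal, so an
	// unbuffered channel can silently miss one; a buffer of 1 is enough.
	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)

	fmt.Println("waiting for SIGINT/SIGTERM...")
	<-term
	fmt.Println("received termination signal, shutting down")
}
```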
@@ -130,7 +130,7 @@ func TestTargetURL(t *testing.T) {
 func newTestTarget(targetURL string, deadline time.Duration, labels model.LabelSet) *Target {
 	labels = labels.Clone()
 	labels[model.SchemeLabel] = "http"
-	labels[model.AddressLabel] = model.LabelValue(strings.TrimLeft(targetURL, "http://"))
+	labels[model.AddressLabel] = model.LabelValue(strings.TrimPrefix(targetURL, "http://"))
 	labels[model.MetricsPathLabel] = "/metrics"
 
 	return &Target{
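The TrimLeft call only worked by accident: strings.TrimLeft interprets its second argument as a set of characters to strip, not as a literal prefix. A small sketch showing the difference (the host name is hypothetical, chosen so the bug becomes visible):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical URL whose host itself starts with characters from the cutset.
	u := "http://http.example.org/metrics"

	// TrimLeft strips any leading run of the characters h, t, p, ':' and '/',
	// so it also eats the "http" at the start of the host name.
	fmt.Println(strings.TrimLeft(u, "http://")) // .example.org/metrics

	// TrimPrefix removes the literal prefix exactly once, which is the intent here.
	fmt.Println(strings.TrimPrefix(u, "http://")) // http.example.org/metrics
}
```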
@@ -63,6 +63,9 @@ func TestFPMapper(t *testing.T) {
 	defer closer.Close()
 
 	mapper, err := newFPMapper(sm, p)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	// Everything is empty, resolving a FP should do nothing.
 	gotFP := mapper.mapFP(fp1, cm11)
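This hunk, and the two test hunks further down, follow the same pattern: an error value that was previously discarded is now checked and aborts the test with t.Fatal. A generic sketch of that pattern, with a made-up constructor standing in for newFPMapper:

```go
package demo

import "testing"

// newThing is a placeholder for constructors such as newFPMapper whose
// error return used to be ignored in the tests.
func newThing() (*struct{}, error) { return &struct{}{}, nil }

func TestThing(t *testing.T) {
	thing, err := newThing()
	if err != nil {
		// Failing fast keeps the rest of the test from using a nil value.
		t.Fatal(err)
	}
	_ = thing
}
```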
@@ -462,7 +462,7 @@ func (p *persistence) persistChunks(fp model.Fingerprint, chunks []chunk.Chunk)
 	}
 
 	// Determine index within the file.
-	offset, err := f.Seek(0, os.SEEK_CUR)
+	offset, err := f.Seek(0, io.SeekCurrent)
 	if err != nil {
 		return -1, err
 	}
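os.SEEK_SET, os.SEEK_CUR and os.SEEK_END have been deprecated since Go 1.7 in favour of io.SeekStart, io.SeekCurrent and io.SeekEnd, which is what staticcheck reports here and in the remaining persistence hunks below. The numeric values are identical, so behaviour does not change, only the names do. A small standalone sketch (the temp file exists only for the demo):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "seek-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.Write([]byte("0123456789")); err != nil {
		panic(err)
	}

	// io.SeekStart (0), io.SeekCurrent (1) and io.SeekEnd (2) replace the
	// deprecated os.SEEK_SET, os.SEEK_CUR and os.SEEK_END constants.
	if _, err := f.Seek(4, io.SeekStart); err != nil {
		panic(err)
	}
	pos, err := f.Seek(0, io.SeekCurrent) // query the current offset
	if err != nil {
		panic(err)
	}
	fmt.Println(pos) // 4
}
```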
@@ -498,7 +498,7 @@ func (p *persistence) loadChunks(fp model.Fingerprint, indexes []int, indexOffse
 		// This loads chunks in batches. A batch is a streak of
 		// consecutive chunks, read from disk in one go.
 		batchSize := 1
-		if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), os.SEEK_SET); err != nil {
+		if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), io.SeekStart); err != nil {
 			return nil, err
 		}
@@ -561,7 +561,7 @@ func (p *persistence) loadChunkDescs(fp model.Fingerprint, offsetFromEnd int) ([
 	cds := make([]*chunk.Desc, numChunks)
 	chunkTimesBuf := make([]byte, 16)
 	for i := 0; i < numChunks; i++ {
-		_, err := f.Seek(offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, os.SEEK_SET)
+		_, err := f.Seek(offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, io.SeekStart)
 		if err != nil {
 			return nil, err
 		}
@@ -814,7 +814,7 @@ func (p *persistence) checkpointSeriesMapAndHeads(
 	if realNumberOfSeries != numberOfSeriesInHeader {
 		// The number of series has changed in the meantime.
 		// Rewrite it in the header.
-		if _, err = f.Seek(int64(numberOfSeriesOffset), os.SEEK_SET); err != nil {
+		if _, err = f.Seek(int64(numberOfSeriesOffset), io.SeekStart); err != nil {
 			return err
 		}
 		if err = codable.EncodeUint64(f, realNumberOfSeries); err != nil {
@@ -971,7 +971,7 @@ func (p *persistence) dropAndPersistChunks(
 	headerBuf := make([]byte, chunkHeaderLen)
 	// Find the first chunk in the file that should be kept.
 	for ; ; numDropped++ {
-		_, err = f.Seek(offsetForChunkIndex(numDropped), os.SEEK_SET)
+		_, err = f.Seek(offsetForChunkIndex(numDropped), io.SeekStart)
 		if err != nil {
 			return
 		}
@@ -1007,7 +1007,7 @@ func (p *persistence) dropAndPersistChunks(
 	if numDropped == chunkIndexToStartSeek {
 		// Nothing to drop. Just adjust the return values and append the chunks (if any).
 		numDropped = 0
-		_, err = f.Seek(offsetForChunkIndex(0), os.SEEK_SET)
+		_, err = f.Seek(offsetForChunkIndex(0), io.SeekStart)
 		if err != nil {
 			return
 		}
@@ -1033,7 +1033,7 @@ func (p *persistence) dropAndPersistChunks(
 			binary.LittleEndian.Uint64(headerBuf[chunkHeaderFirstTimeOffset:]),
 		)
 		chunk.Ops.WithLabelValues(chunk.Drop).Add(float64(numDropped))
-		_, err = f.Seek(-chunkHeaderLen, os.SEEK_CUR)
+		_, err = f.Seek(-chunkHeaderLen, io.SeekCurrent)
 		if err != nil {
 			return
 		}
@@ -1354,7 +1354,7 @@ func (p *persistence) openChunkFileForWriting(fp model.Fingerprint) (*os.File, e
 	}
 	return os.OpenFile(p.fileNameForFingerprint(fp), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
 	// NOTE: Although the file was opened for append,
-	//     f.Seek(0, os.SEEK_CUR)
+	//     f.Seek(0, io.SeekCurrent)
 	// would now return '0, nil', so we cannot check for a consistent file length right now.
 	// However, the chunkIndexForOffset function is doing that check, so a wrong file length
 	// would still be detected.
@@ -136,6 +136,9 @@ func testPersistLoadDropChunks(t *testing.T, encoding chunk.Encoding) {
 	}
 	// Load all chunk descs.
 	actualChunkDescs, err := p.loadChunkDescs(fp, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
 	if len(actualChunkDescs) != 10 {
 		t.Errorf("Got %d chunkDescs, want %d.", len(actualChunkDescs), 10)
 	}
|
@ -1420,7 +1420,7 @@ func testEvictAndPurgeSeries(t *testing.T, encoding chunk.Encoding) {
|
|||
// Unarchive metrics.
|
||||
s.getOrCreateSeries(fp, model.Metric{})
|
||||
|
||||
series, ok = s.fpToSeries.get(fp)
|
||||
_, ok = s.fpToSeries.get(fp)
|
||||
if !ok {
|
||||
t.Fatal("could not find series")
|
||||
}
|
||||
|
|
|
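The series value assigned here was never read again, which staticcheck reports as an ineffective assignment; replacing it with the blank identifier keeps only the existence check. A tiny illustration of the same pattern, detached from the storage code:

```go
package main

import "fmt"

func main() {
	m := map[string]int{"a": 1}

	// Before: `v, ok := m["a"]` with v never used again is flagged by
	// staticcheck as an unused/ineffective assignment.
	_, ok := m["a"] // keep only the existence check
	fmt.Println(ok) // true
}
```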
@@ -68,6 +68,9 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
 		url:     &config.URL{serverURL},
 		timeout: model.Duration(time.Second),
 	})
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	err = c.Store(nil)
 	if !reflect.DeepEqual(err, test.err) {