// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"testing"

	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/go-kit/kit/log"
	"github.com/prometheus/tsdb/labels"
	"github.com/stretchr/testify/require"
)
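
// TestSegmentWAL_initSegments checks that initSegments picks up existing,
// well-formed segment files and fails when a segment header is corrupted.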
func TestSegmentWAL_initSegments(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "test_wal_open")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	df, err := fileutil.OpenDir(tmpdir)
	require.NoError(t, err)

	w := &SegmentWAL{dirFile: df}

	// Create segment files with an appropriate header.
	for i := 1; i <= 5; i++ {
		metab := make([]byte, 8)
		binary.BigEndian.PutUint32(metab[:4], WALMagic)
		metab[4] = WALFormatDefault

		f, err := os.Create(fmt.Sprintf("%s/000%d", tmpdir, i))
		require.NoError(t, err)
		_, err = f.Write(metab)
		require.NoError(t, err)
		require.NoError(t, f.Close())
	}

	// Initialize 5 correct segment files.
	require.NoError(t, w.initSegments())

	require.Equal(t, 5, len(w.files), "unexpected number of segments loaded")

	// Validate that files are locked properly.
	for _, of := range w.files {
		f, err := os.Open(of.Name())
		require.NoError(t, err, "open locked segment %s", f.Name())

		_, err = f.Read([]byte{0})
		require.NoError(t, err, "read locked segment %s", f.Name())

		_, err = f.Write([]byte{0})
		require.Error(t, err, "write to tail segment file %s", f.Name())

		require.NoError(t, f.Close())
	}

	for _, f := range w.files {
		require.NoError(t, f.Close())
	}

	// Make initialization fail by corrupting the header of one file.
	f, err := os.OpenFile(w.files[3].Name(), os.O_WRONLY, 0666)
	require.NoError(t, err)

	_, err = f.WriteAt([]byte{0}, 4)
	require.NoError(t, err)

	w = &SegmentWAL{dirFile: df}
	require.Error(t, w.initSegments(), "init corrupted segments")

	for _, f := range w.files {
		require.NoError(t, f.Close())
	}
}
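
// TestSegmentWAL_cut checks that cutting closes the previous tail segment,
// starts a new segment file with a valid header, and keeps entries readable.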
func TestSegmentWAL_cut(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "test_wal_cut")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	// This calls cut() implicitly the first time without a previous tail.
	w, err := OpenSegmentWAL(tmpdir, nil, 0)
	require.NoError(t, err)

	require.NoError(t, w.entry(WALEntrySeries, 1, []byte("Hello World!!")))

	require.NoError(t, w.cut(), "cut failed")

	// Cutting creates a new file and closes the previous tail file.
	require.Equal(t, 2, len(w.files))
	require.Equal(t, os.ErrInvalid.Error(), w.files[0].Close().Error())

	require.NoError(t, w.entry(WALEntrySeries, 1, []byte("Hello World!!")))

	require.NoError(t, w.Close())

	for _, of := range w.files {
		f, err := os.Open(of.Name())
		require.NoError(t, err)

		// Verify header data.
		metab := make([]byte, 8)
		_, err = f.Read(metab)
		require.NoError(t, err, "read meta data %s", f.Name())
		require.Equal(t, WALMagic, binary.BigEndian.Uint32(metab[:4]), "verify magic")
		require.Equal(t, WALFormatDefault, metab[4], "verify format flag")

		// We cannot actually check for correct pre-allocation as it is
		// optional per filesystem and handled transparently.
		et, flag, b, err := newWALReader(nil, nil).entry(f)
		require.NoError(t, err)
		require.Equal(t, WALEntrySeries, et)
		require.Equal(t, flag, byte(walSeriesSimple))
		require.Equal(t, []byte("Hello World!!"), b)
	}
}

// Symmetrical test of reading and writing to the WAL via its main interface.
func TestSegmentWAL_Log_Restore(t *testing.T) {
	const (
		numMetrics = 5000
		iterations = 5
		stepSize   = 100
	)
	// Generate testing data. It does not make semantic sense but
	// serves the purpose of this test.
	series, err := readPrometheusLabels("testdata/20k.series", numMetrics)
	require.NoError(t, err)

	dir, err := ioutil.TempDir("", "test_wal_log_restore")
	require.NoError(t, err)
	defer os.RemoveAll(dir)

	var (
		recordedSeries  [][]labels.Labels
		recordedSamples [][]RefSample
		recordedDeletes [][]Stone
	)
	var totalSamples int

	// Open the WAL a bunch of times, validate that all previous data can be
	// read, write more data to it, and close it.
	for k := 0; k < numMetrics; k += numMetrics / iterations {
		w, err := OpenSegmentWAL(dir, nil, 0)
		require.NoError(t, err)

		// Set a smaller segment size so we can actually write several files.
		w.segmentSize = 1000 * 1000

		r := w.Reader()

		var (
			resultSeries  [][]labels.Labels
			resultSamples [][]RefSample
			resultDeletes [][]Stone
		)

		serf := func(lsets []labels.Labels) error {
			if len(lsets) > 0 {
				clsets := make([]labels.Labels, len(lsets))
				copy(clsets, lsets)
				resultSeries = append(resultSeries, clsets)
			}

			return nil
		}
		smplf := func(smpls []RefSample) error {
			if len(smpls) > 0 {
				csmpls := make([]RefSample, len(smpls))
				copy(csmpls, smpls)
				resultSamples = append(resultSamples, csmpls)
			}

			return nil
		}

		delf := func(stones []Stone) error {
			if len(stones) > 0 {
				resultDeletes = append(resultDeletes, stones)
			}

			return nil
		}

		require.NoError(t, r.Read(serf, smplf, delf))

		require.Equal(t, recordedSamples, resultSamples)
		require.Equal(t, recordedSeries, resultSeries)
		require.Equal(t, recordedDeletes, resultDeletes)

		series := series[k : k+(numMetrics/iterations)]

		// Insert in batches and generate different amounts of samples for each.
		for i := 0; i < len(series); i += stepSize {
			var samples []RefSample
			var stones []Stone

			for j := 0; j < i*10; j++ {
				samples = append(samples, RefSample{
					Ref: uint64(j % 10000),
					T:   int64(j * 2),
					V:   rand.Float64(),
				})
			}

			for j := 0; j < i*20; j++ {
				ts := rand.Int63()
				stones = append(stones, Stone{rand.Uint32(), intervals{{ts, ts + rand.Int63n(10000)}}})
			}

			lbls := series[i : i+stepSize]

			require.NoError(t, w.LogSeries(lbls))
			require.NoError(t, w.LogSamples(samples))
			require.NoError(t, w.LogDeletes(stones))

			if len(lbls) > 0 {
				recordedSeries = append(recordedSeries, lbls)
			}
			if len(samples) > 0 {
				recordedSamples = append(recordedSamples, samples)
				totalSamples += len(samples)
			}
			if len(stones) > 0 {
				recordedDeletes = append(recordedDeletes, stones)
			}
		}

		require.NoError(t, w.Close())
	}
}
// Test reading from a WAL that has been corrupted through various means.
func TestWALRestoreCorrupted(t *testing.T) {
	cases := []struct {
		name string
		f    func(*testing.T, *SegmentWAL)
	}{
		{
			name: "truncate_checksum",
			f: func(t *testing.T, w *SegmentWAL) {
				f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666)
				require.NoError(t, err)
				defer f.Close()

				off, err := f.Seek(0, os.SEEK_END)
				require.NoError(t, err)

				require.NoError(t, f.Truncate(off-1))
			},
		},
		{
			name: "truncate_body",
			f: func(t *testing.T, w *SegmentWAL) {
				f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666)
				require.NoError(t, err)
				defer f.Close()

				off, err := f.Seek(0, os.SEEK_END)
				require.NoError(t, err)

				require.NoError(t, f.Truncate(off-8))
			},
		},
		{
			name: "body_content",
			f: func(t *testing.T, w *SegmentWAL) {
				f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666)
				require.NoError(t, err)
				defer f.Close()

				off, err := f.Seek(0, os.SEEK_END)
				require.NoError(t, err)

				// Write junk before the checksum starts.
				_, err = f.WriteAt([]byte{1, 2, 3, 4}, off-8)
				require.NoError(t, err)
			},
		},
		{
			name: "checksum",
			f: func(t *testing.T, w *SegmentWAL) {
				f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666)
				require.NoError(t, err)
				defer f.Close()

				off, err := f.Seek(0, os.SEEK_END)
				require.NoError(t, err)

				// Write junk into the checksum.
				_, err = f.WriteAt([]byte{1, 2, 3, 4}, off-4)
				require.NoError(t, err)
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// Generate testing data. It does not make semantic sense but
			// serves the purpose of this test.
			dir, err := ioutil.TempDir("", "test_corrupted_checksum")
			require.NoError(t, err)
			defer os.RemoveAll(dir)

			w, err := OpenSegmentWAL(dir, nil, 0)
			require.NoError(t, err)

			require.NoError(t, w.LogSamples([]RefSample{{T: 1, V: 2}}))
			require.NoError(t, w.LogSamples([]RefSample{{T: 2, V: 3}}))

			require.NoError(t, w.cut())

			require.NoError(t, w.LogSamples([]RefSample{{T: 3, V: 4}}))
			require.NoError(t, w.LogSamples([]RefSample{{T: 5, V: 6}}))

			require.NoError(t, w.Close())

			// Corrupt the second entry in the first file. After re-opening we
			// must be able to read the first entry, and the rest, including the
			// second file, must be truncated for clean further writes.
			c.f(t, w)

			logger := log.NewLogfmtLogger(os.Stderr)

			w2, err := OpenSegmentWAL(dir, logger, 0)
			require.NoError(t, err)

			r := w2.Reader()
			serf := func(l []labels.Labels) error {
				require.Equal(t, 0, len(l))
				return nil
			}
			delf := func([]Stone) error { return nil }

			// Weird hack to check the order of reads.
			i := 0
			samplf := func(s []RefSample) error {
				if i == 0 {
					require.Equal(t, []RefSample{{T: 1, V: 2}}, s)
					i++
				} else {
					require.Equal(t, []RefSample{{T: 99, V: 100}}, s)
				}

				return nil
			}

			require.NoError(t, r.Read(serf, samplf, delf))

			require.NoError(t, w2.LogSamples([]RefSample{{T: 99, V: 100}}))
			require.NoError(t, w2.Close())

			// We should see the first valid entry and the new one; everything
			// after is truncated.
			w3, err := OpenSegmentWAL(dir, logger, 0)
			require.NoError(t, err)

			r = w3.Reader()

			i = 0
			require.NoError(t, r.Read(serf, samplf, delf))
		})
	}
}