mirror of https://github.com/ceph/go-ceph
cephfs: add Fsync and Sync calls for open files
Add wrappers for ceph_fsync. The Fsync call directly wraps ceph_fsync, which provides options to behave more like fsync or more like fdatasync. Add Sync, a simpler wrapper over Fsync, to match any interfaces that make use of `File.Sync` from Go's os package.

Signed-off-by: John Mulligan <jmulligan@redhat.com>
parent ec822aa3a9
commit 624230cdbd
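For orientation, here is a minimal usage sketch of the new calls from application code. The mount setup (CreateMount, ReadDefaultConfigFile, Mount) is ordinary go-ceph boilerplate and not part of this change; the path and file contents are made up for illustration.

package main

import (
	"log"
	"os"

	"github.com/ceph/go-ceph/cephfs"
)

func main() {
	// Standard go-ceph connection boilerplate; assumes a reachable cluster
	// and a readable ceph.conf. Not part of this commit.
	mount, err := cephfs.CreateMount()
	if err != nil {
		log.Fatal(err)
	}
	if err := mount.ReadDefaultConfigFile(); err != nil {
		log.Fatal(err)
	}
	if err := mount.Mount(); err != nil {
		log.Fatal(err)
	}
	defer mount.Unmount()

	// Hypothetical path, used only for this sketch.
	f, err := mount.Open("/fsync-example.txt", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := f.Write([]byte("hello cephfs")); err != nil {
		log.Fatal(err)
	}

	// Flush file data only, similar to fdatasync(2).
	if err := f.Fsync(cephfs.SyncDataOnly); err != nil {
		log.Fatal(err)
	}

	// Flush data and metadata; equivalent to f.Fsync(cephfs.SyncAll) and
	// analogous to (*os.File).Sync.
	if err := f.Sync(); err != nil {
		log.Fatal(err)
	}
}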
@@ -25,6 +25,17 @@ const (
 	SeekEnd = int(C.SEEK_END)
 )
 
+// SyncChoice is used to control how metadata and/or data is sync'ed to
+// the file system.
+type SyncChoice int
+
+const (
+	// SyncAll will synchronize both data and metadata.
+	SyncAll = SyncChoice(0)
+	// SyncDataOnly will synchronize only data.
+	SyncDataOnly = SyncChoice(1)
+)
+
 // File represents an open file descriptor in cephfs.
 type File struct {
 	mount *MountInfo
@@ -293,3 +304,31 @@ func (f *File) Flock(operation LockOp, owner uint64) error {
 	ret := C.ceph_flock(f.mount.mount, f.fd, C.int(operation), C.uint64_t(owner))
 	return getError(ret)
 }
+
+// Fsync ensures the file content that may be cached is committed to stable
+// storage.
+// Pass SyncAll to have this call behave like standard fsync and synchronize
+// all data and metadata.
+// Pass SyncDataOnly to have this call behave more like fdatasync (on linux).
+//
+// Implements:
+//  int ceph_fsync(struct ceph_mount_info *cmount, int fd, int syncdataonly);
+func (f *File) Fsync(sync SyncChoice) error {
+	if err := f.validate(); err != nil {
+		return err
+	}
+
+	ret := C.ceph_fsync(
+		f.mount.mount,
+		f.fd,
+		C.int(sync),
+	)
+	return getError(ret)
+}
+
+// Sync ensures the file content that may be cached is committed to stable
+// storage.
+// Sync behaves like Go's os package File.Sync function.
+func (f *File) Sync() error {
+	return f.Fsync(SyncAll)
+}
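As the commit message notes, Sync is meant to line up with interfaces built around (*os.File).Sync. A small sketch of that usage follows; the syncer interface and flushAll helper are hypothetical names invented for illustration, not part of go-ceph.

package example

// syncer is a hypothetical consumer-side interface; both *os.File and
// *cephfs.File (after this change) provide a matching Sync() error method.
type syncer interface {
	Sync() error
}

// flushAll syncs any mix of local and cephfs files through the same interface.
func flushAll(files ...syncer) error {
	for _, f := range files {
		if err := f.Sync(); err != nil {
			return err
		}
	}
	return nil
}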
@@ -711,3 +711,61 @@ func TestFlock(t *testing.T) {
 		})
 	})
 }
+
+func TestFsync(t *testing.T) {
+	mount := fsConnect(t)
+	defer fsDisconnect(t, mount)
+
+	fname := "test_fsync.txt"
+	defer mount.Unlink(fname)
+
+	// unfortunately there's not much to assert around the behavior of
+	// fsync in these simple tests so we sort-of have to trust ceph on this :-)
+	t.Run("simpleFsync", func(t *testing.T) {
+		f, err := mount.Open(fname, os.O_RDWR|os.O_CREATE, 0666)
+		assert.NoError(t, err)
+		defer func() { assert.NoError(t, f.Close()) }()
+		_, err = f.Write([]byte("batman"))
+		assert.NoError(t, err)
+		err = f.Fsync(SyncAll)
+		assert.NoError(t, err)
+	})
+	t.Run("DataOnly", func(t *testing.T) {
+		f, err := mount.Open(fname, os.O_RDWR|os.O_CREATE, 0666)
+		assert.NoError(t, err)
+		defer func() { assert.NoError(t, f.Close()) }()
+		_, err = f.Write([]byte("superman"))
+		assert.NoError(t, err)
+		err = f.Fsync(SyncDataOnly)
+		assert.NoError(t, err)
+	})
+	t.Run("invalid", func(t *testing.T) {
+		f := &File{}
+		err := f.Fsync(SyncAll)
+		assert.Error(t, err)
+	})
+}
+
+func TestSync(t *testing.T) {
+	mount := fsConnect(t)
+	defer fsDisconnect(t, mount)
+
+	fname := "test_sync.txt"
+	defer mount.Unlink(fname)
+
+	// see fsync
+	t.Run("simple", func(t *testing.T) {
+		f, err := mount.Open(fname, os.O_RDWR|os.O_CREATE, 0666)
+		assert.NoError(t, err)
+		defer func() { assert.NoError(t, f.Close()) }()
+		_, err = f.Write([]byte("question"))
+		assert.NoError(t, err)
+		err = f.Sync()
+		assert.NoError(t, err)
+	})
+	t.Run("invalid", func(t *testing.T) {
+		f := &File{}
+		err := f.Sync()
+		assert.Error(t, err)
+	})
+}