implement send for multiple snapshots

This commit is contained in:
Denys Smirnov 2017-01-08 16:13:47 +02:00
parent 54573d1c9d
commit 3b69894215
13 changed files with 476 additions and 171 deletions

View File

@ -2,7 +2,7 @@ package btrfs
import "os" import "os"
func getPathRootID(file *os.File) (uint64, error) { func getFileRootID(file *os.File) (objectID, error) {
args := btrfs_ioctl_ino_lookup_args{ args := btrfs_ioctl_ino_lookup_args{
objectid: firstFreeObjectid, objectid: firstFreeObjectid,
} }
@ -11,3 +11,12 @@ func getPathRootID(file *os.File) (uint64, error) {
} }
return args.treeid, nil return args.treeid, nil
} }
// getPathRootID resolves the root (subvolume) ID owning the given path.
// It opens the filesystem at path read-only, queries the root ID of the
// opened file, and closes the handle before returning.
func getPathRootID(path string) (objectID, error) {
	fs, err := Open(path, true)
	if err != nil {
		return 0, err
	}
	id, lookupErr := getFileRootID(fs.f)
	fs.Close()
	return id, lookupErr
}

View File

@ -138,6 +138,21 @@ func TestSubvolumes(t *testing.T) {
"foo", "bar", "baz", "foo", "bar", "baz",
"foo", "baz", "foo", "baz",
}) })
path := filepath.Join(names[0], names[2])
mksub(path, "new")
path = filepath.Join(path, "new")
id, err := getPathRootID(filepath.Join(dir, path))
if err != nil {
t.Fatal(err)
}
info, err := subvolSearchByRootID(fs.f, id, "")
if err != nil {
t.Fatal(err)
} else if info.Path != path {
t.Fatalf("wrong path returned: %v vs %v", info.Path, path)
}
} }
func TestCompression(t *testing.T) { func TestCompression(t *testing.T) {

View File

@ -15,10 +15,11 @@ const (
blockGroupRaid6 | blockGroupRaid6 |
blockGroupDup | blockGroupDup |
blockGroupRaid10) blockGroupRaid10)
_BTRFS_BLOCK_GROUP_MASK = _BTRFS_BLOCK_GROUP_TYPE_MASK | _BTRFS_BLOCK_GROUP_PROFILE_MASK
) )
type rootRef struct { type rootRef struct {
DirID uint64 DirID objectID
Sequence uint64 Sequence uint64
Name string Name string
} }
@ -42,7 +43,7 @@ func asRootRef(p []byte) rootRef {
// assuming that it is highly unsafe to have sizeof(struct) > len(data) // assuming that it is highly unsafe to have sizeof(struct) > len(data)
// (*btrfs_root_ref)(unsafe.Pointer(&p[0])) and sizeof(btrfs_root_ref) == 24 // (*btrfs_root_ref)(unsafe.Pointer(&p[0])) and sizeof(btrfs_root_ref) == 24
ref := rootRef{ ref := rootRef{
DirID: asUint64(p[0:]), DirID: objectID(asUint64(p[0:])),
Sequence: asUint64(p[8:]), Sequence: asUint64(p[8:]),
} }
if n := asUint16(p[16:]); n > 0 { if n := asUint16(p[16:]); n > 0 {
@ -76,11 +77,7 @@ func (t btrfs_timespec_raw) Decode() time.Time {
return time.Unix(int64(sec), int64(nsec)) return time.Unix(int64(sec), int64(nsec))
} }
// timeBlock is a raw set of bytes for 4 time fields: // timeBlock is a raw set of bytes for 4 time fields.
// atime btrfs_timespec
// ctime btrfs_timespec
// mtime btrfs_timespec
// otime btrfs_timespec
// It is used to keep correct alignment when accessing structures from btrfs. // It is used to keep correct alignment when accessing structures from btrfs.
type timeBlock [4]btrfs_timespec_raw type timeBlock [4]btrfs_timespec_raw

View File

@ -2,6 +2,18 @@ package btrfs
// This code was auto-generated; DO NOT EDIT! // This code was auto-generated; DO NOT EDIT!
type treeKeyType uint32
type objectID uint64
type fileType int
type fileExtentType int
type devReplaceItemState int
type blockGroup uint64
// This header contains the structure definitions and constants used // This header contains the structure definitions and constants used
// by file system objects that can be retrieved using // by file system objects that can be retrieved using
// the BTRFS_IOC_SEARCH_TREE ioctl. That means basically anything that // the BTRFS_IOC_SEARCH_TREE ioctl. That means basically anything that
@ -9,179 +21,179 @@ package btrfs
const ( const (
// Holds pointers to all of the tree roots // Holds pointers to all of the tree roots
rootTreeObjectid = 1 rootTreeObjectid objectID = 1
// Stores information about which extents are in use, and reference counts // Stores information about which extents are in use, and reference counts
extentTreeObjectid = 2 extentTreeObjectid objectID = 2
// Chunk tree stores translations from logical -> physical block numbering // Chunk tree stores translations from logical -> physical block numbering
// the super block points to the chunk tree // the super block points to the chunk tree
chunkTreeObjectid = 3 chunkTreeObjectid objectID = 3
// Stores information about which areas of a given device are in use. // Stores information about which areas of a given device are in use.
// one per device. The tree of tree roots points to the device tree // one per device. The tree of tree roots points to the device tree
devTreeObjectid = 4 devTreeObjectid objectID = 4
// One per subvolume, storing files and directories // One per subvolume, storing files and directories
fsTreeObjectid = 5 fsTreeObjectid objectID = 5
// Directory objectid inside the root tree // Directory objectid inside the root tree
rootTreeDirObjectid = 6 rootTreeDirObjectid objectID = 6
// Holds checksums of all the data extents // Holds checksums of all the data extents
csumTreeObjectid = 7 csumTreeObjectid objectID = 7
// Holds quota configuration and tracking // Holds quota configuration and tracking
quotaTreeObjectid = 8 quotaTreeObjectid objectID = 8
// For storing items that use the BTRFS_UUID_KEY* types // For storing items that use the BTRFS_UUID_KEY* types
uuidTreeObjectid = 9 uuidTreeObjectid objectID = 9
// Tracks free space in block groups. // Tracks free space in block groups.
freeSpaceTreeObjectid = 10 freeSpaceTreeObjectid objectID = 10
// Device stats in the device tree // Device stats in the device tree
devStatsObjectid = 0 devStatsObjectid objectID = 0
// For storing balance parameters in the root tree // For storing balance parameters in the root tree
balanceObjectid = (1<<64 - 4) balanceObjectid objectID = (1<<64 - 4)
// Orhpan objectid for tracking unlinked/truncated files // Orhpan objectid for tracking unlinked/truncated files
orphanObjectid = (1<<64 - 5) orphanObjectid objectID = (1<<64 - 5)
// Does write ahead logging to speed up fsyncs // Does write ahead logging to speed up fsyncs
treeLogObjectid = (1<<64 - 6) treeLogObjectid objectID = (1<<64 - 6)
treeLogFixupObjectid = (1<<64 - 7) treeLogFixupObjectid objectID = (1<<64 - 7)
// For space balancing // For space balancing
treeRelocObjectid = (1<<64 - 8) treeRelocObjectid objectID = (1<<64 - 8)
dataRelocTreeObjectid = (1<<64 - 9) dataRelocTreeObjectid objectID = (1<<64 - 9)
// Extent checksums all have this objectid // Extent checksums all have this objectid
// this allows them to share the logging tree // this allows them to share the logging tree
// for fsyncs // for fsyncs
extentCsumObjectid = (1<<64 - 10) extentCsumObjectid objectID = (1<<64 - 10)
// For storing free space cache // For storing free space cache
freeSpaceObjectid = (1<<64 - 11) freeSpaceObjectid objectID = (1<<64 - 11)
// The inode number assigned to the special inode for storing // The inode number assigned to the special inode for storing
// free ino cache // free ino cache
freeInoObjectid = (1<<64 - 12) freeInoObjectid objectID = (1<<64 - 12)
// Dummy objectid represents multiple objectids // Dummy objectid represents multiple objectids
multipleObjectids = (1<<64 - 255) multipleObjectids = (1<<64 - 255)
// All files have objectids in this range. // All files have objectids in this range.
firstFreeObjectid = 256 firstFreeObjectid objectID = 256
lastFreeObjectid = (1<<64 - 256) lastFreeObjectid objectID = (1<<64 - 256)
firstChunkTreeObjectid = 256 firstChunkTreeObjectid objectID = 256
// The device items go into the chunk tree. The key is in the form // The device items go into the chunk tree. The key is in the form
// [ 1 BTRFS_DEV_ITEM_KEY device_id ] // [ 1 BTRFS_DEV_ITEM_KEY device_id ]
devItemsObjectid = 1 devItemsObjectid objectID = 1
btreeInodeObjectid = 1 btreeInodeObjectid objectID = 1
emptySubvolDirObjectid = 2 emptySubvolDirObjectid objectID = 2
devReplaceDevid = 0 devReplaceDevid = 0
// Inode items have the data typically returned from stat and store other // Inode items have the data typically returned from stat and store other
// info about object characteristics. There is one for every file and dir in // info about object characteristics. There is one for every file and dir in
// the FS // the FS
inodeItemKey = 1 inodeItemKey treeKeyType = 1
inodeRefKey = 12 inodeRefKey treeKeyType = 12
inodeExtrefKey = 13 inodeExtrefKey treeKeyType = 13
xattrItemKey = 24 xattrItemKey treeKeyType = 24
orphanItemKey = 48 orphanItemKey treeKeyType = 48
// Reserve 2-15 close to the inode for later flexibility // Reserve 2-15 close to the inode for later flexibility
// Dir items are the name -> inode pointers in a directory. There is one // Dir items are the name -> inode pointers in a directory. There is one
// for every name in a directory. // for every name in a directory.
dirLogItemKey = 60 dirLogItemKey treeKeyType = 60
dirLogIndexKey = 72 dirLogIndexKey treeKeyType = 72
dirItemKey = 84 dirItemKey treeKeyType = 84
dirIndexKey = 96 dirIndexKey treeKeyType = 96
// Extent data is for file data // Extent data is for file data
extentDataKey = 108 extentDataKey treeKeyType = 108
// Extent csums are stored in a separate tree and hold csums for // Extent csums are stored in a separate tree and hold csums for
// an entire extent on disk. // an entire extent on disk.
extentCsumKey = 128 extentCsumKey treeKeyType = 128
// Root items point to tree roots. They are typically in the root // Root items point to tree roots. They are typically in the root
// tree used by the super block to find all the other trees // tree used by the super block to find all the other trees
rootItemKey = 132 rootItemKey treeKeyType = 132
// Root backrefs tie subvols and snapshots to the directory entries that // Root backrefs tie subvols and snapshots to the directory entries that
// reference them // reference them
rootBackrefKey = 144 rootBackrefKey treeKeyType = 144
// Root refs make a fast index for listing all of the snapshots and // Root refs make a fast index for listing all of the snapshots and
// subvolumes referenced by a given root. They point directly to the // subvolumes referenced by a given root. They point directly to the
// directory item in the root that references the subvol // directory item in the root that references the subvol
rootRefKey = 156 rootRefKey treeKeyType = 156
// Extent items are in the extent map tree. These record which blocks // Extent items are in the extent map tree. These record which blocks
// are used, and how many references there are to each block // are used, and how many references there are to each block
extentItemKey = 168 extentItemKey treeKeyType = 168
// The same as the BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know // The same as the BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know
// the length, so we save the level in key->offset instead of the length. // the length, so we save the level in key->offset instead of the length.
metadataItemKey = 169 metadataItemKey treeKeyType = 169
treeBlockRefKey = 176 treeBlockRefKey treeKeyType = 176
extentDataRefKey = 178 extentDataRefKey treeKeyType = 178
extentRefV0Key = 180 extentRefV0Key treeKeyType = 180
sharedBlockRefKey = 182 sharedBlockRefKey treeKeyType = 182
sharedDataRefKey = 184 sharedDataRefKey treeKeyType = 184
// Block groups give us hints into the extent allocation trees. Which // Block groups give us hints into the extent allocation trees. Which
// blocks are free etc etc // blocks are free etc etc
blockGroupItemKey = 192 blockGroupItemKey treeKeyType = 192
// Every block group is represented in the free space tree by a free space info // Every block group is represented in the free space tree by a free space info
// item, which stores some accounting information. It is keyed on // item, which stores some accounting information. It is keyed on
// (block_group_start, FREE_SPACE_INFO, block_group_length). // (block_group_start, FREE_SPACE_INFO, block_group_length).
freeSpaceInfoKey = 198 freeSpaceInfoKey treeKeyType = 198
// A free space extent tracks an extent of space that is free in a block group. // A free space extent tracks an extent of space that is free in a block group.
// It is keyed on (start, FREE_SPACE_EXTENT, length). // It is keyed on (start, FREE_SPACE_EXTENT, length).
freeSpaceExtentKey = 199 freeSpaceExtentKey treeKeyType = 199
// When a block group becomes very fragmented, we convert it to use bitmaps // When a block group becomes very fragmented, we convert it to use bitmaps
// instead of extents. A free space bitmap is keyed on // instead of extents. A free space bitmap is keyed on
// (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with // (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with
// (length / sectorsize) bits. // (length / sectorsize) bits.
freeSpaceBitmapKey = 200 freeSpaceBitmapKey treeKeyType = 200
devExtentKey = 204 devExtentKey treeKeyType = 204
devItemKey = 216 devItemKey treeKeyType = 216
chunkItemKey = 228 chunkItemKey treeKeyType = 228
// Records the overall state of the qgroups. // Records the overall state of the qgroups.
// There's only one instance of this key present, // There's only one instance of this key present,
// (0, BTRFS_QGROUP_STATUS_KEY, 0) // (0, BTRFS_QGROUP_STATUS_KEY, 0)
qgroupStatusKey = 240 qgroupStatusKey treeKeyType = 240
// Records the currently used space of the qgroup. // Records the currently used space of the qgroup.
// One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid). // One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid).
qgroupInfoKey = 242 qgroupInfoKey treeKeyType = 242
// Contains the user configured limits for the qgroup. // Contains the user configured limits for the qgroup.
// One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid). // One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid).
qgroupLimitKey = 244 qgroupLimitKey treeKeyType = 244
// Records the child-parent relationship of qgroups. For // Records the child-parent relationship of qgroups. For
// each relation, 2 keys are present: // each relation, 2 keys are present:
// (childid, BTRFS_QGROUP_RELATION_KEY, parentid) // (childid, BTRFS_QGROUP_RELATION_KEY, parentid)
// (parentid, BTRFS_QGROUP_RELATION_KEY, childid) // (parentid, BTRFS_QGROUP_RELATION_KEY, childid)
qgroupRelationKey = 246 qgroupRelationKey treeKeyType = 246
// Obsolete name, see BTRFS_TEMPORARY_ITEM_KEY. // Obsolete name, see BTRFS_TEMPORARY_ITEM_KEY.
balanceItemKey = 248 balanceItemKey treeKeyType = 248
// The key type for tree items that are stored persistently, but do not need to // The key type for tree items that are stored persistently, but do not need to
// exist for extended period of time. The items can exist in any tree. // exist for extended period of time. The items can exist in any tree.
@ -189,10 +201,10 @@ const (
// Existing items: // Existing items:
// - balance status item // - balance status item
// (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0) // (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0)
temporaryItemKey = 248 temporaryItemKey treeKeyType = 248
// Obsolete name, see BTRFS_PERSISTENT_ITEM_KEY // Obsolete name, see BTRFS_PERSISTENT_ITEM_KEY
devStatsKey = 249 devStatsKey treeKeyType = 249
// The key type for tree items that are stored persistently and usually exist // The key type for tree items that are stored persistently and usually exist
// for a long period, eg. filesystem lifetime. The item kinds can be status // for a long period, eg. filesystem lifetime. The item kinds can be status
@ -202,11 +214,11 @@ const (
// - device statistics, store IO stats in the device tree, one key for all // - device statistics, store IO stats in the device tree, one key for all
// stats // stats
// (BTRFS_DEV_STATS_OBJECTID, BTRFS_DEV_STATS_KEY, 0) // (BTRFS_DEV_STATS_OBJECTID, BTRFS_DEV_STATS_KEY, 0)
persistentItemKey = 249 persistentItemKey treeKeyType = 249
// Persistantly stores the device replace state in the device tree. // Persistantly stores the device replace state in the device tree.
// The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0). // The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0).
devReplaceKey = 250 devReplaceKey treeKeyType = 250
// Stores items that allow to quickly map UUIDs to something else. // Stores items that allow to quickly map UUIDs to something else.
// These items are part of the filesystem UUID tree. // These items are part of the filesystem UUID tree.
@ -217,7 +229,7 @@ const (
// String items are for debugging. They just store a short string of // String items are for debugging. They just store a short string of
// data in the FS // data in the FS
stringItemKey = 253 stringItemKey treeKeyType = 253
// 32 bytes in various csum fields // 32 bytes in various csum fields
csumSize = 32 csumSize = 32
@ -228,16 +240,16 @@ const (
// Flags definitions for directory entry item type // Flags definitions for directory entry item type
// Used by: // Used by:
// struct btrfs_dir_item.type // struct btrfs_dir_item.type
ftUnknown = 0 ftUnknown fileType = 0
ftRegFile = 1 ftRegFile fileType = 1
ftDir = 2 ftDir fileType = 2
ftChrdev = 3 ftChrdev fileType = 3
ftBlkdev = 4 ftBlkdev fileType = 4
ftFifo = 5 ftFifo fileType = 5
ftSock = 6 ftSock fileType = 6
ftSymlink = 7 ftSymlink fileType = 7
ftXattr = 8 ftXattr fileType = 8
ftMax = 9 ftMax fileType = 9
// The key defines the order in the tree, and so it also defines (optimal) // The key defines the order in the tree, and so it also defines (optimal)
// block layout. // block layout.
@ -392,9 +404,9 @@ const (
// resumed after crash or unmount // resumed after crash or unmount
// BTRFS_BALANCE_* // BTRFS_BALANCE_*
fileExtentInline = 0 fileExtentInline fileExtentType = 0
fileExtentReg = 1 fileExtentReg fileExtentType = 1
fileExtentPrealloc = 2 fileExtentPrealloc fileExtentType = 2
// Transaction id that created this extent // Transaction id that created this extent
// Max number of bytes to hold this extent in ram // Max number of bytes to hold this extent in ram
@ -427,25 +439,25 @@ const (
devReplaceItemContReadingFromSrcdevModeAlways = 0 devReplaceItemContReadingFromSrcdevModeAlways = 0
devReplaceItemContReadingFromSrcdevModeAvoid = 1 devReplaceItemContReadingFromSrcdevModeAvoid = 1
devReplaceItemStateNeverStarted = 0 devReplaceItemStateNeverStarted devReplaceItemState = 0
devReplaceItemStateStarted = 1 devReplaceItemStateStarted devReplaceItemState = 1
devReplaceItemStateSuspended = 2 devReplaceItemStateSuspended devReplaceItemState = 2
devReplaceItemStateFinished = 3 devReplaceItemStateFinished devReplaceItemState = 3
devReplaceItemStateCanceled = 4 devReplaceItemStateCanceled devReplaceItemState = 4
// Grow this item struct at the end for future enhancements and keep // Grow this item struct at the end for future enhancements and keep
// the existing values unchanged // the existing values unchanged
// Different types of block groups (and chunks) // Different types of block groups (and chunks)
blockGroupData = (1 << 0) blockGroupData blockGroup = (1 << 0)
blockGroupSystem = (1 << 1) blockGroupSystem blockGroup = (1 << 1)
blockGroupMetadata = (1 << 2) blockGroupMetadata blockGroup = (1 << 2)
blockGroupRaid0 = (1 << 3) blockGroupRaid0 blockGroup = (1 << 3)
blockGroupRaid1 = (1 << 4) blockGroupRaid1 blockGroup = (1 << 4)
blockGroupDup = (1 << 5) blockGroupDup blockGroup = (1 << 5)
blockGroupRaid10 = (1 << 6) blockGroupRaid10 blockGroup = (1 << 6)
blockGroupRaid5 = (1 << 7) blockGroupRaid5 blockGroup = (1 << 7)
blockGroupRaid6 = (1 << 8) blockGroupRaid6 blockGroup = (1 << 8)
// We need a bit for restriper to be able to tell when chunks of type // We need a bit for restriper to be able to tell when chunks of type
// SINGLE are available. This "extended" profile format is used in // SINGLE are available. This "extended" profile format is used in

View File

@ -19,6 +19,9 @@ var (
f_unexport = flag.Bool("u", true, "make all definitions unexported") f_unexport = flag.Bool("u", true, "make all definitions unexported")
f_goname = flag.Bool("g", true, "rename symbols to follow Go conventions") f_goname = flag.Bool("g", true, "rename symbols to follow Go conventions")
f_trim = flag.String("t", "", "prefix to trim from names") f_trim = flag.String("t", "", "prefix to trim from names")
f_constSuf = flag.String("cs", "", "comma-separated list of constant suffixes to create typed constants")
f_constPref = flag.String("cp", "", "comma-separated list of constant prefixes to create typed constants")
) )
var ( var (
@ -26,8 +29,30 @@ var (
reNegULL = regexp.MustCompile(`-(\d+)ULL`) reNegULL = regexp.MustCompile(`-(\d+)ULL`)
) )
var (
	// constTypes holds the typed-constant rules registered from the
	// -cs (suffix) and -cp (prefix) command line flags.
	constTypes []constType
)

// constType describes a generated Go type attached to C constants whose
// names match either Suffix or Prefix.
type constType struct {
	Name string   // Go type name emitted and attached to matching constants
	Type string   // underlying Go type (defaults to "int" when omitted)
	Suffix string // constant-name suffix selecting this type (from -cs)
	Prefix string // constant-name prefix selecting this type (from -cp)
}
func constName(s string) string { func constName(s string) string {
s = strings.TrimPrefix(s, *f_trim) s = strings.TrimPrefix(s, *f_trim)
typ := ""
for _, t := range constTypes {
if t.Suffix != "" && strings.HasSuffix(s, t.Suffix) {
//s = strings.TrimSuffix(s, t.Suffix)
typ = t.Name
break
} else if t.Prefix != "" && strings.HasPrefix(s, t.Prefix) {
typ = t.Name
break
}
}
if *f_goname { if *f_goname {
buf := bytes.NewBuffer(nil) buf := bytes.NewBuffer(nil)
buf.Grow(len(s)) buf.Grow(len(s))
@ -49,6 +74,9 @@ func constName(s string) string {
} else if *f_unexport { } else if *f_unexport {
s = "_" + s s = "_" + s
} }
if typ != "" {
s += " " + typ
}
return s return s
} }
@ -67,7 +95,6 @@ func process(w io.Writer, path string) error {
) )
nl := true nl := true
fmt.Fprint(w, "// This code was auto-generated; DO NOT EDIT!\n\n")
defer fmt.Fprintln(w, ")") defer fmt.Fprintln(w, ")")
for { for {
line, err := r.ReadBytes('\n') line, err := r.ReadBytes('\n')
@ -136,8 +163,31 @@ func process(w io.Writer, path string) error {
} }
} }
// regConstTypes parses a comma-separated list of "name[:type]=value" rules
// and appends one constType per valid rule to constTypes. The matched value
// (a suffix or prefix) is stored into the rule by the fnc callback, so the
// same parser serves both the -cs and -cp flags.
func regConstTypes(str string, fnc func(*constType, string)) {
	for _, rule := range strings.Split(str, ",") {
		parts := strings.Split(rule, "=")
		if len(parts) != 2 {
			// Malformed entry (no "=", or more than one): ignore it.
			continue
		}
		nameType := strings.Split(parts[0], ":")
		ct := constType{Name: nameType[0], Type: "int"}
		if len(nameType) > 1 {
			ct.Type = nameType[1]
		}
		fnc(&ct, parts[1])
		constTypes = append(constTypes, ct)
	}
}
func main() { func main() {
flag.Parse() flag.Parse()
if suf := *f_constSuf; suf != "" {
regConstTypes(suf, func(t *constType, v string) { t.Suffix = v })
}
if pref := *f_constPref; pref != "" {
regConstTypes(pref, func(t *constType, v string) { t.Prefix = v })
}
var w io.Writer = os.Stdout var w io.Writer = os.Stdout
if path := *f_out; path != "" && path != "-" { if path := *f_out; path != "" && path != "-" {
file, err := os.Create(path) file, err := os.Create(path)
@ -148,7 +198,11 @@ func main() {
w = file w = file
} }
fmt.Fprintf(w, "package %s\n", *f_pkg) fmt.Fprintf(w, "package %s\n\n", *f_pkg)
fmt.Fprint(w, "// This code was auto-generated; DO NOT EDIT!\n\n")
for _, t := range constTypes {
fmt.Fprintf(w, "type %s %s\n\n", t.Name, t.Type)
}
for _, path := range flag.Args() { for _, path := range flag.Args() {
if err := process(w, path); err != nil { if err := process(w, path); err != nil {
log.Fatal(err) log.Fatal(err)

View File

@ -1,4 +1,4 @@
package btrfs package btrfs
//go:generate go run ./cmd/hgen.go -u -g -t BTRFS_ -p btrfs -o btrfs_tree_hc.go btrfs_tree.h //go:generate go run ./cmd/hgen.go -u -g -t BTRFS_ -p btrfs -cs=treeKeyType:uint32=_KEY,objectID:uint64=_OBJECTID -cp=fileType=FT_,fileExtentType=FILE_EXTENT_,devReplaceItemState=DEV_REPLACE_ITEM_STATE_,blockGroup:uint64=BLOCK_GROUP_ -o btrfs_tree_hc.go btrfs_tree.h
//go:generate gofmt -l -w btrfs_tree_hc.go //go:generate gofmt -l -w btrfs_tree_hc.go

View File

@ -305,16 +305,27 @@ type btrfs_ioctl_balance_args struct {
const _BTRFS_INO_LOOKUP_PATH_MAX = 4080 const _BTRFS_INO_LOOKUP_PATH_MAX = 4080
type btrfs_ioctl_ino_lookup_args struct { type btrfs_ioctl_ino_lookup_args struct {
treeid uint64 treeid objectID
objectid uint64 objectid objectID
name [_BTRFS_INO_LOOKUP_PATH_MAX]byte name [_BTRFS_INO_LOOKUP_PATH_MAX]byte
} }
// Name returns the NUL-terminated name stored in the ioctl result buffer
// as a Go string.
//
// Fix: the original scanned for the first NUL and returned arg.name[:n]
// with n left at 0 when no terminator was present, silently yielding ""
// for a name that fills the whole buffer. A full buffer is now returned
// in its entirety.
func (arg *btrfs_ioctl_ino_lookup_args) Name() string {
	for i, b := range arg.name {
		if b == '\x00' {
			return string(arg.name[:i])
		}
	}
	// No terminator found: the name occupies the entire buffer.
	return string(arg.name[:])
}
type btrfs_ioctl_search_key struct { type btrfs_ioctl_search_key struct {
tree_id uint64 // which root are we searching. 0 is the tree of tree roots tree_id objectID // which root are we searching. 0 is the tree of tree roots
// keys returned will be >= min and <= max // keys returned will be >= min and <= max
min_objectid uint64 min_objectid objectID
max_objectid uint64 max_objectid objectID
// keys returned will be >= min and <= max // keys returned will be >= min and <= max
min_offset uint64 min_offset uint64
max_offset uint64 max_offset uint64
@ -322,8 +333,8 @@ type btrfs_ioctl_search_key struct {
min_transid uint64 min_transid uint64
max_transid uint64 max_transid uint64
// keys returned will be >= min and <= max // keys returned will be >= min and <= max
min_type uint32 min_type treeKeyType
max_type uint32 max_type treeKeyType
// how many items did userland ask for, and how many are we returning // how many items did userland ask for, and how many are we returning
nr_items uint32 nr_items uint32
_ [36]byte _ [36]byte
@ -331,9 +342,9 @@ type btrfs_ioctl_search_key struct {
type btrfs_ioctl_search_header struct { type btrfs_ioctl_search_header struct {
transid uint64 transid uint64
objectid uint64 objectid objectID
offset uint64 offset uint64
typ uint32 typ treeKeyType
len uint32 len uint32
} }
@ -538,8 +549,8 @@ const (
type btrfs_ioctl_send_args struct { type btrfs_ioctl_send_args struct {
send_fd int64 // in send_fd int64 // in
clone_sources_count uint64 // in clone_sources_count uint64 // in
clone_sources *uint64 // in clone_sources *objectID // in
parent_root uint64 // in parent_root objectID // in
flags uint64 // in flags uint64 // in
_ [4]uint64 // in _ [4]uint64 // in
} }
@ -679,8 +690,14 @@ func iocDefaultSubvol(f *os.File, out *uint64) error {
return ioctl.Do(f, _BTRFS_IOC_DEFAULT_SUBVOL, out) return ioctl.Do(f, _BTRFS_IOC_DEFAULT_SUBVOL, out)
} }
type spaceFlags uint64
func (f spaceFlags) BlockGroup() blockGroup {
return blockGroup(f) & _BTRFS_BLOCK_GROUP_MASK
}
type spaceInfo struct { type spaceInfo struct {
Flags uint64 Flags spaceFlags
TotalBytes uint64 TotalBytes uint64
UsedBytes uint64 UsedBytes uint64
} }
@ -715,7 +732,7 @@ func iocSpaceInfo(f *os.File) ([]spaceInfo, error) {
for i := 0; i < int(n); i++ { for i := 0; i < int(n); i++ {
info := (*btrfs_ioctl_space_info)(unsafe.Pointer(ptr)) info := (*btrfs_ioctl_space_info)(unsafe.Pointer(ptr))
out[i] = spaceInfo{ out[i] = spaceInfo{
Flags: info.flags, Flags: spaceFlags(info.flags),
TotalBytes: info.total_bytes, TotalBytes: info.total_bytes,
UsedBytes: info.used_bytes, UsedBytes: info.used_bytes,
} }

View File

@ -48,7 +48,7 @@ func Receive(r io.Reader, dstDir string) error {
} }
// We want to resolve the path to the subvolume we're sitting in // We want to resolve the path to the subvolume we're sitting in
// so that we can adjust the paths of any subvols we want to receive in. // so that we can adjust the paths of any subvols we want to receive in.
subvolID, err := getPathRootID(mnt) subvolID, err := getFileRootID(mnt)
if err != nil { if err != nil {
return err return err
} }

149
send.go
View File

@ -5,6 +5,7 @@ import (
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
"unsafe"
) )
func Send(w io.Writer, parent string, subvols ...string) error { func Send(w io.Writer, parent string, subvols ...string) error {
@ -23,20 +24,15 @@ func Send(w io.Writer, parent string, subvols ...string) error {
return err return err
} }
var ( var (
cloneSrc []uint64 cloneSrc []objectID
parentID uint64 parentID objectID
) )
if parent != "" { if parent != "" {
parent, err = filepath.Abs(parent) parent, err = filepath.Abs(parent)
if err != nil { if err != nil {
return err return err
} }
f, err := os.Open(parent) id, err := getPathRootID(parent)
if err != nil {
return fmt.Errorf("cannot open parent: %v", err)
}
id, err := getPathRootID(f)
f.Close()
if err != nil { if err != nil {
return fmt.Errorf("cannot get parent root id: %v", err) return fmt.Errorf("cannot get parent root id: %v", err)
} }
@ -53,7 +49,7 @@ func Send(w io.Writer, parent string, subvols ...string) error {
paths = append(paths, sub) paths = append(paths, sub)
mount, err := findMountRoot(sub) mount, err := findMountRoot(sub)
if err != nil { if err != nil {
return err return fmt.Errorf("cannot find mount root for %v: %v", sub, err)
} else if mount != mountRoot { } else if mount != mountRoot {
return fmt.Errorf("all subvolumes must be from the same filesystem (%s is not)", sub) return fmt.Errorf("all subvolumes must be from the same filesystem (%s is not)", sub)
} }
@ -64,14 +60,29 @@ func Send(w io.Writer, parent string, subvols ...string) error {
return fmt.Errorf("subvolume %s is not read-only", sub) return fmt.Errorf("subvolume %s is not read-only", sub)
} }
} }
//full := len(cloneSrc) == 0 mfs, err := Open(mountRoot, true)
if err != nil {
return err
}
defer mfs.Close()
full := len(cloneSrc) == 0
for i, sub := range paths { for i, sub := range paths {
//if len(cloneSrc) > 1 { var rootID objectID
// // TODO: find_good_parent if !full && parent != "" {
//} rel, err := filepath.Rel(mountRoot, sub)
//if !full { // TODO if err != nil {
// cloneSrc = append(cloneSrc, ) return err
//} }
si, err := subvolSearchByPath(mfs.f, rel)
if err != nil {
return fmt.Errorf("cannot find subvolume %s: %v", rel, err)
}
rootID = si.RootID
parentID, err = findGoodParent(mfs.f, rootID, cloneSrc)
if err != nil {
return fmt.Errorf("cannot find good parent for %v: %v", rel, err)
}
}
fs, err := Open(sub, true) fs, err := Open(sub, true)
if err != nil { if err != nil {
return err return err
@ -88,11 +99,14 @@ func Send(w io.Writer, parent string, subvols ...string) error {
if err != nil { if err != nil {
return fmt.Errorf("error sending %s: %v", sub, err) return fmt.Errorf("error sending %s: %v", sub, err)
} }
if !full && parent != "" {
cloneSrc = append(cloneSrc, rootID)
}
} }
return nil return nil
} }
func send(w io.Writer, subvol *os.File, parent uint64, sources []uint64, flags uint64) error { func send(w io.Writer, subvol *os.File, parent objectID, sources []objectID, flags uint64) error {
pr, pw, err := os.Pipe() pr, pw, err := os.Pipe()
if err != nil { if err != nil {
return err return err
@ -123,3 +137,104 @@ func send(w io.Writer, subvol *os.File, parent uint64, sources []uint64, flags u
} }
return wait() return wait()
} }
// readRootItem reads the ROOT_ITEM for rootID from the tree of tree roots.
//
// It pages through BTRFS_IOC_TREE_SEARCH results (4096 items per call),
// advancing the search key past each returned item, until the matching
// ROOT_ITEM is found or the key range is exhausted. Returns ErrNotFound
// if no item for rootID exists.
//
// TODO(dennwc): support older kernels:
// In case we detect a root item smaller then sizeof(root_item),
// we know it's an old version of the root structure and initialize all new fields to zero.
// The same happens if we detect mismatching generation numbers as then we know the root was
// once mounted with an older kernel that was not aware of the root item structure change.
func readRootItem(mnt *os.File, rootID objectID) (*rootItem, error) {
	sk := btrfs_ioctl_search_key{
		tree_id: rootTreeObjectid,
		// There may be more than one ROOT_ITEM key if there are
		// snapshots pending deletion, we have to loop through them.
		min_objectid: rootID,
		max_objectid: rootID,
		min_type:     rootItemKey,
		max_type:     rootItemKey,
		max_offset:   maxUint64,
		max_transid:  maxUint64,
		nr_items:     4096,
	}
	for ; sk.min_offset < maxUint64; sk.min_offset++ {
		results, err := treeSearchRaw(mnt, sk)
		if err != nil {
			return nil, err
		} else if len(results) == 0 {
			break
		}
		for _, r := range results {
			// Move the lower bound of the search window to this item so
			// the next ioctl continues where this page left off.
			sk.min_objectid = r.ObjectID
			sk.min_type = r.Type
			sk.min_offset = r.Offset
			if r.ObjectID > rootID {
				break
			}
			if r.ObjectID == rootID && r.Type == rootItemKey {
				const sz = int(unsafe.Sizeof(btrfs_root_item_raw{}))
				if len(r.Data) > sz {
					return nil, fmt.Errorf("btrfs_root_item is larger than expected; kernel is newer than the library")
				} else if len(r.Data) < sz { // TODO: handle older kernels, see note above
					return nil, fmt.Errorf("btrfs_root_item is smaller than expected; kernel version is too old")
				}
				p := asRootItem(r.Data).Decode()
				return &p, nil
			}
		}
		// Stop once the window has moved past the requested root/key type.
		if sk.min_type != rootItemKey || sk.min_objectid != rootID {
			break
		}
	}
	return nil, ErrNotFound
}
// getParent returns information about the parent snapshot of the subvolume
// identified by rootID, resolved via its ParentUUID.
func getParent(mnt *os.File, rootID objectID) (*subvolInfo, error) {
	info, err := subvolSearchByRootID(mnt, rootID, "")
	if err != nil {
		return nil, err
	}
	return subvolSearchByUUID(mnt, info.ParentUUID)
}
// findGoodParent picks the best subvolume to use as the base for an
// incremental send of rootID, restricted to the candidates in cloneSrc.
//
// If the direct parent of rootID is itself in cloneSrc it is used as-is.
// Otherwise the clone source sharing the same parent whose creation
// transaction ID (CTransID) is closest to that parent's is chosen.
// Returns ErrNotFound if no suitable candidate exists.
func findGoodParent(mnt *os.File, rootID objectID, cloneSrc []objectID) (objectID, error) {
	parent, err := getParent(mnt, rootID)
	if err != nil {
		return 0, fmt.Errorf("get parent failed: %v", err)
	}
	for _, id := range cloneSrc {
		if id == parent.RootID {
			return parent.RootID, nil
		}
	}
	var (
		bestParent *subvolInfo
		bestDiff   uint64 = maxUint64
	)
	for _, id := range cloneSrc {
		parent2, err := getParent(mnt, id)
		if err != nil {
			return 0, err
		}
		if parent2.RootID != parent.RootID {
			continue // not a sibling of rootID; useless as a clone base
		}
		parent2, err = subvolSearchByRootID(mnt, id, "")
		if err != nil {
			return 0, err
		}
		// Absolute distance between the transaction IDs. Both values are
		// unsigned, so "subtract then check < 0" would silently underflow;
		// compare first instead.
		var diff uint64
		if parent2.CTransID > parent.CTransID {
			diff = parent2.CTransID - parent.CTransID
		} else {
			diff = parent.CTransID - parent2.CTransID
		}
		if diff < bestDiff {
			bestParent, bestDiff = parent2, diff
		}
	}
	if bestParent == nil {
		return 0, ErrNotFound
	}
	return bestParent.RootID, nil
}

View File

@ -19,7 +19,7 @@ func IsSubVolume(path string) (bool, error) {
if err := syscall.Stat(path, &st); err != nil { if err := syscall.Stat(path, &st); err != nil {
return false, &os.PathError{Op: "stat", Path: path, Err: err} return false, &os.PathError{Op: "stat", Path: path, Err: err}
} }
if st.Ino != firstFreeObjectid || if objectID(st.Ino) != firstFreeObjectid ||
st.Mode&syscall.S_IFMT != syscall.S_IFDIR { st.Mode&syscall.S_IFMT != syscall.S_IFDIR {
return false, nil return false, nil
} }
@ -164,11 +164,11 @@ func GetFlags(path string) (SubvolFlags, error) {
} }
type Subvolume struct { type Subvolume struct {
ObjectID uint64 ObjectID objectID
TransID uint64 TransID uint64
Name string Name string
RefTree uint64 RefTree uint64
DirID uint64 DirID objectID
Gen uint64 Gen uint64
OGen uint64 OGen uint64
Flags uint64 Flags uint64
@ -179,10 +179,10 @@ type Subvolume struct {
CTime time.Time CTime time.Time
} }
func listSubVolumes(f *os.File) (map[uint64]Subvolume, error) { func listSubVolumes(f *os.File) (map[objectID]Subvolume, error) {
sk := btrfs_ioctl_search_key{ sk := btrfs_ioctl_search_key{
// search in the tree of tree roots // search in the tree of tree roots
tree_id: 1, tree_id: rootTreeObjectid,
// Set the min and max to backref keys. The search will // Set the min and max to backref keys. The search will
// only send back this type of key now. // only send back this type of key now.
@ -199,7 +199,7 @@ func listSubVolumes(f *os.File) (map[uint64]Subvolume, error) {
nr_items: 4096, // just a big number, doesn't matter much nr_items: 4096, // just a big number, doesn't matter much
} }
m := make(map[uint64]Subvolume) m := make(map[objectID]Subvolume)
for { for {
out, err := treeSearchRaw(f, sk) out, err := treeSearchRaw(f, sk)
if err != nil { if err != nil {
@ -259,12 +259,17 @@ func listSubVolumes(f *os.File) (map[uint64]Subvolume, error) {
} }
type subvolInfo struct { type subvolInfo struct {
RootID uint64 RootID objectID
UUID UUID UUID UUID
ParentUUID UUID ParentUUID UUID
ReceivedUUID UUID ReceivedUUID UUID
CTime time.Time
OTime time.Time
STime time.Time
RTime time.Time
CTransID uint64 CTransID uint64
OTransID uint64 OTransID uint64
STransID uint64 STransID uint64
@ -278,7 +283,7 @@ func subvolSearchByUUID(mnt *os.File, uuid UUID) (*subvolInfo, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return subvolSearchByRootID(mnt, id) return subvolSearchByRootID(mnt, id, "")
} }
func subvolSearchByReceivedUUID(mnt *os.File, uuid UUID) (*subvolInfo, error) { func subvolSearchByReceivedUUID(mnt *os.File, uuid UUID) (*subvolInfo, error) {
@ -286,15 +291,95 @@ func subvolSearchByReceivedUUID(mnt *os.File, uuid UUID) (*subvolInfo, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return subvolSearchByRootID(mnt, id) return subvolSearchByRootID(mnt, id, "")
} }
func subvolSearchByPath(mnt *os.File, path string) (*subvolInfo, error) { func subvolSearchByPath(mnt *os.File, path string) (*subvolInfo, error) {
var id uint64 if !filepath.IsAbs(path) {
panic("not implemented") path = filepath.Join(mnt.Name(), path)
return subvolSearchByRootID(mnt, id) }
id, err := getPathRootID(path)
if err != nil {
return nil, err
}
return subvolSearchByRootID(mnt, id, path)
} }
func subvolSearchByRootID(mnt *os.File, rootID uint64) (*subvolInfo, error) { func subvolidResolve(mnt *os.File, subvolID objectID) (string, error) {
panic("not implemented") return subvolidResolveSub(mnt, "", subvolID)
}
// subvolidResolveSub recursively builds the filesystem path of the subvolume
// with ID subvolID by following ROOT_BACKREF items up toward the top-level
// tree. path carries the already-resolved ancestor prefix and should be empty
// on the initial call; callers normally go through subvolidResolve.
func subvolidResolveSub(mnt *os.File, path string, subvolID objectID) (string, error) {
	// The top-level subvolume is the root of the hierarchy; its path is empty.
	if subvolID == fsTreeObjectid {
		return "", nil
	}
	// Look up the ROOT_BACKREF item for this subvolume in the tree of tree
	// roots; its Offset field holds the parent tree's object ID.
	sk := btrfs_ioctl_search_key{
		tree_id: rootTreeObjectid,
		min_objectid: subvolID,
		max_objectid: subvolID,
		min_type: rootBackrefKey,
		max_type: rootBackrefKey,
		max_offset: maxUint64,
		max_transid: maxUint64,
		nr_items: 1,
	}
	results, err := treeSearchRaw(mnt, sk)
	if err != nil {
		return "", err
	} else if len(results) < 1 {
		return "", ErrNotFound
	}
	res := results[0]
	// If the parent is not the top-level tree, resolve the parent's path
	// first so it can be prepended to ours.
	if objectID(res.Offset) != fsTreeObjectid {
		spath, err := subvolidResolveSub(mnt, path, objectID(res.Offset))
		if err != nil {
			return "", err
		}
		path = spath + "/"
	}
	backRef := asRootRef(res.Data)
	// If the subvolume does not live directly in the root directory of its
	// parent tree, resolve the containing directory via the INO_LOOKUP ioctl.
	if backRef.DirID != firstFreeObjectid {
		arg := btrfs_ioctl_ino_lookup_args{
			treeid: objectID(res.Offset),
			objectid: backRef.DirID,
		}
		if err := iocInoLookup(mnt, &arg); err != nil {
			return "", err
		}
		path += arg.Name()
	}
	return path + backRef.Name, nil
}
// subvolSearchByRootID
//
// Path is optional, and will be resolved automatically if not set.
func subvolSearchByRootID(mnt *os.File, rootID objectID, path string) (*subvolInfo, error) {
robj, err := readRootItem(mnt, rootID)
if err != nil {
return nil, err
}
info := &subvolInfo{
RootID: rootID,
UUID: robj.UUID,
ReceivedUUID: robj.ReceivedUUID,
ParentUUID: robj.ParentUUID,
CTime: robj.CTime,
OTime: robj.OTime,
STime: robj.STime,
RTime: robj.RTime,
CTransID: robj.CTransID,
OTransID: robj.OTransID,
STransID: robj.STransID,
RTransID: robj.RTransID,
Path: path,
}
if path == "" {
info.Path, err = subvolidResolve(mnt, info.RootID)
}
return info, err
} }

View File

@ -6,8 +6,8 @@ import (
"syscall" "syscall"
) )
func cmpChunkBlockGroup(f1, f2 uint64) int { func cmpChunkBlockGroup(f1, f2 blockGroup) int {
var mask uint64 var mask blockGroup
if (f1 & _BTRFS_BLOCK_GROUP_TYPE_MASK) == if (f1 & _BTRFS_BLOCK_GROUP_TYPE_MASK) ==
(f2 & _BTRFS_BLOCK_GROUP_TYPE_MASK) { (f2 & _BTRFS_BLOCK_GROUP_TYPE_MASK) {
@ -34,7 +34,7 @@ type spaceInfoByBlockGroup []spaceInfo
func (a spaceInfoByBlockGroup) Len() int { return len(a) } func (a spaceInfoByBlockGroup) Len() int { return len(a) }
func (a spaceInfoByBlockGroup) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a spaceInfoByBlockGroup) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a spaceInfoByBlockGroup) Less(i, j int) bool { func (a spaceInfoByBlockGroup) Less(i, j int) bool {
return cmpChunkBlockGroup(a[i].Flags, a[j].Flags) < 0 return cmpChunkBlockGroup(blockGroup(a[i].Flags), blockGroup(a[j].Flags)) < 0
} }
type UsageInfo struct { type UsageInfo struct {
@ -93,41 +93,42 @@ func spaceUsage(f *os.File) (UsageInfo, error) {
) )
for _, s := range spaces { for _, s := range spaces {
ratio := 1 ratio := 1
bg := s.Flags.BlockGroup()
switch { switch {
case s.Flags&blockGroupRaid0 != 0: case bg&blockGroupRaid0 != 0:
ratio = 1 ratio = 1
case s.Flags&blockGroupRaid1 != 0: case bg&blockGroupRaid1 != 0:
ratio = 2 ratio = 2
case s.Flags&blockGroupRaid5 != 0: case bg&blockGroupRaid5 != 0:
ratio = 0 ratio = 0
case s.Flags&blockGroupRaid6 != 0: case bg&blockGroupRaid6 != 0:
ratio = 0 ratio = 0
case s.Flags&blockGroupDup != 0: case bg&blockGroupDup != 0:
ratio = 2 ratio = 2
case s.Flags&blockGroupRaid10 != 0: case bg&blockGroupRaid10 != 0:
ratio = 2 ratio = 2
} }
if ratio > maxDataRatio { if ratio > maxDataRatio {
maxDataRatio = ratio maxDataRatio = ratio
} }
if s.Flags&spaceInfoGlobalRsv != 0 { if bg&spaceInfoGlobalRsv != 0 {
u.GlobalReserve = s.TotalBytes u.GlobalReserve = s.TotalBytes
u.GlobalReserveUsed = s.UsedBytes u.GlobalReserveUsed = s.UsedBytes
} }
if s.Flags&(blockGroupData|blockGroupMetadata) == (blockGroupData | blockGroupMetadata) { if bg&(blockGroupData|blockGroupMetadata) == (blockGroupData | blockGroupMetadata) {
mixed = true mixed = true
} }
if s.Flags&blockGroupData != 0 { if bg&blockGroupData != 0 {
u.RawDataUsed += s.UsedBytes * uint64(ratio) u.RawDataUsed += s.UsedBytes * uint64(ratio)
u.RawDataChunks += s.TotalBytes * uint64(ratio) u.RawDataChunks += s.TotalBytes * uint64(ratio)
u.LogicalDataChunks += s.TotalBytes u.LogicalDataChunks += s.TotalBytes
} }
if s.Flags&blockGroupMetadata != 0 { if bg&blockGroupMetadata != 0 {
u.RawMetaUsed += s.UsedBytes * uint64(ratio) u.RawMetaUsed += s.UsedBytes * uint64(ratio)
u.RawMetaChunks += s.TotalBytes * uint64(ratio) u.RawMetaChunks += s.TotalBytes * uint64(ratio)
u.LogicalMetaChunks += s.TotalBytes u.LogicalMetaChunks += s.TotalBytes
} }
if s.Flags&blockGroupSystem != 0 { if bg&blockGroupSystem != 0 {
u.SystemUsed += s.UsedBytes * uint64(ratio) u.SystemUsed += s.UsedBytes * uint64(ratio)
u.SystemChunks += s.TotalBytes * uint64(ratio) u.SystemChunks += s.TotalBytes * uint64(ratio)
} }

View File

@ -66,8 +66,8 @@ func openDir(path string) (*os.File, error) {
type searchResult struct { type searchResult struct {
TransID uint64 TransID uint64
ObjectID uint64 ObjectID objectID
Type uint32 Type treeKeyType
Offset uint64 Offset uint64
Data []byte Data []byte
} }

View File

@ -6,23 +6,23 @@ import (
"os" "os"
) )
func lookupUUIDSubvolItem(f *os.File, uuid UUID) (uint64, error) { func lookupUUIDSubvolItem(f *os.File, uuid UUID) (objectID, error) {
return uuidTreeLookupAny(f, uuid, uuidKeySubvol) return uuidTreeLookupAny(f, uuid, uuidKeySubvol)
} }
func lookupUUIDReceivedSubvolItem(f *os.File, uuid UUID) (uint64, error) { func lookupUUIDReceivedSubvolItem(f *os.File, uuid UUID) (objectID, error) {
return uuidTreeLookupAny(f, uuid, uuidKeyReceivedSubvol) return uuidTreeLookupAny(f, uuid, uuidKeyReceivedSubvol)
} }
func (id UUID) toKey() (objID, off uint64) { func (id UUID) toKey() (objID objectID, off uint64) {
objID = binary.LittleEndian.Uint64(id[:8]) objID = objectID(binary.LittleEndian.Uint64(id[:8]))
off = binary.LittleEndian.Uint64(id[8:16]) off = binary.LittleEndian.Uint64(id[8:16])
return return
} }
// uuidTreeLookupAny searches uuid tree for a given uuid in specified field. // uuidTreeLookupAny searches uuid tree for a given uuid in specified field.
// It returns ErrNotFound if object was not found. // It returns ErrNotFound if object was not found.
func uuidTreeLookupAny(f *os.File, uuid UUID, typ uint32) (uint64, error) { func uuidTreeLookupAny(f *os.File, uuid UUID, typ treeKeyType) (objectID, error) {
objId, off := uuid.toKey() objId, off := uuid.toKey()
args := btrfs_ioctl_search_key{ args := btrfs_ioctl_search_key{
tree_id: uuidTreeObjectid, tree_id: uuidTreeObjectid,
@ -45,5 +45,5 @@ func uuidTreeLookupAny(f *os.File, uuid UUID, typ uint32) (uint64, error) {
if len(out.Data) != 8 { if len(out.Data) != 8 {
return 0, fmt.Errorf("btrfs: uuid item with illegal size %d", len(out.Data)) return 0, fmt.Errorf("btrfs: uuid item with illegal size %d", len(out.Data))
} }
return binary.LittleEndian.Uint64(out.Data), nil return objectID(binary.LittleEndian.Uint64(out.Data)), nil
} }