From 989489096eedbe48957651cac7da0c6d8e7009de Mon Sep 17 00:00:00 2001 From: Will Rouesnel Date: Fri, 23 Feb 2018 01:55:49 +1100 Subject: [PATCH] Refactor repository layout and convert build system to Mage. This commit implements a massive refactor of the repository, and moves the build system over to use Mage (magefile.org) which should allow seamless building across multiple platforms. --- .gitignore | 22 +- .travis.yml | 38 +- README.md | 22 +- .../postgres_exporter/pg_setting.go | 0 .../postgres_exporter/pg_setting_test.go | 0 .../postgres_exporter/postgres_exporter.go | 0 .../postgres_exporter_integration_test.go | 0 .../postgres_exporter_test.go | 0 .../docker-postgres-replication/Dockerfile | 2 +- .../docker-postgres-replication/Dockerfile.p2 | 0 .../docker-postgres-replication/README.md | 0 .../docker-compose.yml | 0 .../docker-entrypoint.sh | 0 .../setup-replication.sh | 0 .../postgres_exporter/tests}/test-smoke | 10 +- .../postgres_exporter/tests}/username_file | 0 .../postgres_exporter/tests}/userpass_file | 0 mage.go | 11 + magefile.go | 736 +++++++ postgres_exporter_integration_test_script | 3 + tools/Makefile | 67 - tools/src | 2 +- .../github.com/GoASTScanner/gas/README.md | 39 +- .../github.com/GoASTScanner/gas/analyzer.go | 197 ++ .../GoASTScanner/gas/{core => }/call_list.go | 41 +- .../gas/{ => cmd/gas}/filelist.go | 0 .../GoASTScanner/gas/cmd/gas/main.go | 254 +++ .../GoASTScanner/gas/cmd/gas/sort_issues.go | 20 + .../github.com/GoASTScanner/gas/config.go | 88 + .../GoASTScanner/gas/core/analyzer.go | 235 --- .../GoASTScanner/gas/core/select.go | 404 ---- .../GoASTScanner/gas/{core => }/helpers.go | 53 +- .../GoASTScanner/gas/import_tracker.go | 67 + .../GoASTScanner/gas/{core => }/issue.go | 26 +- .../github.com/GoASTScanner/gas/main.go | 293 --- .../GoASTScanner/gas/output/formatter.go | 74 +- .../gas/output/junit_xml_format.go | 74 + .../GoASTScanner/gas/{core => }/resolve.go | 3 +- .../github.com/GoASTScanner/gas/rule.go | 58 + 
.../github.com/GoASTScanner/gas/rulelist.go | 91 - .../github.com/GoASTScanner/gas/rules/big.go | 13 +- .../github.com/GoASTScanner/gas/rules/bind.go | 33 +- .../GoASTScanner/gas/rules/blacklist.go | 79 +- .../GoASTScanner/gas/rules/errors.go | 20 +- .../GoASTScanner/gas/rules/fileperms.go | 22 +- .../gas/rules/hardcoded_credentials.go | 24 +- .../github.com/GoASTScanner/gas/rules/rand.go | 11 +- .../github.com/GoASTScanner/gas/rules/rsa.go | 26 +- .../GoASTScanner/gas/rules/rulelist.go | 102 + .../github.com/GoASTScanner/gas/rules/sql.go | 74 +- .../github.com/GoASTScanner/gas/rules/ssh.go | 33 + .../GoASTScanner/gas/rules/subproc.go | 52 +- .../GoASTScanner/gas/rules/tempfiles.go | 24 +- .../GoASTScanner/gas/rules/templates.go | 25 +- .../github.com/GoASTScanner/gas/rules/tls.go | 124 +- .../GoASTScanner/gas/rules/tls_config.go | 132 ++ .../GoASTScanner/gas/rules/unsafe.go | 13 +- .../GoASTScanner/gas/rules/weakcrypto.go | 12 +- .../github.com/GoASTScanner/gas/tools.go | 276 --- .../github.com/GoASTScanner/gas/vendor.conf | 7 - .../alecthomas/gometalinter/README.md | 54 +- .../alecthomas/gometalinter/aggregate.go | 2 +- .../alecthomas/gometalinter/checkstyle.go | 7 +- .../alecthomas/gometalinter/config.go | 55 + .../alecthomas/gometalinter/directives.go | 16 +- .../alecthomas/gometalinter/execute.go | 39 +- .../alecthomas/gometalinter/issue.go | 68 +- .../alecthomas/gometalinter/linters.go | 2 +- .../alecthomas/gometalinter/main.go | 77 +- .../github.com/golang/lint/golint/golint.go | 2 +- .../github.com/golang/lint/golint/import.go | 7 +- .../vendor/github.com/kisielk/errcheck/go.mod | 6 + .../github.com/mattn/goveralls/README.md | 26 + .../github.com/mattn/goveralls/goveralls.go | 8 + .../opennota/check/cmd/varcheck/varcheck.go | 10 +- .../github.com/stripe/safesql/README.md | 9 +- .../github.com/stripe/safesql/safesql.go | 109 +- .../tmthrgd/go-bindata/CONTRIBUTING.md | 79 + .../github.com/tmthrgd/go-bindata/LICENSE | 54 + 
.../github.com/tmthrgd/go-bindata/README.md | 189 ++ .../tmthrgd/go-bindata/base32_compat.go | 22 + .../tmthrgd/go-bindata/base32_go19.go | 11 + .../github.com/tmthrgd/go-bindata/buffers.go | 49 + .../github.com/tmthrgd/go-bindata/common.go | 51 + .../github.com/tmthrgd/go-bindata/config.go | 209 ++ .../tmthrgd/go-bindata/corpus-sha256sums | 68 + .../tmthrgd/go-bindata/corpus_test_travis.sh | 7 + .../github.com/tmthrgd/go-bindata/debug.go | 84 + .../github.com/tmthrgd/go-bindata/doc.go | 128 ++ .../github.com/tmthrgd/go-bindata/files.go | 130 ++ .../github.com/tmthrgd/go-bindata/format.go | 47 + .../github.com/tmthrgd/go-bindata/generate.go | 76 + .../go-bindata/go-bindata/appendRegexValue.go | 44 + .../tmthrgd/go-bindata/go-bindata/main.go | 178 ++ .../github.com/tmthrgd/go-bindata/header.go | 73 + .../internal/identifier/identifier.go | 31 + .../github.com/tmthrgd/go-bindata/name.go | 61 + .../github.com/tmthrgd/go-bindata/release.go | 328 ++++ .../tmthrgd/go-bindata/stringwriter.go | 71 + .../github.com/tmthrgd/go-bindata/tree.go | 98 + .../vendor/golang.org/x/lint/CONTRIBUTING.md | 15 + tools/vendor/golang.org/x/lint/LICENSE | 27 + tools/vendor/golang.org/x/lint/README.md | 85 + tools/vendor/golang.org/x/lint/lint.go | 1708 +++++++++++++++++ .../x/tools/go/callgraph/rta/rta.go | 459 +++++ tools/vendor/mvdan.cc/interfacer/README.md | 2 +- tools/vendor/mvdan.cc/interfacer/main.go | 8 +- tools/vendor/mvdan.cc/unparam/check/check.go | 175 +- tools/vendor/mvdan.cc/unparam/main.go | 12 +- tools/vendor/vendor.json | 142 +- vendor/github.com/dsnet/compress/LICENSE.md | 24 + vendor/github.com/dsnet/compress/README.md | 75 + vendor/github.com/dsnet/compress/api.go | 74 + vendor/github.com/dsnet/compress/bzip2/bwt.go | 110 ++ .../github.com/dsnet/compress/bzip2/common.go | 110 ++ .../dsnet/compress/bzip2/fuzz_off.go | 13 + .../dsnet/compress/bzip2/fuzz_on.go | 77 + .../compress/bzip2/internal/sais/common.go | 28 + .../compress/bzip2/internal/sais/sais_byte.go | 661 
+++++++ .../compress/bzip2/internal/sais/sais_int.go | 661 +++++++ .../dsnet/compress/bzip2/mtf_rle2.go | 131 ++ .../github.com/dsnet/compress/bzip2/prefix.go | 374 ++++ .../github.com/dsnet/compress/bzip2/reader.go | 274 +++ .../github.com/dsnet/compress/bzip2/rle1.go | 101 + .../github.com/dsnet/compress/bzip2/writer.go | 307 +++ .../dsnet/compress/internal/common.go | 107 ++ .../dsnet/compress/internal/debug.go | 12 + .../dsnet/compress/internal/errors/errors.go | 120 ++ .../dsnet/compress/internal/gofuzz.go | 12 + .../dsnet/compress/internal/prefix/debug.go | 159 ++ .../dsnet/compress/internal/prefix/decoder.go | 136 ++ .../dsnet/compress/internal/prefix/encoder.go | 66 + .../dsnet/compress/internal/prefix/prefix.go | 400 ++++ .../dsnet/compress/internal/prefix/range.go | 93 + .../dsnet/compress/internal/prefix/reader.go | 335 ++++ .../dsnet/compress/internal/prefix/wrap.go | 146 ++ .../dsnet/compress/internal/prefix/writer.go | 166 ++ .../dsnet/compress/internal/release.go | 21 + vendor/github.com/dsnet/compress/zbench.sh | 12 + vendor/github.com/dsnet/compress/zfuzz.sh | 10 + vendor/github.com/dsnet/compress/zprof.sh | 54 + vendor/github.com/dsnet/compress/ztest.sh | 50 + vendor/github.com/golang/snappy/AUTHORS | 15 + vendor/github.com/golang/snappy/CONTRIBUTORS | 37 + vendor/github.com/golang/snappy/LICENSE | 27 + vendor/github.com/golang/snappy/README | 107 ++ vendor/github.com/golang/snappy/decode.go | 237 +++ .../github.com/golang/snappy/decode_amd64.go | 14 + .../github.com/golang/snappy/decode_amd64.s | 490 +++++ .../github.com/golang/snappy/decode_other.go | 101 + vendor/github.com/golang/snappy/encode.go | 285 +++ .../github.com/golang/snappy/encode_amd64.go | 29 + .../github.com/golang/snappy/encode_amd64.s | 730 +++++++ .../github.com/golang/snappy/encode_other.go | 238 +++ vendor/github.com/golang/snappy/snappy.go | 87 + .../github.com/magefile/mage/CONTRIBUTING.md | 42 + vendor/github.com/magefile/mage/Gopkg.lock | 9 + 
vendor/github.com/magefile/mage/Gopkg.toml | 22 + vendor/github.com/magefile/mage/LICENSE | 201 ++ vendor/github.com/magefile/mage/README.md | 61 + vendor/github.com/magefile/mage/bootstrap.go | 19 + .../github.com/magefile/mage/build/build.go | 1655 ++++++++++++++++ vendor/github.com/magefile/mage/build/doc.go | 166 ++ vendor/github.com/magefile/mage/build/read.go | 247 +++ .../github.com/magefile/mage/build/syslist.go | 8 + vendor/github.com/magefile/mage/build/zcgo.go | 37 + .../magefile/mage/mage/command_string.go | 16 + .../magefile/mage/mage/magefile_tmpl.go | 46 + vendor/github.com/magefile/mage/mage/main.go | 459 +++++ .../github.com/magefile/mage/mage/template.go | 202 ++ vendor/github.com/magefile/mage/magefile.go | 94 + vendor/github.com/magefile/mage/main.go | 11 + vendor/github.com/magefile/mage/mg/deps.go | 166 ++ vendor/github.com/magefile/mage/mg/errors.go | 51 + vendor/github.com/magefile/mage/mg/runtime.go | 36 + .../magefile/mage/parse/import_go1.9.go | 13 + .../magefile/mage/parse/import_not_go1.9.go | 15 + .../github.com/magefile/mage/parse/parse.go | 341 ++++ .../magefile/mage/parse/srcimporter/sizes.go | 40 + .../mage/parse/srcimporter/srcimporter.go | 213 ++ vendor/github.com/magefile/mage/sh/cmd.go | 165 ++ vendor/github.com/magefile/mage/sh/helpers.go | 16 + .../github.com/magefile/mage/target/target.go | 122 ++ .../github.com/magefile/mage/types/funcs.go | 58 + vendor/github.com/mholt/archiver/LICENSE | 21 + vendor/github.com/mholt/archiver/README.md | 83 + vendor/github.com/mholt/archiver/appveyor.yml | 32 + vendor/github.com/mholt/archiver/archiver.go | 107 ++ vendor/github.com/mholt/archiver/build.bash | 21 + vendor/github.com/mholt/archiver/rar.go | 109 ++ vendor/github.com/mholt/archiver/tar.go | 234 +++ vendor/github.com/mholt/archiver/tarbz2.go | 106 + vendor/github.com/mholt/archiver/targz.go | 98 + vendor/github.com/mholt/archiver/tarlz4.go | 92 + vendor/github.com/mholt/archiver/tarsz.go | 92 + 
vendor/github.com/mholt/archiver/tarxz.go | 105 + vendor/github.com/mholt/archiver/zip.go | 233 +++ vendor/github.com/nwaples/rardecode/LICENSE | 23 + vendor/github.com/nwaples/rardecode/README.md | 4 + .../github.com/nwaples/rardecode/archive.go | 306 +++ .../github.com/nwaples/rardecode/archive15.go | 468 +++++ .../github.com/nwaples/rardecode/archive50.go | 475 +++++ .../nwaples/rardecode/bit_reader.go | 119 ++ .../github.com/nwaples/rardecode/decode29.go | 264 +++ .../nwaples/rardecode/decode29_lz.go | 247 +++ .../nwaples/rardecode/decode29_ppm.go | 132 ++ .../github.com/nwaples/rardecode/decode50.go | 294 +++ .../nwaples/rardecode/decode_reader.go | 290 +++ .../nwaples/rardecode/decrypt_reader.go | 126 ++ .../github.com/nwaples/rardecode/filters.go | 416 ++++ .../github.com/nwaples/rardecode/huffman.go | 208 ++ .../github.com/nwaples/rardecode/ppm_model.go | 1096 +++++++++++ vendor/github.com/nwaples/rardecode/reader.go | 369 ++++ vendor/github.com/nwaples/rardecode/vm.go | 687 +++++++ vendor/github.com/pierrec/lz4/LICENSE | 28 + vendor/github.com/pierrec/lz4/README.md | 31 + vendor/github.com/pierrec/lz4/block.go | 454 +++++ vendor/github.com/pierrec/lz4/lz4.go | 105 + vendor/github.com/pierrec/lz4/reader.go | 364 ++++ vendor/github.com/pierrec/lz4/writer.go | 377 ++++ vendor/github.com/pierrec/xxHash/LICENSE | 28 + .../pierrec/xxHash/xxHash32/xxHash32.go | 212 ++ vendor/github.com/tmthrgd/go-bindata/LICENSE | 54 + .../tmthrgd/go-bindata/restore/restore.go | 57 + vendor/github.com/ulikunitz/xz/LICENSE | 26 + vendor/github.com/ulikunitz/xz/README.md | 71 + vendor/github.com/ulikunitz/xz/TODO.md | 315 +++ vendor/github.com/ulikunitz/xz/bits.go | 74 + vendor/github.com/ulikunitz/xz/crc.go | 54 + vendor/github.com/ulikunitz/xz/format.go | 728 +++++++ vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 ++ .../ulikunitz/xz/internal/hash/doc.go | 14 + .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 + 
.../ulikunitz/xz/internal/hash/roller.go | 29 + .../ulikunitz/xz/internal/xlog/xlog.go | 457 +++++ .../github.com/ulikunitz/xz/lzma/bintree.go | 523 +++++ vendor/github.com/ulikunitz/xz/lzma/bitops.go | 45 + .../github.com/ulikunitz/xz/lzma/breader.go | 39 + vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 ++ .../ulikunitz/xz/lzma/bytewriter.go | 37 + .../github.com/ulikunitz/xz/lzma/decoder.go | 277 +++ .../ulikunitz/xz/lzma/decoderdict.go | 135 ++ .../ulikunitz/xz/lzma/directcodec.go | 49 + .../github.com/ulikunitz/xz/lzma/distcodec.go | 156 ++ .../github.com/ulikunitz/xz/lzma/encoder.go | 268 +++ .../ulikunitz/xz/lzma/encoderdict.go | 149 ++ vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 +++ vendor/github.com/ulikunitz/xz/lzma/header.go | 167 ++ .../github.com/ulikunitz/xz/lzma/header2.go | 398 ++++ .../ulikunitz/xz/lzma/lengthcodec.go | 129 ++ .../ulikunitz/xz/lzma/literalcodec.go | 132 ++ .../ulikunitz/xz/lzma/matchalgorithm.go | 52 + .../github.com/ulikunitz/xz/lzma/operation.go | 80 + vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 + .../ulikunitz/xz/lzma/properties.go | 69 + .../ulikunitz/xz/lzma/rangecodec.go | 248 +++ vendor/github.com/ulikunitz/xz/lzma/reader.go | 100 + .../github.com/ulikunitz/xz/lzma/reader2.go | 232 +++ vendor/github.com/ulikunitz/xz/lzma/state.go | 151 ++ .../ulikunitz/xz/lzma/treecodecs.go | 133 ++ vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 ++ .../github.com/ulikunitz/xz/lzma/writer2.go | 305 +++ vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 ++ vendor/github.com/ulikunitz/xz/make-docs | 5 + vendor/github.com/ulikunitz/xz/reader.go | 373 ++++ vendor/github.com/ulikunitz/xz/writer.go | 386 ++++ vendor/vendor.json | 150 ++ 269 files changed, 35309 insertions(+), 2017 deletions(-) rename pg_setting.go => cmd/postgres_exporter/pg_setting.go (100%) rename pg_setting_test.go => cmd/postgres_exporter/pg_setting_test.go (100%) rename postgres_exporter.go 
=> cmd/postgres_exporter/postgres_exporter.go (100%) rename postgres_exporter_integration_test.go => cmd/postgres_exporter/postgres_exporter_integration_test.go (100%) rename postgres_exporter_test.go => cmd/postgres_exporter/postgres_exporter_test.go (100%) rename {tests => cmd/postgres_exporter/tests}/docker-postgres-replication/Dockerfile (94%) rename {tests => cmd/postgres_exporter/tests}/docker-postgres-replication/Dockerfile.p2 (100%) rename {tests => cmd/postgres_exporter/tests}/docker-postgres-replication/README.md (100%) rename {tests => cmd/postgres_exporter/tests}/docker-postgres-replication/docker-compose.yml (100%) rename {tests => cmd/postgres_exporter/tests}/docker-postgres-replication/docker-entrypoint.sh (100%) rename {tests => cmd/postgres_exporter/tests}/docker-postgres-replication/setup-replication.sh (100%) rename {tests => cmd/postgres_exporter/tests}/test-smoke (95%) rename {tests => cmd/postgres_exporter/tests}/username_file (100%) rename {tests => cmd/postgres_exporter/tests}/userpass_file (100%) create mode 100644 mage.go create mode 100644 magefile.go delete mode 100644 tools/Makefile create mode 100644 tools/vendor/github.com/GoASTScanner/gas/analyzer.go rename tools/vendor/github.com/GoASTScanner/gas/{core => }/call_list.go (62%) rename tools/vendor/github.com/GoASTScanner/gas/{ => cmd/gas}/filelist.go (100%) create mode 100644 tools/vendor/github.com/GoASTScanner/gas/cmd/gas/main.go create mode 100644 tools/vendor/github.com/GoASTScanner/gas/cmd/gas/sort_issues.go create mode 100644 tools/vendor/github.com/GoASTScanner/gas/config.go delete mode 100644 tools/vendor/github.com/GoASTScanner/gas/core/analyzer.go delete mode 100644 tools/vendor/github.com/GoASTScanner/gas/core/select.go rename tools/vendor/github.com/GoASTScanner/gas/{core => }/helpers.go (77%) create mode 100644 tools/vendor/github.com/GoASTScanner/gas/import_tracker.go rename tools/vendor/github.com/GoASTScanner/gas/{core => }/issue.go (85%) delete mode 100644 
tools/vendor/github.com/GoASTScanner/gas/main.go create mode 100644 tools/vendor/github.com/GoASTScanner/gas/output/junit_xml_format.go rename tools/vendor/github.com/GoASTScanner/gas/{core => }/resolve.go (99%) create mode 100644 tools/vendor/github.com/GoASTScanner/gas/rule.go delete mode 100644 tools/vendor/github.com/GoASTScanner/gas/rulelist.go create mode 100644 tools/vendor/github.com/GoASTScanner/gas/rules/rulelist.go create mode 100644 tools/vendor/github.com/GoASTScanner/gas/rules/ssh.go create mode 100644 tools/vendor/github.com/GoASTScanner/gas/rules/tls_config.go delete mode 100644 tools/vendor/github.com/GoASTScanner/gas/tools.go delete mode 100644 tools/vendor/github.com/GoASTScanner/gas/vendor.conf create mode 100644 tools/vendor/github.com/kisielk/errcheck/go.mod create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/CONTRIBUTING.md create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/LICENSE create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/README.md create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/base32_compat.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/base32_go19.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/buffers.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/common.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/config.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/corpus-sha256sums create mode 100755 tools/vendor/github.com/tmthrgd/go-bindata/corpus_test_travis.sh create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/debug.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/doc.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/files.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/format.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/generate.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/go-bindata/appendRegexValue.go 
create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/go-bindata/main.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/header.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/internal/identifier/identifier.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/name.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/release.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/stringwriter.go create mode 100644 tools/vendor/github.com/tmthrgd/go-bindata/tree.go create mode 100644 tools/vendor/golang.org/x/lint/CONTRIBUTING.md create mode 100644 tools/vendor/golang.org/x/lint/LICENSE create mode 100644 tools/vendor/golang.org/x/lint/README.md create mode 100644 tools/vendor/golang.org/x/lint/lint.go create mode 100644 tools/vendor/golang.org/x/tools/go/callgraph/rta/rta.go create mode 100644 vendor/github.com/dsnet/compress/LICENSE.md create mode 100644 vendor/github.com/dsnet/compress/README.md create mode 100644 vendor/github.com/dsnet/compress/api.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/bwt.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/common.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/fuzz_off.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/fuzz_on.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/prefix.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/reader.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/rle1.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/writer.go create mode 100644 vendor/github.com/dsnet/compress/internal/common.go create mode 100644 
vendor/github.com/dsnet/compress/internal/debug.go create mode 100644 vendor/github.com/dsnet/compress/internal/errors/errors.go create mode 100644 vendor/github.com/dsnet/compress/internal/gofuzz.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/debug.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/decoder.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/encoder.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/prefix.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/range.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/reader.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/wrap.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/writer.go create mode 100644 vendor/github.com/dsnet/compress/internal/release.go create mode 100755 vendor/github.com/dsnet/compress/zbench.sh create mode 100755 vendor/github.com/dsnet/compress/zfuzz.sh create mode 100755 vendor/github.com/dsnet/compress/zprof.sh create mode 100755 vendor/github.com/dsnet/compress/ztest.sh create mode 100644 vendor/github.com/golang/snappy/AUTHORS create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/golang/snappy/LICENSE create mode 100644 vendor/github.com/golang/snappy/README create mode 100644 vendor/github.com/golang/snappy/decode.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 vendor/github.com/golang/snappy/decode_other.go create mode 100644 vendor/github.com/golang/snappy/encode.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 vendor/github.com/golang/snappy/encode_other.go create mode 100644 vendor/github.com/golang/snappy/snappy.go create mode 100644 
vendor/github.com/magefile/mage/CONTRIBUTING.md create mode 100644 vendor/github.com/magefile/mage/Gopkg.lock create mode 100644 vendor/github.com/magefile/mage/Gopkg.toml create mode 100644 vendor/github.com/magefile/mage/LICENSE create mode 100644 vendor/github.com/magefile/mage/README.md create mode 100644 vendor/github.com/magefile/mage/bootstrap.go create mode 100644 vendor/github.com/magefile/mage/build/build.go create mode 100644 vendor/github.com/magefile/mage/build/doc.go create mode 100644 vendor/github.com/magefile/mage/build/read.go create mode 100644 vendor/github.com/magefile/mage/build/syslist.go create mode 100644 vendor/github.com/magefile/mage/build/zcgo.go create mode 100644 vendor/github.com/magefile/mage/mage/command_string.go create mode 100644 vendor/github.com/magefile/mage/mage/magefile_tmpl.go create mode 100644 vendor/github.com/magefile/mage/mage/main.go create mode 100644 vendor/github.com/magefile/mage/mage/template.go create mode 100644 vendor/github.com/magefile/mage/magefile.go create mode 100644 vendor/github.com/magefile/mage/main.go create mode 100644 vendor/github.com/magefile/mage/mg/deps.go create mode 100644 vendor/github.com/magefile/mage/mg/errors.go create mode 100644 vendor/github.com/magefile/mage/mg/runtime.go create mode 100644 vendor/github.com/magefile/mage/parse/import_go1.9.go create mode 100644 vendor/github.com/magefile/mage/parse/import_not_go1.9.go create mode 100644 vendor/github.com/magefile/mage/parse/parse.go create mode 100644 vendor/github.com/magefile/mage/parse/srcimporter/sizes.go create mode 100644 vendor/github.com/magefile/mage/parse/srcimporter/srcimporter.go create mode 100644 vendor/github.com/magefile/mage/sh/cmd.go create mode 100644 vendor/github.com/magefile/mage/sh/helpers.go create mode 100644 vendor/github.com/magefile/mage/target/target.go create mode 100644 vendor/github.com/magefile/mage/types/funcs.go create mode 100644 vendor/github.com/mholt/archiver/LICENSE create mode 100644 
vendor/github.com/mholt/archiver/README.md create mode 100644 vendor/github.com/mholt/archiver/appveyor.yml create mode 100644 vendor/github.com/mholt/archiver/archiver.go create mode 100755 vendor/github.com/mholt/archiver/build.bash create mode 100644 vendor/github.com/mholt/archiver/rar.go create mode 100644 vendor/github.com/mholt/archiver/tar.go create mode 100644 vendor/github.com/mholt/archiver/tarbz2.go create mode 100644 vendor/github.com/mholt/archiver/targz.go create mode 100644 vendor/github.com/mholt/archiver/tarlz4.go create mode 100644 vendor/github.com/mholt/archiver/tarsz.go create mode 100644 vendor/github.com/mholt/archiver/tarxz.go create mode 100644 vendor/github.com/mholt/archiver/zip.go create mode 100644 vendor/github.com/nwaples/rardecode/LICENSE create mode 100644 vendor/github.com/nwaples/rardecode/README.md create mode 100644 vendor/github.com/nwaples/rardecode/archive.go create mode 100644 vendor/github.com/nwaples/rardecode/archive15.go create mode 100644 vendor/github.com/nwaples/rardecode/archive50.go create mode 100644 vendor/github.com/nwaples/rardecode/bit_reader.go create mode 100644 vendor/github.com/nwaples/rardecode/decode29.go create mode 100644 vendor/github.com/nwaples/rardecode/decode29_lz.go create mode 100644 vendor/github.com/nwaples/rardecode/decode29_ppm.go create mode 100644 vendor/github.com/nwaples/rardecode/decode50.go create mode 100644 vendor/github.com/nwaples/rardecode/decode_reader.go create mode 100644 vendor/github.com/nwaples/rardecode/decrypt_reader.go create mode 100644 vendor/github.com/nwaples/rardecode/filters.go create mode 100644 vendor/github.com/nwaples/rardecode/huffman.go create mode 100644 vendor/github.com/nwaples/rardecode/ppm_model.go create mode 100644 vendor/github.com/nwaples/rardecode/reader.go create mode 100644 vendor/github.com/nwaples/rardecode/vm.go create mode 100644 vendor/github.com/pierrec/lz4/LICENSE create mode 100644 vendor/github.com/pierrec/lz4/README.md create mode 100644 
vendor/github.com/pierrec/lz4/block.go create mode 100644 vendor/github.com/pierrec/lz4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/reader.go create mode 100644 vendor/github.com/pierrec/lz4/writer.go create mode 100644 vendor/github.com/pierrec/xxHash/LICENSE create mode 100644 vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go create mode 100644 vendor/github.com/tmthrgd/go-bindata/LICENSE create mode 100644 vendor/github.com/tmthrgd/go-bindata/restore/restore.go create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 
vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100755 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go diff --git a/.gitignore b/.gitignore index 004846ed..caefb315 100644 --- a/.gitignore +++ b/.gitignore @@ -1,15 +1,15 @@ -.build -postgres_exporter -postgres_exporter_integration_test +/.build +/postgres_exporter +/postgres_exporter_integration_test *.tar.gz *.test *-stamp -.idea +/.idea *.iml -cover.out -cover.*.out -.coverage -bin -release -*.prom -.metrics.*.*.prom +/cover.out +/cover.*.out +/.coverage +/bin +/release +/*.prom +/.metrics.*.*.prom diff --git a/.travis.yml b/.travis.yml index 322be1a3..816ebec8 100644 --- a/.travis.yml +++ b/.travis.yml @@ 
-3,37 +3,29 @@ services: - docker language: go go: -- '1.9' -# Make sure we have p2 and the postgres client. +- '1.10' before_install: - go get -v github.com/mattn/goveralls -- sudo wget -O /usr/local/bin/p2 https://github.com/wrouesnel/p2cli/releases/download/r4/p2 && - sudo chmod +x /usr/local/bin/p2 -- sudo wget -O /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/1.9.0-rc4/docker-compose-Linux-x86_64 && - sudo chmod +x /usr/local/bin/docker-compose +- sudo wget -O /usr/local/bin/p2 https://github.com/wrouesnel/p2cli/releases/download/r4/p2 + && sudo chmod +x /usr/local/bin/p2 +- sudo wget -O /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/1.9.0-rc4/docker-compose-Linux-x86_64 + && sudo chmod +x /usr/local/bin/docker-compose - sudo apt-get update && sudo apt-get install postgresql-client-common - script: -- make all -- make docker -- make test-integration -- make cover.out -- make release -- $HOME/gopath/bin/goveralls -coverprofile=cover.out -service=travis-ci +- go run mage.go -v all +- "$HOME/gopath/bin/goveralls -coverprofile=cover.out -service=travis-ci" +- go run mage.go docker after_success: - docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS -# Push a tagged build if a tag is found. -- if [ ! -z "$TRAVIS_TAG" ]; then - docker tag wrouesnel/postgres_exporter:latest wrouesnel/postgres_exporter:$TRAVIS_TAG ; - docker push wrouesnel/postgres_exporter:$TRAVIS_TAG ; - fi -# Push a latest version -- if [ "$TRAVIS_BRANCH" == "master" ]; then docker push wrouesnel/postgres_exporter ; fi +- if [ ! 
-z "$TRAVIS_TAG" ]; then docker tag wrouesnel/postgres_exporter:latest wrouesnel/postgres_exporter:$TRAVIS_TAG + ; docker push wrouesnel/postgres_exporter:$TRAVIS_TAG ; fi +- if [ "$TRAVIS_BRANCH" == "master" ]; then docker push wrouesnel/postgres_exporter + ; fi env: global: - - secure: RfoWQj5tEB/t3XL2tqJW7u7Qscpz1QBOfF9lMFpB4kAUMTtZU0zBbXfMo1JheGoJQQxD/7NLRHhbUWPT2489o3KKpRTQ7RHn3k8n5U7opH01bWX0+l/EPVmhlsKjSDSLGgmxz80j3I6C8ZV3qDUijSx7r90QUNHGbZtV7g+KtoUTpRV0zir/heK6qq9LHWNHbNsJyHK8qHmd6g1UzWIBaZPJ6a/n/rO2jq4uS1JR0VlIJPRF11HOLH8IjFQvVYpN7YbEslxyNsfQJUSP/7CghSLLVWPSATEjMm8a5GJVLc564+nYghm484psEtiMXkZ3n6ie7AT8aJrKfexWrwh2aCc+cK4PiyXrf4euZehZNYogmFCqWzd1LJKcN2uIkpBSuZQDm3e6c4qkkWGpx+RdFWtAMG8IgZLDbcuryxFNzMwHc2CJ009s9Zsa+g7D57csyR5LCZ8YtNGI3g8FmhwpCKvYkfKa9aijUEWyJMyT4Vhd/w7btMTuwYHgUQ85k4ov4Xjz5SNpAGgemig5G5w7PJj4NhGvIBz9weL154x/BFVjHOZZ6Y/bWgJIPoW1KM15x5K8QylWYEBUHtwiyVyXOxHqt6MOX1vYo1L37jMK88IErrfh/VmlxEhtN9wOghk8IudMfFwQtjIwiWlJf218wxMIzUjoyb5/25tU9f2OJrg= - - secure: WP96T7yshE03XsPVc9ICbwZXZ6nVsQDCQ9NGKnIEQa4T1Swu5uLVzxjGeowHPykKbKphQmT8inMniBxB48OLc3VVqNxVI+00ppLPEf7n79w2wVbwFOEa6TiOdws+0wOphkeSYc0L+o2aSfoMKJHF+rVW9tmM2tszVjofYHhdWjqloc2pqsfOnqbR7icfpmzMWKyezIE20YOIBsiKZJTKXiZ1SaG9ExkNwuZ7L+HRF1yeI0OdAM4VfEzBK1Gwicy2BtrbyHnl4zgcSoIBmuzo+pNuvqyGmBn3C221M6ki7NoDJDfW5brcvDmiMODWGnka7iW0nt5aUbVtURM8BhWZR0uINo30aYjr4j39UBq8y+mqYV0dp/dMEmy2fa1mogr+DuHUNVSg59Au45AZeom8N6FT03nlg+RcG/tV1skvP/mn9n9CKsyfvC4Rf3jp4+LTiJ9JIch74MecRYVwlpKM+i8s6uDftt3mvxeIYdK+NEMcfwKMv8KTwuxRo/3KRhif7z2cOE+oMbT5POWO19bfboRPCs4xiMTcqpx8dJVs41SacY52PPgjFSnyVrKvzAyjn6mePjLhpoPZueHZuJYPNa9QC8JcASMlCI7lf2Eq+2Dmp2JxmndkRs/cIfHgmO4gtiNM7Vb/rlML1D/8LYPWU/Rdp82/yDffC0ugMNovXt0= - - secure: 
RRQH4Tr94OblZoqls50BIoyK1OvK9fALs4pAq1Uk5dksY1NWnomheQzOaHzbVfMfXc4zXAzppZIqxUDGn8GiSLbtJL6pnxsxYNGoCGdS8lMjjKWXbCAs8TBJobi3krOOjqgbhOWTpyluTEShnBcg7CjrRQUa/ChS3uE5kl21/4eIl9Be6Q08Eqm3p1yvMAyAgIL6Y6oPAAtBa6zIsi2MSNlryz3RKHJO7AheilppYx3E8B03A+a/oqvTTcw6w/RwBYxB8MYfSLC0jSssZz5pUSX/byUaklGFhQLnKAzJyhrMOvRyMVcO4PHaLgVi1eAKQz6eLQh7uEiIqKh19cuvTbZHDgu8zMpLDTxOW9U95e4kbjOZ5pWZ7E5QTrb24RZIt42JGbmme7PWOvy3zNbWHwfwiOF1qwYwISUcj2KFCpes8mDGt6iv46LfdlU0uoZdZu3MAiTiW0u2SD5hIeFq4XYesPtkS/TKFoAbB5Tu1qbxdmYu5NqmfvmxsmeNEm4inFJ5ap3fRRCVo668Z6qRMuQ1URcEfOz8iEftP9CnwSOXRuiuMo+W9GgckRuDZcPyQMCftq8+PhB+SjK57zrAd4Kxqf6kVHV16tcWqmFjfJJUFqmL+gpjT/VMEVDY2FOnbOARjkeLTjVC4dADBjxfJ6wmlLrfHdUm4GinbaHq0iA= + - DOCKER_USER=wrouesnel + - DOCKER_EMAIL=w.rouesnel@gmail.com + - secure: f0H5HKL/5f/ZZVGZ7puegWZ6eig0TmruihuSEJCx1+Y6yDZn7l8lH+eETP9KAzH27c3CG1F9ytu/3gnnTOafXnDLlCve3fL5sKF3+pNQRwi3IojsODjdfPW+KEbG+1RD7IgkCn+DSRmvvpLr4zGOmZFEM1ZtLL878u4Hsrv/X5pDbKJgG/cXDRJfsu/EcpviO4WM8zOakBY8QihXhGpZiRtpRDCXWjW49PdCkW9hsfzFaU1yjvih9EJ0cfcH+9CFCRkezwAPlCETbOv288uHXc6bCuEEX1bgJ0ZzEXYAyoO00+12ePbQZEGNikSVT55nfC+jZLLTavQkFi862Hcx/lmJpA/7aeNYOrDcomwWMRRc4Ava2+cod7acVvo45SHRq+Jj9ofDhj9s0T/aZwV+2doc9GwDN9J6aEs9Nham2G955K1H0fmMW9lv0ThSVEZ3XbzCHyR4nPAwJQXrzauqbbihCim/g/YC5gbVs7O/4GkN2Z9LK30IJr1/NtJdIa6fMk3Zdhp6LGbXCvVFRbE0rMiTLbB8O3ll2smCu3aFYv7J9IfvI0ol0ww7kULpyf/vqxkK0NJXsKgoK/Uo1lM9gNpJBHsMt9nWnDvLj2DKZNTqkxzJeG8O98ADrQWEGFhpcsSsbW9pAMsrp6D4LQikN8KoFvh9F8h9lBsYpafzlOA= deploy: skip_cleanup: true provider: releases diff --git a/README.md b/README.md index e42e1706..f5aead03 100644 --- a/README.md +++ b/README.md @@ -17,16 +17,19 @@ docker run --net=host -e DATA_SOURCE_NAME="postgresql://postgres:password@localh ``` ## Building and running + +The build system is based on [Mage](https://magefile.org) + The default make file behavior is to build the binary: ``` -go get github.com/wrouesnel/postgres_exporter -cd ${GOPATH-$HOME/go}/src/github.com/wrouesnel/postgres_exporter -make -export 
DATA_SOURCE_NAME="postgresql://login:password@hostname:port/dbname" -./postgres_exporter +$ go get github.com/wrouesnel/postgres_exporter +$ cd ${GOPATH-$HOME/go}/src/github.com/wrouesnel/postgres_exporter +$ go run mage.go +$ export DATA_SOURCE_NAME="postgresql://login:password@hostname:port/dbname" +$ ./postgres_exporter ``` -To build the dockerfile, run `make docker`. +To build the dockerfile, run `go run mage.go docker`. This will build the docker image as `wrouesnel/postgres_exporter:latest`. This is a minimal docker image containing *just* postgres_exporter. By default no SSL @@ -130,9 +133,6 @@ GRANT SELECT ON postgres_exporter.pg_stat_replication TO postgres_exporter; > ``` # Hacking - -* The build system is currently only supported for Linux-like platforms. It - depends on GNU Make. -* To build a copy for your current architecture run `make binary` or just `make` +* To build a copy for your current architecture run `go run mage.go binary` or just `go run mage.go` This will create a symlink to the just built binary in the root directory. -* To build release tar balls run `make release`. +* To build release tar balls run `go run mage.go release`. 
diff --git a/pg_setting.go b/cmd/postgres_exporter/pg_setting.go similarity index 100% rename from pg_setting.go rename to cmd/postgres_exporter/pg_setting.go diff --git a/pg_setting_test.go b/cmd/postgres_exporter/pg_setting_test.go similarity index 100% rename from pg_setting_test.go rename to cmd/postgres_exporter/pg_setting_test.go diff --git a/postgres_exporter.go b/cmd/postgres_exporter/postgres_exporter.go similarity index 100% rename from postgres_exporter.go rename to cmd/postgres_exporter/postgres_exporter.go diff --git a/postgres_exporter_integration_test.go b/cmd/postgres_exporter/postgres_exporter_integration_test.go similarity index 100% rename from postgres_exporter_integration_test.go rename to cmd/postgres_exporter/postgres_exporter_integration_test.go diff --git a/postgres_exporter_test.go b/cmd/postgres_exporter/postgres_exporter_test.go similarity index 100% rename from postgres_exporter_test.go rename to cmd/postgres_exporter/postgres_exporter_test.go diff --git a/tests/docker-postgres-replication/Dockerfile b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile similarity index 94% rename from tests/docker-postgres-replication/Dockerfile rename to cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile index e17409a8..edea0fe4 100755 --- a/tests/docker-postgres-replication/Dockerfile +++ b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile @@ -1,4 +1,4 @@ -FROM postgres:9.6 +FROM postgres:10 MAINTAINER Daniel Dent (https://www.danieldent.com) ENV PG_MAX_WAL_SENDERS 8 ENV PG_WAL_KEEP_SEGMENTS 8 diff --git a/tests/docker-postgres-replication/Dockerfile.p2 b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 similarity index 100% rename from tests/docker-postgres-replication/Dockerfile.p2 rename to cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 diff --git a/tests/docker-postgres-replication/README.md 
b/cmd/postgres_exporter/tests/docker-postgres-replication/README.md similarity index 100% rename from tests/docker-postgres-replication/README.md rename to cmd/postgres_exporter/tests/docker-postgres-replication/README.md diff --git a/tests/docker-postgres-replication/docker-compose.yml b/cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml similarity index 100% rename from tests/docker-postgres-replication/docker-compose.yml rename to cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml diff --git a/tests/docker-postgres-replication/docker-entrypoint.sh b/cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh similarity index 100% rename from tests/docker-postgres-replication/docker-entrypoint.sh rename to cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh diff --git a/tests/docker-postgres-replication/setup-replication.sh b/cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh similarity index 100% rename from tests/docker-postgres-replication/setup-replication.sh rename to cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh diff --git a/tests/test-smoke b/cmd/postgres_exporter/tests/test-smoke similarity index 95% rename from tests/test-smoke rename to cmd/postgres_exporter/tests/test-smoke index e52be71d..00b0c370 100755 --- a/tests/test-smoke +++ b/cmd/postgres_exporter/tests/test-smoke @@ -12,11 +12,17 @@ DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" METRICS_DIR=$(pwd) # Read the absolute path to the exporter -postgres_exporter=$(readlink -f $1) -test_binary=$(readlink -f $2) +postgres_exporter="$1" +test_binary="$2" export POSTGRES_PASSWORD=postgres exporter_port=9187 +echo "Exporter Binary: $postgres_exporter" 1>&2 +echo "Test Binary: $test_binary" 1>&2 + +[ -z "$postgres_exporter" ] && echo "Missing exporter binary" && exit 1 +[ -z "$test_binary" ] && echo "Missing test binary" && exit 1 + cd $DIR VERSIONS=( \ diff 
--git a/tests/username_file b/cmd/postgres_exporter/tests/username_file similarity index 100% rename from tests/username_file rename to cmd/postgres_exporter/tests/username_file diff --git a/tests/userpass_file b/cmd/postgres_exporter/tests/userpass_file similarity index 100% rename from tests/userpass_file rename to cmd/postgres_exporter/tests/userpass_file diff --git a/mage.go b/mage.go new file mode 100644 index 00000000..c1392b28 --- /dev/null +++ b/mage.go @@ -0,0 +1,11 @@ +// +build ignore + +package main + +import ( + "os" + + "github.com/magefile/mage/mage" +) + +func main() { os.Exit(mage.Main()) } diff --git a/magefile.go b/magefile.go new file mode 100644 index 00000000..1ed8be8a --- /dev/null +++ b/magefile.go @@ -0,0 +1,736 @@ +// +build mage +// Self-contained go-project magefile. + +// nolint: deadcode +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + "time" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" + "github.com/magefile/mage/target" + + "errors" + "math/bits" + "strconv" + + "github.com/mholt/archiver" +) + +var curDir = func() string { + name, _ := os.Getwd() + return name +}() + +const constCoverageDir = ".coverage" +const constToolDir = "tools" +const constBinDir = "bin" +const constReleaseDir = "release" +const constCmdDir = "cmd" +const constCoverFile = "cover.out" +const constAssets = "assets" +const constAssetsGenerated = "assets/generated" + +var coverageDir = mustStr(filepath.Abs(path.Join(curDir, constCoverageDir))) +var toolDir = mustStr(filepath.Abs(path.Join(curDir, constToolDir))) +var binDir = mustStr(filepath.Abs(path.Join(curDir, constBinDir))) +var releaseDir = mustStr(filepath.Abs(path.Join(curDir, constReleaseDir))) +var cmdDir = mustStr(filepath.Abs(path.Join(curDir, constCmdDir))) +var assetsGenerated = mustStr(filepath.Abs(path.Join(curDir, constAssetsGenerated))) + +// Calculate file paths +var toolsGoPath = toolDir +var toolsSrcDir 
= mustStr(filepath.Abs(path.Join(toolDir, "src"))) +var toolsBinDir = mustStr(filepath.Abs(path.Join(toolDir, "bin"))) +var toolsVendorDir = mustStr(filepath.Abs(path.Join(toolDir, "vendor"))) + +var outputDirs = []string{binDir, releaseDir, toolsGoPath, toolsBinDir, + toolsVendorDir, assetsGenerated, coverageDir} + +var toolsEnv = map[string]string{"GOPATH": toolsGoPath} + +var containerName = func() string { + if name := os.Getenv("CONTAINER_NAME"); name != "" { + return name + } + return "wrouesnel/postgres_exporter:latest" +}() + +type Platform struct { + OS string + Arch string + BinSuffix string +} + +func (p *Platform) String() string { + return fmt.Sprintf("%s-%s", p.OS, p.Arch) +} + +func (p *Platform) PlatformDir() string { + platformDir := path.Join(binDir, fmt.Sprintf("%s_%s_%s", productName, versionShort, p.String())) + return platformDir +} + +func (p *Platform) PlatformBin(cmd string) string { + platformBin := fmt.Sprintf("%s%s", cmd, p.BinSuffix) + return path.Join(p.PlatformDir(), platformBin) +} + +func (p *Platform) ArchiveDir() string { + return fmt.Sprintf("%s_%s_%s", productName, versionShort, p.String()) +} + +func (p *Platform) ReleaseBase() string { + return path.Join(releaseDir, fmt.Sprintf("%s_%s_%s", productName, versionShort, p.String())) +} + +// Supported platforms +var platforms []Platform = []Platform{ + {"linux", "amd64", ""}, + {"linux", "386", ""}, + {"darwin", "amd64", ""}, + {"darwin", "386", ""}, + {"windows", "amd64", ".exe"}, + {"windows", "386", ".exe"}, +} + +// productName can be overridden by environ product name +var productName = func() string { + if name := os.Getenv("PRODUCT_NAME"); name != "" { + return name + } + name, _ := os.Getwd() + return path.Base(name) +}() + +// Source files +var goSrc []string +var goDirs []string +var goPkgs []string +var goCmds []string + +var version = func() string { + if v := os.Getenv("VERSION"); v != "" { + return v + } + out, _ := sh.Output("git", "describe", "--dirty") + + if out 
== "" { + return "v0.0.0" + } + + return out +}() + +var versionShort = func() string { + if v := os.Getenv("VERSION_SHORT"); v != "" { + return v + } + out, _ := sh.Output("git", "describe", "--abbrev=0") + + if out == "" { + return "v0.0.0" + } + + return out +}() + +var concurrency = func() int { + if v := os.Getenv("CONCURRENCY"); v != "" { + pv, err := strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + panic(err) + } + return int(pv) + } + return runtime.NumCPU() +}() + +var linterDeadline = func() time.Duration { + if v := os.Getenv("LINTER_DEADLINE"); v != "" { + d, _ := time.ParseDuration(v) + if d != 0 { + return d + } + } + return time.Second * 60 +}() + +func Log(args ...interface{}) { + if mg.Verbose() { + fmt.Println(args...) + } +} + +func init() { + // Set environment + os.Setenv("PATH", fmt.Sprintf("%s:%s", toolsBinDir, os.Getenv("PATH"))) + Log("Build PATH: ", os.Getenv("PATH")) + Log("Concurrency:", concurrency) + goSrc = func() []string { + results := new([]string) + filepath.Walk(".", func(path string, info os.FileInfo, err error) error { + // Look for files + if info.IsDir() { + return nil + } + // Exclusions + if matched, _ := filepath.Match("*/vendor/*", path); matched { + return nil + } else if matched, _ := filepath.Match(fmt.Sprintf("%s/*", toolDir), path); matched { + return nil + } else if matched, _ := filepath.Match(fmt.Sprintf("%s/*", binDir), path); matched { + return nil + } else if matched, _ := filepath.Match(fmt.Sprintf("%s/*", releaseDir), path); matched { + return nil + } + + if matched, _ := filepath.Match("*.go", path); !matched { + return nil + } + + *results = append(*results, path) + return nil + }) + return *results + }() + goDirs = func() []string { + resultMap := make(map[string]struct{}) + for _, path := range goSrc { + absDir, err := filepath.Abs(filepath.Dir(path)) + if err != nil { + panic(err) + } + resultMap[absDir] = struct{}{} + } + results := []string{} + for k := range resultMap { + results = 
append(results, k) + } + return results + }() + goPkgs = func() []string { + results := []string{} + out, err := sh.Output("go", "list", "./...") + if err != nil { + panic(err) + } + for _, line := range strings.Split(out, "\n") { + if !strings.Contains(line, "/vendor/") { + results = append(results, line) + } + } + return results + }() + goCmds = func() []string { + results := []string{} + + finfos, err := ioutil.ReadDir(cmdDir) + if err != nil { + panic(err) + } + for _, finfo := range finfos { + results = append(results, finfo.Name()) + } + return results + }() + + // Ensure output dirs exist + for _, dir := range outputDirs { + os.MkdirAll(dir, os.FileMode(0777)) + } +} + +func mustStr(r string, err error) string { + if err != nil { + panic(err) + } + return r +} + +func getCoreTools() []string { + staticTools := []string{ + "github.com/kardianos/govendor", + "github.com/wadey/gocovmerge", + "github.com/mattn/goveralls", + "github.com/tmthrgd/go-bindata/go-bindata", + "github.com/GoASTScanner/gas/cmd/gas", // workaround for Ast scanner + "github.com/alecthomas/gometalinter", + } + return staticTools +} + +func getMetalinters() []string { + // Gometalinter should now be on the command line + dynamicTools := []string{} + + goMetalinterHelp, _ := sh.Output("gometalinter", "--help") + linterRx := regexp.MustCompile(`\s+\w+:\s*\((.+)\)`) + for _, l := range strings.Split(goMetalinterHelp, "\n") { + linter := linterRx.FindStringSubmatch(l) + if len(linter) > 1 { + dynamicTools = append(dynamicTools, linter[1]) + } + } + return dynamicTools +} + +func ensureVendorSrcLink() error { + Log("Symlink vendor to tools dir") + if err := sh.Rm(toolsSrcDir); err != nil { + return err + } + if err := os.Symlink(toolsVendorDir, toolsSrcDir); err != nil { + return err + } + return nil +} + +// concurrencyLimitedBuild executes a certain number of commands limited by concurrency +func concurrencyLimitedBuild(buildCmds ...interface{}) error { + resultsCh := make(chan error, 
len(buildCmds)) + concurrencyControl := make(chan struct{}, concurrency) + for _, buildCmd := range buildCmds { + go func(buildCmd interface{}) { + concurrencyControl <- struct{}{} + resultsCh <- buildCmd.(func() error)() + <-concurrencyControl + + }(buildCmd) + } + // Doesn't work at the moment + // mg.Deps(buildCmds...) + results := []error{} + var resultErr error = nil + for len(results) < len(buildCmds) { + err := <-resultsCh + results = append(results, err) + if err != nil { + fmt.Println(err) + resultErr = errors.New("parallel build failed") + } + fmt.Printf("Finished %v of %v\n", len(results), len(buildCmds)) + } + + return resultErr +} + +// Tools builds build tools of the project and is depended on by all other build targets. +func Tools() (err error) { + // Catch panics and convert to errors + defer func() { + if perr := recover(); perr != nil { + err = perr.(error) + } + }() + + if err := ensureVendorSrcLink(); err != nil { + return err + } + + toolBuild := func(toolType string, tools ...string) error { + toolTargets := []interface{}{} + for _, toolImport := range tools { + toolParts := strings.Split(toolImport, "/") + toolBin := path.Join(toolsBinDir, toolParts[len(toolParts)-1]) + Log("Check for changes:", toolBin, toolsVendorDir) + changed, terr := target.Dir(toolBin, toolsVendorDir) + if terr != nil { + if !os.IsNotExist(terr) { + panic(terr) + } + changed = true + } + if changed { + localToolImport := toolImport + f := func() error { return sh.RunWith(toolsEnv, "go", "install", "-v", localToolImport) } + toolTargets = append(toolTargets, f) + } + } + + Log("Build", toolType, "tools") + if berr := concurrencyLimitedBuild(toolTargets...); berr != nil { + return berr + } + return nil + } + + if berr := toolBuild("static", getCoreTools()...); berr != nil { + return berr + } + + if berr := toolBuild("static", getMetalinters()...); berr != nil { + return berr + } + + return nil +} + +// UpdateTools automatically updates tool dependencies to the latest 
version. +func UpdateTools() error { + if err := ensureVendorSrcLink(); err != nil { + return err + } + + // Ensure govendor is up to date without doing anything + govendorPkg := "github.com/kardianos/govendor" + govendorParts := strings.Split(govendorPkg, "/") + govendorBin := path.Join(toolsBinDir, govendorParts[len(govendorParts)-1]) + + sh.RunWith(toolsEnv, "go", "get", "-v", "-u", govendorPkg) + + if changed, cerr := target.Dir(govendorBin, toolsSrcDir); changed || os.IsNotExist(cerr) { + if err := sh.RunWith(toolsEnv, "go", "install", "-v", govendorPkg); err != nil { + return err + } + } else if cerr != nil { + panic(cerr) + } + + // Set current directory so govendor has the right path + previousPwd, wderr := os.Getwd() + if wderr != nil { + return wderr + } + if err := os.Chdir(toolDir); err != nil { + return err + } + + // govendor fetch core tools + for _, toolImport := range append(getCoreTools(), getMetalinters()...) { + sh.RunV("govendor", "fetch", "-v", toolImport) + } + + // change back to original working directory + if err := os.Chdir(previousPwd); err != nil { + return err + } + return nil +} + +// Assets builds binary assets to be bundled into the binary. +func Assets() error { + mg.Deps(Tools) + + if err := os.MkdirAll("assets/generated", os.FileMode(0777)); err != nil { + return err + } + + return sh.RunV("go-bindata", "-pkg=assets", "-o", "assets/bindata.go", "-ignore=bindata.go", + "-ignore=.*.map$", "-prefix=assets/generated", "assets/generated/...") +} + +// Lint runs gometalinter for code quality. CI will run this before accepting PRs. +func Lint() error { + mg.Deps(Tools) + args := []string{"-j", fmt.Sprintf("%v", concurrency), fmt.Sprintf("--deadline=%s", + linterDeadline.String()), "--enable-all", "--line-length=120", + "--disable=gocyclo", "--disable=testify", "--disable=test", "--exclude=assets/bindata.go"} + return sh.RunV("gometalinter", append(args, goDirs...)...) +} + +// Style checks formatting of the file. 
CI will run this before accepting PRs. +func Style() error { + mg.Deps(Tools) + args := []string{"--disable-all", "--enable=gofmt", "--enable=goimports"} + return sh.RunV("gometalinter", append(args, goSrc...)...) +} + +// Fmt automatically formats all source code files +func Fmt() error { + mg.Deps(Tools) + fmtErr := sh.RunV("gofmt", append([]string{"-s", "-w"}, goSrc...)...) + if fmtErr != nil { + return fmtErr + } + impErr := sh.RunV("goimports", append([]string{"-w"}, goSrc...)...) + if impErr != nil { + return impErr + } + return nil +} + +func listCoverageFiles() ([]string, error) { + result := []string{} + finfos, derr := ioutil.ReadDir(coverageDir) + if derr != nil { + return result, derr + } + for _, finfo := range finfos { + result = append(result, path.Join(coverageDir, finfo.Name())) + } + return result, nil +} + +// Test runs the test suite +func Test() error { + mg.Deps(Tools) + + // Ensure coverage directory exists + if err := os.MkdirAll(coverageDir, os.FileMode(0777)); err != nil { + return err + } + + // Clean up coverage directory + coverFiles, derr := listCoverageFiles() + if derr != nil { + return derr + } + for _, coverFile := range coverFiles { + if err := sh.Rm(coverFile); err != nil { + return err + } + } + + // Run tests + coverProfiles := []string{} + for _, pkg := range goPkgs { + coverProfile := path.Join(coverageDir, fmt.Sprintf("%s%s", strings.Replace(pkg, "/", "-", -1), ".out")) + testErr := sh.Run("go", "test", "-v", "-covermode", "count", fmt.Sprintf("-coverprofile=%s", coverProfile), + pkg) + if testErr != nil { + return testErr + } + coverProfiles = append(coverProfiles, coverProfile) + } + + return nil +} + +// Build the integration test binary +func IntegrationTestBinary() error { + changed, err := target.Path("postgres_exporter_integration_test", goSrc...) 
+ if (changed && (err == nil)) || os.IsNotExist(err) { + return sh.RunWith(map[string]string{"CGO_ENABLED": "0"}, "go", "test", "./cmd/postgres_exporter", + "-c", "-tags", "integration", + "-a", "-ldflags", "-extldflags '-static'", "-X", fmt.Sprintf("main.Version=%s", version), + "-o", "postgres_exporter_integration_test", "-cover", "-covermode", "count") + } + return err +} + +// TestIntegration runs integration tests +func TestIntegration() error { + mg.Deps(Binary, IntegrationTestBinary) + + exporterPath := mustStr(filepath.Abs("postgres_exporter")) + testBinaryPath := mustStr(filepath.Abs("postgres_exporter_integration_test")) + testScriptPath := mustStr(filepath.Abs("postgres_exporter_integration_test_script")) + + integrationCoverageProfile := path.Join(coverageDir, "cover.integration.out") + + return sh.RunV("cmd/postgres_exporter/tests/test-smoke", exporterPath, + fmt.Sprintf("%s %s %s", testScriptPath, testBinaryPath, integrationCoverageProfile)) +} + +// Coverage sums up the coverage profiles in .coverage. It does not clean up after itself or before. +func Coverage() error { + // Clean up coverage directory + coverFiles, derr := listCoverageFiles() + if derr != nil { + return derr + } + + mergedCoverage, err := sh.Output("gocovmerge", coverFiles...) 
+ if err != nil { + return err + } + return ioutil.WriteFile(constCoverFile, []byte(mergedCoverage), os.FileMode(0777)) +} + +// All runs a full suite suitable for CI +func All() error { + mg.SerialDeps(Style, Lint, Test, TestIntegration, Coverage, Release) + return nil +} + +// Release builds release archives under the release/ directory +func Release() error { + mg.Deps(ReleaseBin) + + for _, platform := range platforms { + owd, wderr := os.Getwd() + if wderr != nil { + return wderr + } + os.Chdir(binDir) + + if platform.OS == "windows" { + // build a zip binary as well + err := archiver.Zip.Make(fmt.Sprintf("%s.zip", platform.ReleaseBase()), []string{platform.ArchiveDir()}) + if err != nil { + return err + } + } + // build tar gz + err := archiver.TarGz.Make(fmt.Sprintf("%s.tar.gz", platform.ReleaseBase()), []string{platform.ArchiveDir()}) + if err != nil { + return err + } + os.Chdir(owd) + } + + return nil +} + +func makeBuilder(cmd string, platform Platform) func() error { + f := func() error { + // Depend on assets + mg.Deps(Assets) + + cmdSrc := fmt.Sprintf("./%s/%s", mustStr(filepath.Rel(curDir, cmdDir)), cmd) + + Log("Make platform binary directory:", platform.PlatformDir()) + if err := os.MkdirAll(platform.PlatformDir(), os.FileMode(0777)); err != nil { + return err + } + + Log("Checking for changes:", platform.PlatformBin(cmd)) + if changed, err := target.Path(platform.PlatformBin(cmd), goSrc...); !changed { + if err != nil { + if !os.IsNotExist(err) { + return err + } + } else { + return nil + } + } + + fmt.Println("Building", platform.PlatformBin(cmd)) + return sh.RunWith(map[string]string{"CGO_ENABLED": "0", "GOOS": platform.OS, "GOARCH": platform.Arch}, + "go", "build", "-a", "-ldflags", fmt.Sprintf("-extldflags '-static' -X version.Version=%s", version), + "-o", platform.PlatformBin(cmd), cmdSrc) + } + return f +} + +func getCurrentPlatform() *Platform { + var curPlatform *Platform + for _, p := range platforms { + if p.OS == runtime.GOOS && p.Arch 
== runtime.GOARCH { + storedP := p + curPlatform = &storedP + } + } + Log("Determined current platform:", curPlatform) + return curPlatform +} + +// Binary build a binary for the current platform +func Binary() error { + curPlatform := getCurrentPlatform() + if curPlatform == nil { + return errors.New("current platform is not supported") + } + + for _, cmd := range goCmds { + err := makeBuilder(cmd, *curPlatform)() + if err != nil { + return err + } + // Make a root symlink to the build + cmdPath := path.Join(curDir, cmd) + os.Remove(cmdPath) + if err := os.Symlink(curPlatform.PlatformBin(cmd), cmdPath); err != nil { + return err + } + } + + return nil +} + +// ReleaseBin builds cross-platform release binaries under the bin/ directory +func ReleaseBin() error { + buildCmds := []interface{}{} + + for _, cmd := range goCmds { + for _, platform := range platforms { + buildCmds = append(buildCmds, makeBuilder(cmd, platform)) + } + } + + resultsCh := make(chan error, len(buildCmds)) + concurrencyControl := make(chan struct{}, concurrency) + for _, buildCmd := range buildCmds { + go func(buildCmd interface{}) { + concurrencyControl <- struct{}{} + resultsCh <- buildCmd.(func() error)() + <-concurrencyControl + + }(buildCmd) + } + // Doesn't work at the moment + // mg.Deps(buildCmds...) 
+ results := []error{} + var resultErr error = nil + for len(results) < len(buildCmds) { + err := <-resultsCh + results = append(results, err) + if err != nil { + fmt.Println(err) + resultErr = errors.New("parallel build failed") + } + fmt.Printf("Finished %v of %v\n", len(results), len(buildCmds)) + } + + return resultErr +} + +// Docker builds the docker image +func Docker() error { + mg.Deps(Binary) + p := getCurrentPlatform() + if p == nil { + return errors.New("current platform is not supported") + } + + return sh.RunV("docker", "build", + fmt.Sprintf("--build-arg=binary=%s", + mustStr(filepath.Rel(curDir, p.PlatformBin("postgres_exporter")))), + "-t", containerName, ".") +} + +// Clean deletes build output and cleans up the working directory +func Clean() error { + for _, name := range goCmds { + if err := sh.Rm(path.Join(binDir, name)); err != nil { + return err + } + } + + for _, name := range outputDirs { + if err := sh.Rm(name); err != nil { + return err + } + } + return nil +} + +// Debug prints the value of internal state variables +func Debug() error { + fmt.Println("Source Files:", goSrc) + fmt.Println("Packages:", goPkgs) + fmt.Println("Directories:", goDirs) + fmt.Println("Command Paths:", goCmds) + fmt.Println("Output Dirs:", outputDirs) + fmt.Println("Tool Src Dir:", toolsSrcDir) + fmt.Println("Tool Vendor Dir:", toolsVendorDir) + fmt.Println("Tool GOPATH:", toolsGoPath) + fmt.Println("PATH:", os.Getenv("PATH")) + return nil +} + +// Autogen configure local git repository with commit hooks +func Autogen() error { + fmt.Println("Installing git hooks in local repository...") + return os.Link(path.Join(curDir, toolDir, "pre-commit"), ".git/hooks/pre-commit") +} diff --git a/postgres_exporter_integration_test_script b/postgres_exporter_integration_test_script index 4d663694..ebaf83d9 100755 --- a/postgres_exporter_integration_test_script +++ b/postgres_exporter_integration_test_script @@ -7,6 +7,9 @@ shift output_cov=$1 shift +echo "Test Binary: 
$test_binary" 1>&2 +echo "Coverage File: $output_cov" 1>&2 + echo "mode: count" > $output_cov test_cov=$(mktemp) diff --git a/tools/Makefile b/tools/Makefile deleted file mode 100644 index 331c0b6b..00000000 --- a/tools/Makefile +++ /dev/null @@ -1,67 +0,0 @@ -# Makefile to build the tools used in the build system. -# If recreating from scratch, you will need a local install of govendor -# and to run govendor init in this folder before running govendor fetch. - -# Ensure we use local bin dir -export PATH := bin:$(PATH) -SHELL := env PATH=$(PATH) /bin/bash - -THIS_FILE := $(lastword $(MAKEFILE_LIST)) - -# This function is used to get the linters used by metalinter -get_metalinters := gometalinter --help | grep -oP '\s+\w+:\s*\(.+\)' | tr -s ' ' | cut -d' ' -f3 | grep -oP '[^()]+' - -# This is a list of external tools we want to vendor -TOOL_SRCS := github.com/kardianos/govendor \ - github.com/wadey/gocovmerge \ - github.com/mattn/goveralls \ - github.com/alecthomas/gometalinter - -# This is populated by imported dependencies from gometalinter -METATOOL_SRCS := - -GO_SRC := $(shell find $(SOURCEDIR) -name '*.go') - -GO := GOPATH=$(shell pwd) go - -DEFAULT: all - -tools.deps: $(GO_SRC) - @# Generate build patterns for static tools - @for pkg in $(TOOL_SRCS); do \ - echo -e "bin/$$(basename $$pkg): $$GO_SRC\n\t\$$(GO) install -v $$pkg" ; \ - done > tools.deps - --include tools.deps - -metatools.deps: bin/gometalinter $(GO_SRC) - # Generate build patterns for metalinters tools - @echo -e "METATOOL_SRCS+=$(shell $(get_metalinters))" > metatools.deps - @for pkg in $(shell $(get_metalinters)) ; do \ - echo -e "bin/$$(basename $$pkg): $$GO_SRC\n\t\$$(GO) install -v $$pkg" ; \ - done >> metatools.deps - --include metatools.deps - -update: - # Fetch govendor, then rebuild govendor. - govendor fetch github.com/kardianos/govendor - $(GO) install -v github.com/kardianos/govendor - # Fetch gometalinter and rebuild gometalinter. 
- govendor fetch github.com/alecthomas/gometalinter - $(GO) install -v github.com/alecthomas/gometalinter - $(MAKE) -f $(THIS_FILE) update-phase-2 - -update-phase-2: - # Fetch the new metalinter list. - for pkg in $(TOOL_SRCS) $$($(get_metalinters)); do \ - govendor fetch -v $$pkg ; \ - done - -clean: - rm -rf bin pkg tools.deps metatools.deps - -all: $(addprefix bin/,$(notdir $(TOOL_SRCS) $(METATOOL_SRCS) )) - -# TOOL_SRCS is included here since we'll never really have these files. -.PHONY: all update clean $(TOOL_SRCS) $(METATOOL_SRCS) diff --git a/tools/src b/tools/src index 5657f6ea..f8cf471b 120000 --- a/tools/src +++ b/tools/src @@ -1 +1 @@ -vendor \ No newline at end of file +vendor \ No newline at end of file diff --git a/tools/vendor/github.com/GoASTScanner/gas/README.md b/tools/vendor/github.com/GoASTScanner/gas/README.md index 81c2ed5e..d2c3bbc5 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/README.md +++ b/tools/vendor/github.com/GoASTScanner/gas/README.md @@ -18,6 +18,10 @@ You may obtain a copy of the License [here](http://www.apache.org/licenses/LICEN Gas is still in alpha and accepting feedback from early adopters. We do not consider it production ready at this time. +### Install + +`$ go get github.com/GoASTScanner/gas/cmd/gas/...` + ### Usage Gas can be configured to only run a subset of rules, to exclude certain file @@ -37,6 +41,7 @@ or to specify a set of rules to explicitly exclude using the '-exclude=' flag. - G103: Audit the use of unsafe block - G104: Audit errors not checked - G105: Audit the use of math/big.Int.Exp + - G106: Audit the use of ssh.InsecureIgnoreHostKey - G201: SQL query construction using format string - G202: SQL query construction using string concatenation - G203: Use of unescaped data in HTML templates @@ -64,12 +69,8 @@ $ gas -exclude=G303 ./... 
#### Excluding files: -Gas can be told to \ignore paths that match a supplied pattern using the 'skip' command line option. This is -accomplished via [go-glob](github.com/ryanuber/go-glob). Multiple patterns can be specified as follows: - -``` -$ gas -skip=tests* -skip=*_example.go ./... -``` +Gas will ignore dependencies in your vendor directory any files +that are not considered build artifacts by the compiler (so test files). #### Annotating code @@ -104,7 +105,7 @@ $ gas -nosec=true ./... ### Output formats -Gas currently supports text, json and csv output formats. By default +Gas currently supports text, json, yaml, csv and JUnit XML output formats. By default results will be reported to stdout, but can also be written to an output file. The output format is controlled by the '-fmt' flag, and the output file is controlled by the '-out' flag as follows: @@ -113,19 +114,21 @@ file. The output format is controlled by the '-fmt' flag, and the output file is $ gas -fmt=json -out=results.json *.go ``` -### Docker container +### Generate TLS rule -A Dockerfile is included with the Gas source code to provide a container that -allows users to easily run Gas on their code. It builds Gas, then runs it on -all Go files in your current directory. Use the following commands to build -and run locally: +The configuration of TLS rule can be generated from [Mozilla's TLS ciphers recommendation](https://statics.tls.security.mozilla.org/server-side-tls-conf.json). -To build: (run command in cloned Gas source code directory) - docker build --build-arg http_proxy --build-arg https_proxy - --build-arg no_proxy -t goastscanner/gas:latest . -To run: (run command in desired directory with Go files) - docker run -v $PWD:$PWD --workdir $PWD goastscanner/gas:latest +First you need to install the generator tool: -Note: Docker version 17.05 or later is required (to permit multistage build). ``` +go get github.com/GoASTScanner/gas/cmd/tlsconfig/... 
+```
+
+You can now invoke `go generate` in the root of the project:
+
+```
+go generate ./...
+```
+
+This will generate the `rules/tls_config.go` file which will contain the current ciphers recommendation from Mozilla.
diff --git a/tools/vendor/github.com/GoASTScanner/gas/analyzer.go b/tools/vendor/github.com/GoASTScanner/gas/analyzer.go
new file mode 100644
index 00000000..b7a5e8f8
--- /dev/null
+++ b/tools/vendor/github.com/GoASTScanner/gas/analyzer.go
@@ -0,0 +1,197 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gas holds the central scanning logic used by GAS
+package gas
+
+import (
+	"go/ast"
+	"go/build"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"log"
+	"os"
+	"path"
+	"reflect"
+	"strings"
+
+	"path/filepath"
+
+	"golang.org/x/tools/go/loader"
+)
+
+// The Context is populated with data parsed from the source code as it is scanned.
+// It is passed through to all rule functions as they are called. Rules may use
+// this data in conjunction with the encountered AST node.
+type Context struct {
+	FileSet  *token.FileSet
+	Comments ast.CommentMap
+	Info     *types.Info
+	Pkg      *types.Package
+	Root     *ast.File
+	Config   map[string]interface{}
+	Imports  *ImportTracker
+}
+
+// Metrics used when reporting information about a scanning run.
+type Metrics struct { + NumFiles int `json:"files"` + NumLines int `json:"lines"` + NumNosec int `json:"nosec"` + NumFound int `json:"found"` +} + +// Analyzer object is the main object of GAS. It has methods traverse an AST +// and invoke the correct checking rules as on each node as required. +type Analyzer struct { + ignoreNosec bool + ruleset RuleSet + context *Context + config Config + logger *log.Logger + issues []*Issue + stats *Metrics +} + +// NewAnalyzer builds a new anaylzer. +func NewAnalyzer(conf Config, logger *log.Logger) *Analyzer { + ignoreNoSec := false + if setting, err := conf.GetGlobal("nosec"); err == nil { + ignoreNoSec = setting == "true" || setting == "enabled" + } + if logger == nil { + logger = log.New(os.Stderr, "[gas]", log.LstdFlags) + } + return &Analyzer{ + ignoreNosec: ignoreNoSec, + ruleset: make(RuleSet), + context: &Context{}, + config: conf, + logger: logger, + issues: make([]*Issue, 0, 16), + stats: &Metrics{}, + } +} + +// LoadRules instantiates all the rules to be used when analyzing source +// packages +func (gas *Analyzer) LoadRules(ruleDefinitions ...RuleBuilder) { + for _, builder := range ruleDefinitions { + r, nodes := builder(gas.config) + gas.ruleset.Register(r, nodes...) 
+ } +} + +// Process kicks off the analysis process for a given package +func (gas *Analyzer) Process(packagePaths ...string) error { + packageConfig := loader.Config{ + Build: &build.Default, + ParserMode: parser.ParseComments, + AllowErrors: true, + } + for _, packagePath := range packagePaths { + abspath, err := filepath.Abs(packagePath) + if err != nil { + return err + } + gas.logger.Println("Searching directory:", abspath) + + basePackage, err := build.Default.ImportDir(packagePath, build.ImportComment) + if err != nil { + return err + } + + var packageFiles []string + for _, filename := range basePackage.GoFiles { + packageFiles = append(packageFiles, path.Join(packagePath, filename)) + } + + packageConfig.CreateFromFilenames(basePackage.Name, packageFiles...) + } + + builtPackage, err := packageConfig.Load() + if err != nil { + return err + } + + for _, pkg := range builtPackage.Created { + gas.logger.Println("Checking package:", pkg.String()) + for _, file := range pkg.Files { + gas.logger.Println("Checking file:", builtPackage.Fset.File(file.Pos()).Name()) + gas.context.FileSet = builtPackage.Fset + gas.context.Config = gas.config + gas.context.Comments = ast.NewCommentMap(gas.context.FileSet, file, file.Comments) + gas.context.Root = file + gas.context.Info = &pkg.Info + gas.context.Pkg = pkg.Pkg + gas.context.Imports = NewImportTracker() + gas.context.Imports.TrackPackages(gas.context.Pkg.Imports()...) + ast.Walk(gas, file) + gas.stats.NumFiles++ + gas.stats.NumLines += builtPackage.Fset.File(file.Pos()).LineCount() + } + } + return nil +} + +// ignore a node (and sub-tree) if it is tagged with a "#nosec" comment +func (gas *Analyzer) ignore(n ast.Node) bool { + if groups, ok := gas.context.Comments[n]; ok && !gas.ignoreNosec { + for _, group := range groups { + if strings.Contains(group.Text(), "#nosec") { + gas.stats.NumNosec++ + return true + } + } + } + return false +} + +// Visit runs the GAS visitor logic over an AST created by parsing go code. 
+// Rule methods added with AddRule will be invoked as necessary. +func (gas *Analyzer) Visit(n ast.Node) ast.Visitor { + if !gas.ignore(n) { + + // Track aliased and initialization imports + gas.context.Imports.TrackImport(n) + + for _, rule := range gas.ruleset.RegisteredFor(n) { + issue, err := rule.Match(n, gas.context) + if err != nil { + file, line := GetLocation(n, gas.context) + file = path.Base(file) + gas.logger.Printf("Rule error: %v => %s (%s:%d)\n", reflect.TypeOf(rule), err, file, line) + } + if issue != nil { + gas.issues = append(gas.issues, issue) + gas.stats.NumFound++ + } + } + return gas + } + return nil +} + +// Report returns the current issues discovered and the metrics about the scan +func (gas *Analyzer) Report() ([]*Issue, *Metrics) { + return gas.issues, gas.stats +} + +// Reset clears state such as context, issues and metrics from the configured analyzer +func (gas *Analyzer) Reset() { + gas.context = &Context{} + gas.issues = make([]*Issue, 0, 16) + gas.stats = &Metrics{} +} diff --git a/tools/vendor/github.com/GoASTScanner/gas/core/call_list.go b/tools/vendor/github.com/GoASTScanner/gas/call_list.go similarity index 62% rename from tools/vendor/github.com/GoASTScanner/gas/core/call_list.go rename to tools/vendor/github.com/GoASTScanner/gas/call_list.go index 20020245..e277950b 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/core/call_list.go +++ b/tools/vendor/github.com/GoASTScanner/gas/call_list.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package core +package gas import ( "go/ast" @@ -19,23 +19,23 @@ import ( type set map[string]bool -/// CallList is used to check for usage of specific packages -/// and functions. +// CallList is used to check for usage of specific packages +// and functions. 
type CallList map[string]set -/// NewCallList creates a new empty CallList +// NewCallList creates a new empty CallList func NewCallList() CallList { return make(CallList) } -/// AddAll will add several calls to the call list at once +// AddAll will add several calls to the call list at once func (c CallList) AddAll(selector string, idents ...string) { for _, ident := range idents { c.Add(selector, ident) } } -/// Add a selector and call to the call list +// Add a selector and call to the call list func (c CallList) Add(selector, ident string) { if _, ok := c[selector]; !ok { c[selector] = make(set) @@ -43,7 +43,7 @@ func (c CallList) Add(selector, ident string) { c[selector][ident] = true } -/// Contains returns true if the package and function are +// Contains returns true if the package and function are /// members of this call list. func (c CallList) Contains(selector, ident string) bool { if idents, ok := c[selector]; ok { @@ -53,21 +53,26 @@ func (c CallList) Contains(selector, ident string) bool { return false } -/// ContainsCallExpr resolves the call expression name and type +// ContainsCallExpr resolves the call expression name and type /// or package and determines if it exists within the CallList -func (c CallList) ContainsCallExpr(n ast.Node, ctx *Context) bool { +func (c CallList) ContainsCallExpr(n ast.Node, ctx *Context) *ast.CallExpr { selector, ident, err := GetCallInfo(n, ctx) if err != nil { - return false - } - // Try direct resolution - if c.Contains(selector, ident) { - return true + return nil } - // Also support explicit path - if path, ok := GetImportPath(selector, ctx); ok { - return c.Contains(path, ident) + // Use only explicit path to reduce conflicts + if path, ok := GetImportPath(selector, ctx); ok && c.Contains(path, ident) { + return n.(*ast.CallExpr) } - return false + + /* + // Try direct resolution + if c.Contains(selector, ident) { + log.Printf("c.Contains == true, %s, %s.", selector, ident) + return n.(*ast.CallExpr) + } + */ + 
+ return nil } diff --git a/tools/vendor/github.com/GoASTScanner/gas/filelist.go b/tools/vendor/github.com/GoASTScanner/gas/cmd/gas/filelist.go similarity index 100% rename from tools/vendor/github.com/GoASTScanner/gas/filelist.go rename to tools/vendor/github.com/GoASTScanner/gas/cmd/gas/filelist.go diff --git a/tools/vendor/github.com/GoASTScanner/gas/cmd/gas/main.go b/tools/vendor/github.com/GoASTScanner/gas/cmd/gas/main.go new file mode 100644 index 00000000..a04c1510 --- /dev/null +++ b/tools/vendor/github.com/GoASTScanner/gas/cmd/gas/main.go @@ -0,0 +1,254 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "flag" + "fmt" + "log" + "os" + "regexp" + "sort" + "strings" + + "github.com/GoASTScanner/gas" + "github.com/GoASTScanner/gas/output" + "github.com/GoASTScanner/gas/rules" + "github.com/kisielk/gotool" +) + +const ( + usageText = ` +GAS - Go AST Scanner + +Gas analyzes Go source code to look for common programming mistakes that +can lead to security problems. + +USAGE: + + # Check a single package + $ gas $GOPATH/src/github.com/example/project + + # Check all packages under the current directory and save results in + # json format. + $ gas -fmt=json -out=results.json ./... + + # Run a specific set of rules (by default all rules will be run): + $ gas -include=G101,G203,G401 ./... 
+ + # Run all rules except the provided + $ gas -exclude=G101 $GOPATH/src/github.com/example/project/... + +` +) + +var ( + // #nosec flag + flagIgnoreNoSec = flag.Bool("nosec", false, "Ignores #nosec comments when set") + + // format output + flagFormat = flag.String("fmt", "text", "Set output format. Valid options are: json, yaml, csv, junit-xml, html, or text") + + // output file + flagOutput = flag.String("out", "", "Set output file for results") + + // config file + flagConfig = flag.String("conf", "", "Path to optional config file") + + // quiet + flagQuiet = flag.Bool("quiet", false, "Only show output when errors are found") + + // rules to explicitly include + flagRulesInclude = flag.String("include", "", "Comma separated list of rules IDs to include. (see rule list)") + + // rules to explicitly exclude + flagRulesExclude = flag.String("exclude", "", "Comma separated list of rules IDs to exclude. (see rule list)") + + // log to file or stderr + flagLogfile = flag.String("log", "", "Log messages to file rather than stderr") + + // sort the issues by severity + flagSortIssues = flag.Bool("sort", true, "Sort issues by severity") + + logger *log.Logger +) + +// #nosec +func usage() { + + fmt.Fprintln(os.Stderr, usageText) + fmt.Fprint(os.Stderr, "OPTIONS:\n\n") + flag.PrintDefaults() + fmt.Fprint(os.Stderr, "\n\nRULES:\n\n") + + // sorted rule list for ease of reading + rl := rules.Generate() + keys := make([]string, 0, len(rl)) + for key := range rl { + keys = append(keys, key) + } + sort.Strings(keys) + for _, k := range keys { + v := rl[k] + fmt.Fprintf(os.Stderr, "\t%s: %s\n", k, v.Description) + } + fmt.Fprint(os.Stderr, "\n") +} + +func loadConfig(configFile string) (gas.Config, error) { + config := gas.NewConfig() + if configFile != "" { + file, err := os.Open(configFile) + if err != nil { + return nil, err + } + defer file.Close() + if _, err := config.ReadFrom(file); err != nil { + return nil, err + } + } + if *flagIgnoreNoSec { + 
config.SetGlobal("nosec", "true") + } + return config, nil +} + +func loadRules(include, exclude string) rules.RuleList { + var filters []rules.RuleFilter + if include != "" { + logger.Printf("including rules: %s", include) + including := strings.Split(include, ",") + filters = append(filters, rules.NewRuleFilter(false, including...)) + } else { + logger.Println("including rules: default") + } + + if exclude != "" { + logger.Printf("excluding rules: %s", exclude) + excluding := strings.Split(exclude, ",") + filters = append(filters, rules.NewRuleFilter(true, excluding...)) + } else { + logger.Println("excluding rules: default") + } + return rules.Generate(filters...) +} + +func saveOutput(filename, format string, issues []*gas.Issue, metrics *gas.Metrics) error { + if filename != "" { + outfile, err := os.Create(filename) + if err != nil { + return err + } + defer outfile.Close() + err = output.CreateReport(outfile, format, issues, metrics) + if err != nil { + return err + } + } else { + err := output.CreateReport(os.Stdout, format, issues, metrics) + if err != nil { + return err + } + } + return nil +} + +func main() { + + // Setup usage description + flag.Usage = usage + + // Parse command line arguments + flag.Parse() + + // Ensure at least one file was specified + if flag.NArg() == 0 { + fmt.Fprintf(os.Stderr, "\nError: FILE [FILE...] or './...' 
expected\n") // #nosec + flag.Usage() + os.Exit(1) + } + + // Setup logging + logWriter := os.Stderr + if *flagLogfile != "" { + var e error + logWriter, e = os.Create(*flagLogfile) + if e != nil { + flag.Usage() + log.Fatal(e) + } + } + logger = log.New(logWriter, "[gas] ", log.LstdFlags) + + // Load config + config, err := loadConfig(*flagConfig) + if err != nil { + logger.Fatal(err) + } + + // Load enabled rule definitions + ruleDefinitions := loadRules(*flagRulesInclude, *flagRulesExclude) + if len(ruleDefinitions) <= 0 { + logger.Fatal("cannot continue: no rules are configured.") + } + + // Create the analyzer + analyzer := gas.NewAnalyzer(config, logger) + analyzer.LoadRules(ruleDefinitions.Builders()...) + + vendor := regexp.MustCompile(`[\\/]vendor([\\/]|$)`) + + var packages []string + // Iterate over packages on the import paths + for _, pkg := range gotool.ImportPaths(flag.Args()) { + + // Skip vendor directory + if vendor.MatchString(pkg) { + continue + } + packages = append(packages, pkg) + } + + if err := analyzer.Process(packages...); err != nil { + logger.Fatal(err) + } + + // Collect the results + issues, metrics := analyzer.Report() + + issuesFound := len(issues) > 0 + // Exit quietly if nothing was found + if !issuesFound && *flagQuiet { + os.Exit(0) + } + + // Sort the issue by severity + if *flagSortIssues { + sortIssues(issues) + } + + // Create output report + if err := saveOutput(*flagOutput, *flagFormat, issues, metrics); err != nil { + logger.Fatal(err) + } + + // Finialize logging + logWriter.Close() // #nosec + + // Do we have an issue? 
If so exit 1
+	if issuesFound {
+		os.Exit(1)
+	}
+}
diff --git a/tools/vendor/github.com/GoASTScanner/gas/cmd/gas/sort_issues.go b/tools/vendor/github.com/GoASTScanner/gas/cmd/gas/sort_issues.go
new file mode 100644
index 00000000..5557f96e
--- /dev/null
+++ b/tools/vendor/github.com/GoASTScanner/gas/cmd/gas/sort_issues.go
@@ -0,0 +1,20 @@
+package main
+
+import (
+	"sort"
+
+	"github.com/GoASTScanner/gas"
+)
+
+type sortBySeverity []*gas.Issue
+
+func (s sortBySeverity) Len() int { return len(s) }
+
+func (s sortBySeverity) Less(i, j int) bool { return s[i].Severity > s[j].Severity }
+
+func (s sortBySeverity) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sortIssues sorts the issues by severity in descending order
+func sortIssues(issues []*gas.Issue) {
+	sort.Sort(sortBySeverity(issues))
+}
diff --git a/tools/vendor/github.com/GoASTScanner/gas/config.go b/tools/vendor/github.com/GoASTScanner/gas/config.go
new file mode 100644
index 00000000..09b97d3e
--- /dev/null
+++ b/tools/vendor/github.com/GoASTScanner/gas/config.go
@@ -0,0 +1,88 @@
+package gas
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+const (
+	// Globals are applicable to all rules and used for general
+	// configuration settings for gas.
+	Globals = "global"
+)
+
+// Config is used to provide configuration and customization to each of the rules.
+type Config map[string]interface{}
+
+// NewConfig initializes a new configuration instance. The configuration data then
+// needs to be loaded via c.ReadFrom(strings.NewReader("config data"))
+// or from a *os.File.
+func NewConfig() Config {
+	cfg := make(Config)
+	cfg[Globals] = make(map[string]string)
+	return cfg
+}
+
+// ReadFrom implements the io.ReaderFrom interface. This
+// should be used with io.Reader to load configuration from
+// file or from string etc.
+func (c Config) ReadFrom(r io.Reader) (int64, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return int64(len(data)), err + } + if err = json.Unmarshal(data, &c); err != nil { + return int64(len(data)), err + } + return int64(len(data)), nil +} + +// WriteTo implements the io.WriteTo interface. This should +// be used to save or print out the configuration information. +func (c Config) WriteTo(w io.Writer) (int64, error) { + data, err := json.Marshal(c) + if err != nil { + return int64(len(data)), err + } + return io.Copy(w, bytes.NewReader(data)) +} + +// Get returns the configuration section for the supplied key +func (c Config) Get(section string) (interface{}, error) { + settings, found := c[section] + if !found { + return nil, fmt.Errorf("Section %s not in configuration", section) + } + return settings, nil +} + +// Set section in the configuration to specified value +func (c Config) Set(section string, value interface{}) { + c[section] = value +} + +// GetGlobal returns value associated with global configuration option +func (c Config) GetGlobal(option string) (string, error) { + if globals, ok := c[Globals]; ok { + if settings, ok := globals.(map[string]string); ok { + if value, ok := settings[option]; ok { + return value, nil + } + return "", fmt.Errorf("global setting for %s not found", option) + } + } + return "", fmt.Errorf("no global config options found") + +} + +// SetGlobal associates a value with a global configuration ooption +func (c Config) SetGlobal(option, value string) { + if globals, ok := c[Globals]; ok { + if settings, ok := globals.(map[string]string); ok { + settings[option] = value + } + } +} diff --git a/tools/vendor/github.com/GoASTScanner/gas/core/analyzer.go b/tools/vendor/github.com/GoASTScanner/gas/core/analyzer.go deleted file mode 100644 index 116cf783..00000000 --- a/tools/vendor/github.com/GoASTScanner/gas/core/analyzer.go +++ /dev/null @@ -1,235 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise 
Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package core holds the central scanning logic used by GAS -package core - -import ( - "go/ast" - "go/importer" - "go/parser" - "go/token" - "go/types" - "log" - "os" - "path" - "reflect" - "strings" -) - -// ImportInfo is used to track aliased and initialization only imports. -type ImportInfo struct { - Imported map[string]string - Aliased map[string]string - InitOnly map[string]bool -} - -func NewImportInfo() *ImportInfo { - return &ImportInfo{ - make(map[string]string), - make(map[string]string), - make(map[string]bool), - } -} - -// The Context is populated with data parsed from the source code as it is scanned. -// It is passed through to all rule functions as they are called. Rules may use -// this data in conjunction withe the encoutered AST node. -type Context struct { - FileSet *token.FileSet - Comments ast.CommentMap - Info *types.Info - Pkg *types.Package - Root *ast.File - Config map[string]interface{} - Imports *ImportInfo -} - -// The Rule interface used by all rules supported by GAS. -type Rule interface { - Match(ast.Node, *Context) (*Issue, error) -} - -// A RuleSet maps lists of rules to the type of AST node they should be run on. -// The anaylzer will only invoke rules contained in the list associated with the -// type of AST node it is currently visiting. -type RuleSet map[reflect.Type][]Rule - -// Metrics used when reporting information about a scanning run. 
-type Metrics struct { - NumFiles int `json:"files"` - NumLines int `json:"lines"` - NumNosec int `json:"nosec"` - NumFound int `json:"found"` -} - -// The Analyzer object is the main object of GAS. It has methods traverse an AST -// and invoke the correct checking rules as on each node as required. -type Analyzer struct { - ignoreNosec bool - ruleset RuleSet - context *Context - logger *log.Logger - Issues []*Issue `json:"issues"` - Stats *Metrics `json:"metrics"` -} - -// NewAnalyzer builds a new anaylzer. -func NewAnalyzer(conf map[string]interface{}, logger *log.Logger) Analyzer { - if logger == nil { - logger = log.New(os.Stdout, "[gas]", 0) - } - a := Analyzer{ - ignoreNosec: conf["ignoreNosec"].(bool), - ruleset: make(RuleSet), - context: &Context{nil, nil, nil, nil, nil, nil, nil}, - logger: logger, - Issues: make([]*Issue, 0, 16), - Stats: &Metrics{0, 0, 0, 0}, - } - - // TODO(tkelsey): use the inc/exc lists - - return a -} - -func (gas *Analyzer) process(filename string, source interface{}) error { - mode := parser.ParseComments - gas.context.FileSet = token.NewFileSet() - root, err := parser.ParseFile(gas.context.FileSet, filename, source, mode) - if err == nil { - gas.context.Comments = ast.NewCommentMap(gas.context.FileSet, root, root.Comments) - gas.context.Root = root - - // here we get type info - gas.context.Info = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - Scopes: make(map[ast.Node]*types.Scope), - Implicits: make(map[ast.Node]types.Object), - } - - conf := types.Config{Importer: importer.Default()} - gas.context.Pkg, err = conf.Check("pkg", gas.context.FileSet, []*ast.File{root}, gas.context.Info) - if err != nil { - // TODO(gm) Type checker not currently considering all files within a package - // see: issue #113 - gas.logger.Printf(`Error during type checking: "%s"`, err) - err = 
nil - } - - gas.context.Imports = NewImportInfo() - for _, pkg := range gas.context.Pkg.Imports() { - gas.context.Imports.Imported[pkg.Path()] = pkg.Name() - } - ast.Walk(gas, root) - gas.Stats.NumFiles++ - } - return err -} - -// AddRule adds a rule into a rule set list mapped to the given AST node's type. -// The node is only needed for its type and is not otherwise used. -func (gas *Analyzer) AddRule(r Rule, nodes []ast.Node) { - for _, n := range nodes { - t := reflect.TypeOf(n) - if val, ok := gas.ruleset[t]; ok { - gas.ruleset[t] = append(val, r) - } else { - gas.ruleset[t] = []Rule{r} - } - } -} - -// Process reads in a source file, convert it to an AST and traverse it. -// Rule methods added with AddRule will be invoked as necessary. -func (gas *Analyzer) Process(filename string) error { - err := gas.process(filename, nil) - fun := func(f *token.File) bool { - gas.Stats.NumLines += f.LineCount() - return true - } - gas.context.FileSet.Iterate(fun) - return err -} - -// ProcessSource will convert a source code string into an AST and traverse it. -// Rule methods added with AddRule will be invoked as necessary. The string is -// identified by the filename given but no file IO will be done. -func (gas *Analyzer) ProcessSource(filename string, source string) error { - err := gas.process(filename, source) - fun := func(f *token.File) bool { - gas.Stats.NumLines += f.LineCount() - return true - } - gas.context.FileSet.Iterate(fun) - return err -} - -// ignore a node (and sub-tree) if it is tagged with a "#nosec" comment -func (gas *Analyzer) ignore(n ast.Node) bool { - if groups, ok := gas.context.Comments[n]; ok && !gas.ignoreNosec { - for _, group := range groups { - if strings.Contains(group.Text(), "#nosec") { - gas.Stats.NumNosec++ - return true - } - } - } - return false -} - -// Visit runs the GAS visitor logic over an AST created by parsing go code. -// Rule methods added with AddRule will be invoked as necessary. 
-func (gas *Analyzer) Visit(n ast.Node) ast.Visitor { - if !gas.ignore(n) { - - // Track aliased and initialization imports - if imported, ok := n.(*ast.ImportSpec); ok { - path := strings.Trim(imported.Path.Value, `"`) - if imported.Name != nil { - if imported.Name.Name == "_" { - // Initialization import - gas.context.Imports.InitOnly[path] = true - } else { - // Aliased import - gas.context.Imports.Aliased[path] = imported.Name.Name - } - } - // unsafe is not included in Package.Imports() - if path == "unsafe" { - gas.context.Imports.Imported[path] = path - } - } - - if val, ok := gas.ruleset[reflect.TypeOf(n)]; ok { - for _, rule := range val { - ret, err := rule.Match(n, gas.context) - if err != nil { - file, line := GetLocation(n, gas.context) - file = path.Base(file) - gas.logger.Printf("Rule error: %v => %s (%s:%d)\n", reflect.TypeOf(rule), err, file, line) - } - if ret != nil { - gas.Issues = append(gas.Issues, ret) - gas.Stats.NumFound++ - } - } - } - return gas - } - return nil -} diff --git a/tools/vendor/github.com/GoASTScanner/gas/core/select.go b/tools/vendor/github.com/GoASTScanner/gas/core/select.go deleted file mode 100644 index e11c946e..00000000 --- a/tools/vendor/github.com/GoASTScanner/gas/core/select.go +++ /dev/null @@ -1,404 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package core - -import ( - "fmt" - "go/ast" - "reflect" -) - -// SelectFunc is like an AST visitor, but has a richer interface. It -// is called with the current ast.Node being visitied and that nodes depth in -// the tree. The function can return true to continue traversing the tree, or -// false to end traversal here. -type SelectFunc func(ast.Node, int) bool - -func walkIdentList(list []*ast.Ident, depth int, fun SelectFunc) { - for _, x := range list { - depthWalk(x, depth, fun) - } -} - -func walkExprList(list []ast.Expr, depth int, fun SelectFunc) { - for _, x := range list { - depthWalk(x, depth, fun) - } -} - -func walkStmtList(list []ast.Stmt, depth int, fun SelectFunc) { - for _, x := range list { - depthWalk(x, depth, fun) - } -} - -func walkDeclList(list []ast.Decl, depth int, fun SelectFunc) { - for _, x := range list { - depthWalk(x, depth, fun) - } -} - -func depthWalk(node ast.Node, depth int, fun SelectFunc) { - if !fun(node, depth) { - return - } - - switch n := node.(type) { - // Comments and fields - case *ast.Comment: - - case *ast.CommentGroup: - for _, c := range n.List { - depthWalk(c, depth+1, fun) - } - - case *ast.Field: - if n.Doc != nil { - depthWalk(n.Doc, depth+1, fun) - } - walkIdentList(n.Names, depth+1, fun) - depthWalk(n.Type, depth+1, fun) - if n.Tag != nil { - depthWalk(n.Tag, depth+1, fun) - } - if n.Comment != nil { - depthWalk(n.Comment, depth+1, fun) - } - - case *ast.FieldList: - for _, f := range n.List { - depthWalk(f, depth+1, fun) - } - - // Expressions - case *ast.BadExpr, *ast.Ident, *ast.BasicLit: - - case *ast.Ellipsis: - if n.Elt != nil { - depthWalk(n.Elt, depth+1, fun) - } - - case *ast.FuncLit: - depthWalk(n.Type, depth+1, fun) - depthWalk(n.Body, depth+1, fun) - - case *ast.CompositeLit: - if n.Type != nil { - depthWalk(n.Type, depth+1, fun) - } - walkExprList(n.Elts, depth+1, fun) - - case *ast.ParenExpr: - depthWalk(n.X, depth+1, fun) - - case *ast.SelectorExpr: - depthWalk(n.X, depth+1, fun) - 
depthWalk(n.Sel, depth+1, fun) - - case *ast.IndexExpr: - depthWalk(n.X, depth+1, fun) - depthWalk(n.Index, depth+1, fun) - - case *ast.SliceExpr: - depthWalk(n.X, depth+1, fun) - if n.Low != nil { - depthWalk(n.Low, depth+1, fun) - } - if n.High != nil { - depthWalk(n.High, depth+1, fun) - } - if n.Max != nil { - depthWalk(n.Max, depth+1, fun) - } - - case *ast.TypeAssertExpr: - depthWalk(n.X, depth+1, fun) - if n.Type != nil { - depthWalk(n.Type, depth+1, fun) - } - - case *ast.CallExpr: - depthWalk(n.Fun, depth+1, fun) - walkExprList(n.Args, depth+1, fun) - - case *ast.StarExpr: - depthWalk(n.X, depth+1, fun) - - case *ast.UnaryExpr: - depthWalk(n.X, depth+1, fun) - - case *ast.BinaryExpr: - depthWalk(n.X, depth+1, fun) - depthWalk(n.Y, depth+1, fun) - - case *ast.KeyValueExpr: - depthWalk(n.Key, depth+1, fun) - depthWalk(n.Value, depth+1, fun) - - // Types - case *ast.ArrayType: - if n.Len != nil { - depthWalk(n.Len, depth+1, fun) - } - depthWalk(n.Elt, depth+1, fun) - - case *ast.StructType: - depthWalk(n.Fields, depth+1, fun) - - case *ast.FuncType: - if n.Params != nil { - depthWalk(n.Params, depth+1, fun) - } - if n.Results != nil { - depthWalk(n.Results, depth+1, fun) - } - - case *ast.InterfaceType: - depthWalk(n.Methods, depth+1, fun) - - case *ast.MapType: - depthWalk(n.Key, depth+1, fun) - depthWalk(n.Value, depth+1, fun) - - case *ast.ChanType: - depthWalk(n.Value, depth+1, fun) - - // Statements - case *ast.BadStmt: - - case *ast.DeclStmt: - depthWalk(n.Decl, depth+1, fun) - - case *ast.EmptyStmt: - - case *ast.LabeledStmt: - depthWalk(n.Label, depth+1, fun) - depthWalk(n.Stmt, depth+1, fun) - - case *ast.ExprStmt: - depthWalk(n.X, depth+1, fun) - - case *ast.SendStmt: - depthWalk(n.Chan, depth+1, fun) - depthWalk(n.Value, depth+1, fun) - - case *ast.IncDecStmt: - depthWalk(n.X, depth+1, fun) - - case *ast.AssignStmt: - walkExprList(n.Lhs, depth+1, fun) - walkExprList(n.Rhs, depth+1, fun) - - case *ast.GoStmt: - depthWalk(n.Call, depth+1, fun) - - 
case *ast.DeferStmt: - depthWalk(n.Call, depth+1, fun) - - case *ast.ReturnStmt: - walkExprList(n.Results, depth+1, fun) - - case *ast.BranchStmt: - if n.Label != nil { - depthWalk(n.Label, depth+1, fun) - } - - case *ast.BlockStmt: - walkStmtList(n.List, depth+1, fun) - - case *ast.IfStmt: - if n.Init != nil { - depthWalk(n.Init, depth+1, fun) - } - depthWalk(n.Cond, depth+1, fun) - depthWalk(n.Body, depth+1, fun) - if n.Else != nil { - depthWalk(n.Else, depth+1, fun) - } - - case *ast.CaseClause: - walkExprList(n.List, depth+1, fun) - walkStmtList(n.Body, depth+1, fun) - - case *ast.SwitchStmt: - if n.Init != nil { - depthWalk(n.Init, depth+1, fun) - } - if n.Tag != nil { - depthWalk(n.Tag, depth+1, fun) - } - depthWalk(n.Body, depth+1, fun) - - case *ast.TypeSwitchStmt: - if n.Init != nil { - depthWalk(n.Init, depth+1, fun) - } - depthWalk(n.Assign, depth+1, fun) - depthWalk(n.Body, depth+1, fun) - - case *ast.CommClause: - if n.Comm != nil { - depthWalk(n.Comm, depth+1, fun) - } - walkStmtList(n.Body, depth+1, fun) - - case *ast.SelectStmt: - depthWalk(n.Body, depth+1, fun) - - case *ast.ForStmt: - if n.Init != nil { - depthWalk(n.Init, depth+1, fun) - } - if n.Cond != nil { - depthWalk(n.Cond, depth+1, fun) - } - if n.Post != nil { - depthWalk(n.Post, depth+1, fun) - } - depthWalk(n.Body, depth+1, fun) - - case *ast.RangeStmt: - if n.Key != nil { - depthWalk(n.Key, depth+1, fun) - } - if n.Value != nil { - depthWalk(n.Value, depth+1, fun) - } - depthWalk(n.X, depth+1, fun) - depthWalk(n.Body, depth+1, fun) - - // Declarations - case *ast.ImportSpec: - if n.Doc != nil { - depthWalk(n.Doc, depth+1, fun) - } - if n.Name != nil { - depthWalk(n.Name, depth+1, fun) - } - depthWalk(n.Path, depth+1, fun) - if n.Comment != nil { - depthWalk(n.Comment, depth+1, fun) - } - - case *ast.ValueSpec: - if n.Doc != nil { - depthWalk(n.Doc, depth+1, fun) - } - walkIdentList(n.Names, depth+1, fun) - if n.Type != nil { - depthWalk(n.Type, depth+1, fun) - } - 
walkExprList(n.Values, depth+1, fun) - if n.Comment != nil { - depthWalk(n.Comment, depth+1, fun) - } - - case *ast.TypeSpec: - if n.Doc != nil { - depthWalk(n.Doc, depth+1, fun) - } - depthWalk(n.Name, depth+1, fun) - depthWalk(n.Type, depth+1, fun) - if n.Comment != nil { - depthWalk(n.Comment, depth+1, fun) - } - - case *ast.BadDecl: - - case *ast.GenDecl: - if n.Doc != nil { - depthWalk(n.Doc, depth+1, fun) - } - for _, s := range n.Specs { - depthWalk(s, depth+1, fun) - } - - case *ast.FuncDecl: - if n.Doc != nil { - depthWalk(n.Doc, depth+1, fun) - } - if n.Recv != nil { - depthWalk(n.Recv, depth+1, fun) - } - depthWalk(n.Name, depth+1, fun) - depthWalk(n.Type, depth+1, fun) - if n.Body != nil { - depthWalk(n.Body, depth+1, fun) - } - - // Files and packages - case *ast.File: - if n.Doc != nil { - depthWalk(n.Doc, depth+1, fun) - } - depthWalk(n.Name, depth+1, fun) - walkDeclList(n.Decls, depth+1, fun) - // don't walk n.Comments - they have been - // visited already through the individual - // nodes - - case *ast.Package: - for _, f := range n.Files { - depthWalk(f, depth+1, fun) - } - - default: - panic(fmt.Sprintf("gas.depthWalk: unexpected node type %T", n)) - } -} - -type Selector interface { - Final(ast.Node) - Partial(ast.Node) bool -} - -func Select(s Selector, n ast.Node, bits ...reflect.Type) { - fun := func(n ast.Node, d int) bool { - if d < len(bits) && reflect.TypeOf(n) == bits[d] { - if d == len(bits)-1 { - s.Final(n) - return false - } else if s.Partial(n) { - return true - } - } - return false - } - depthWalk(n, 0, fun) -} - -// SimpleSelect will try to match a path through a sub-tree starting at a given AST node. -// The type of each node in the path at a given depth must match its entry in list of -// node types given. 
-func SimpleSelect(n ast.Node, bits ...reflect.Type) ast.Node { - var found ast.Node - fun := func(n ast.Node, d int) bool { - if found != nil { - return false // short cut logic if we have found a match - } - - if d < len(bits) && reflect.TypeOf(n) == bits[d] { - if d == len(bits)-1 { - found = n - return false - } - return true - } - return false - } - - depthWalk(n, 0, fun) - return found -} diff --git a/tools/vendor/github.com/GoASTScanner/gas/core/helpers.go b/tools/vendor/github.com/GoASTScanner/gas/helpers.go similarity index 77% rename from tools/vendor/github.com/GoASTScanner/gas/core/helpers.go rename to tools/vendor/github.com/GoASTScanner/gas/helpers.go index 8ccfbbf2..8bd1f5c5 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/core/helpers.go +++ b/tools/vendor/github.com/GoASTScanner/gas/helpers.go @@ -12,41 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package core +package gas import ( "fmt" "go/ast" "go/token" "go/types" - "reflect" - "regexp" "strconv" - "strings" ) -// helpfull "canned" matching routines ---------------------------------------- - -func selectName(n ast.Node, s reflect.Type) (string, bool) { - t := reflect.TypeOf(&ast.SelectorExpr{}) - if node, ok := SimpleSelect(n, s, t).(*ast.SelectorExpr); ok { - t = reflect.TypeOf(&ast.Ident{}) - if ident, ok := SimpleSelect(node.X, t).(*ast.Ident); ok { - return strings.Join([]string{ident.Name, node.Sel.Name}, "."), ok - } - } - return "", false -} - -// MatchCall will match an ast.CallNode if its method name obays the given regex. -func MatchCall(n ast.Node, r *regexp.Regexp) *ast.CallExpr { - t := reflect.TypeOf(&ast.CallExpr{}) - if name, ok := selectName(n, t); ok && r.MatchString(name) { - return n.(*ast.CallExpr) - } - return nil -} - // MatchCallByPackage ensures that the specified package is imported, // adjusts the name for any aliases and ignores cases that are // initialization only imports. 
@@ -100,11 +75,13 @@ func MatchCallByType(n ast.Node, ctx *Context, requiredType string, calls ...str return nil, false } -// MatchCompLit will match an ast.CompositeLit if its string value obays the given regex. -func MatchCompLit(n ast.Node, r *regexp.Regexp) *ast.CompositeLit { - t := reflect.TypeOf(&ast.CompositeLit{}) - if name, ok := selectName(n, t); ok && r.MatchString(name) { - return n.(*ast.CompositeLit) +// MatchCompLit will match an ast.CompositeLit based on the supplied type +func MatchCompLit(n ast.Node, ctx *Context, required string) *ast.CompositeLit { + if complit, ok := n.(*ast.CompositeLit); ok { + typeOf := ctx.Info.TypeOf(complit) + if typeOf.String() == required { + return complit + } } return nil } @@ -117,7 +94,7 @@ func GetInt(n ast.Node) (int64, error) { return 0, fmt.Errorf("Unexpected AST node type: %T", n) } -// GetInt will read and return a float value from an ast.BasicLit +// GetFloat will read and return a float value from an ast.BasicLit func GetFloat(n ast.Node) (float64, error) { if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.FLOAT { return strconv.ParseFloat(node.Value, 64) @@ -125,7 +102,7 @@ func GetFloat(n ast.Node) (float64, error) { return 0.0, fmt.Errorf("Unexpected AST node type: %T", n) } -// GetInt will read and return a char value from an ast.BasicLit +// GetChar will read and return a char value from an ast.BasicLit func GetChar(n ast.Node) (byte, error) { if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.CHAR { return node.Value[0], nil @@ -133,7 +110,7 @@ func GetChar(n ast.Node) (byte, error) { return 0, fmt.Errorf("Unexpected AST node type: %T", n) } -// GetInt will read and return a string value from an ast.BasicLit +// GetString will read and return a string value from an ast.BasicLit func GetString(n ast.Node) (string, error) { if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.STRING { return strconv.Unquote(node.Value) @@ -170,12 +147,10 @@ func GetCallInfo(n ast.Node, ctx 
*Context) (string, string, error) { t := ctx.Info.TypeOf(expr) if t != nil { return t.String(), fn.Sel.Name, nil - } else { - return "undefined", fn.Sel.Name, fmt.Errorf("missing type info") } - } else { - return expr.Name, fn.Sel.Name, nil + return "undefined", fn.Sel.Name, fmt.Errorf("missing type info") } + return expr.Name, fn.Sel.Name, nil } case *ast.Ident: return ctx.Pkg.Name(), fn.Name, nil @@ -205,7 +180,7 @@ func GetImportedName(path string, ctx *Context) (string, bool) { // GetImportPath resolves the full import path of an identifer based on // the imports in the current context. func GetImportPath(name string, ctx *Context) (string, bool) { - for path, _ := range ctx.Imports.Imported { + for path := range ctx.Imports.Imported { if imported, ok := GetImportedName(path, ctx); ok && imported == name { return path, true } diff --git a/tools/vendor/github.com/GoASTScanner/gas/import_tracker.go b/tools/vendor/github.com/GoASTScanner/gas/import_tracker.go new file mode 100644 index 00000000..0f948fb6 --- /dev/null +++ b/tools/vendor/github.com/GoASTScanner/gas/import_tracker.go @@ -0,0 +1,67 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gas + +import ( + "go/ast" + "go/types" + "strings" +) + +// ImportTracker is used to normalize the packages that have been imported +// by a source file. It is able to differentiate between plain imports, aliased +// imports and init only imports. 
+type ImportTracker struct { + Imported map[string]string + Aliased map[string]string + InitOnly map[string]bool +} + +// NewImportTracker creates an empty Import tracker instance +func NewImportTracker() *ImportTracker { + return &ImportTracker{ + make(map[string]string), + make(map[string]string), + make(map[string]bool), + } +} + +// TrackPackages tracks all the imports used by the supplied packages +func (t *ImportTracker) TrackPackages(pkgs ...*types.Package) { + for _, pkg := range pkgs { + t.Imported[pkg.Path()] = pkg.Name() + // Transient imports + //for _, imp := range pkg.Imports() { + // t.Imported[imp.Path()] = imp.Name() + //} + } +} + +// TrackImport tracks imports and handles the 'unsafe' import +func (t *ImportTracker) TrackImport(n ast.Node) { + if imported, ok := n.(*ast.ImportSpec); ok { + path := strings.Trim(imported.Path.Value, `"`) + if imported.Name != nil { + if imported.Name.Name == "_" { + // Initialization only import + t.InitOnly[path] = true + } else { + // Aliased import + t.Aliased[path] = imported.Name.Name + } + } + if path == "unsafe" { + t.Imported[path] = path + } + } +} diff --git a/tools/vendor/github.com/GoASTScanner/gas/core/issue.go b/tools/vendor/github.com/GoASTScanner/gas/issue.go similarity index 85% rename from tools/vendor/github.com/GoASTScanner/gas/core/issue.go rename to tools/vendor/github.com/GoASTScanner/gas/issue.go index 22c50448..2113529a 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/core/issue.go +++ b/tools/vendor/github.com/GoASTScanner/gas/issue.go @@ -11,32 +11,37 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package core + +package gas import ( "encoding/json" "fmt" "go/ast" "os" + "strconv" ) // Score type used by severity and confidence values type Score int const ( - Low Score = iota // Low value - Medium // Medium value - High // High value + // Low severity or confidence + Low Score = iota + // Medium severity or confidence + Medium + // High severity or confidence + High ) -// An Issue is returnd by a GAS rule if it discovers an issue with the scanned code. +// Issue is returnd by a GAS rule if it discovers an issue with the scanned code. type Issue struct { Severity Score `json:"severity"` // issue severity (how problematic it is) Confidence Score `json:"confidence"` // issue confidence (how sure we are we found it) What string `json:"details"` // Human readable explanation File string `json:"file"` // File name we found it in Code string `json:"code"` // Impacted code line - Line int `json:"line"` // Line number in file + Line string `json:"line"` // Line number in file } // MetaData is embedded in all GAS rules. The Severity, Confidence and What message @@ -71,7 +76,7 @@ func codeSnippet(file *os.File, start int64, end int64, n ast.Node) (string, err } size := (int)(end - start) // Go bug, os.File.Read should return int64 ... 
- file.Seek(start, 0) + file.Seek(start, 0) // #nosec buf := make([]byte, size) if nread, err := file.Read(buf); err != nil || nread != size { @@ -85,7 +90,12 @@ func NewIssue(ctx *Context, node ast.Node, desc string, severity Score, confiden var code string fobj := ctx.FileSet.File(node.Pos()) name := fobj.Name() - line := fobj.Line(node.Pos()) + + start, end := fobj.Line(node.Pos()), fobj.Line(node.End()) + line := strconv.Itoa(start) + if start != end { + line = fmt.Sprintf("%d-%d", start, end) + } if file, err := os.Open(fobj.Name()); err == nil { defer file.Close() diff --git a/tools/vendor/github.com/GoASTScanner/gas/main.go b/tools/vendor/github.com/GoASTScanner/gas/main.go deleted file mode 100644 index 1f8d7740..00000000 --- a/tools/vendor/github.com/GoASTScanner/gas/main.go +++ /dev/null @@ -1,293 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" - - gas "github.com/GoASTScanner/gas/core" - "github.com/GoASTScanner/gas/output" -) - -type recursion bool - -const ( - recurse recursion = true - noRecurse recursion = false -) - -var ( - // #nosec flag - flagIgnoreNoSec = flag.Bool("nosec", false, "Ignores #nosec comments when set") - - // format output - flagFormat = flag.String("fmt", "text", "Set output format. 
Valid options are: json, csv, html, or text") - - // output file - flagOutput = flag.String("out", "", "Set output file for results") - - // config file - flagConfig = flag.String("conf", "", "Path to optional config file") - - // quiet - flagQuiet = flag.Bool("quiet", false, "Only show output when errors are found") - - usageText = ` -GAS - Go AST Scanner - -Gas analyzes Go source code to look for common programming mistakes that -can lead to security problems. - -USAGE: - - # Check a single Go file - $ gas example.go - - # Check all files under the current directory and save results in - # json format. - $ gas -fmt=json -out=results.json ./... - - # Run a specific set of rules (by default all rules will be run): - $ gas -include=G101,G203,G401 ./... - - # Run all rules except the provided - $ gas -exclude=G101 ./... - -` - - logger *log.Logger -) - -func extendConfList(conf map[string]interface{}, name string, inputStr string) { - if inputStr == "" { - conf[name] = []string{} - } else { - input := strings.Split(inputStr, ",") - if val, ok := conf[name]; ok { - if data, ok := val.(*[]string); ok { - conf[name] = append(*data, input...) 
- } else { - logger.Fatal("Config item must be a string list: ", name) - } - } else { - conf[name] = input - } - } -} - -func buildConfig(incRules string, excRules string) map[string]interface{} { - config := make(map[string]interface{}) - if flagConfig != nil && *flagConfig != "" { // parse config if we have one - if data, err := ioutil.ReadFile(*flagConfig); err == nil { - if err := json.Unmarshal(data, &(config)); err != nil { - logger.Fatal("Could not parse JSON config: ", *flagConfig, ": ", err) - } - } else { - logger.Fatal("Could not read config file: ", *flagConfig) - } - } - - // add in CLI include and exclude data - extendConfList(config, "include", incRules) - extendConfList(config, "exclude", excRules) - - // override ignoreNosec if given on CLI - if flagIgnoreNoSec != nil { - config["ignoreNosec"] = *flagIgnoreNoSec - } else { - val, ok := config["ignoreNosec"] - if !ok { - config["ignoreNosec"] = false - } else if _, ok := val.(bool); !ok { - logger.Fatal("Config value must be a bool: 'ignoreNosec'") - } - } - - return config -} - -// #nosec -func usage() { - - fmt.Fprintln(os.Stderr, usageText) - fmt.Fprint(os.Stderr, "OPTIONS:\n\n") - flag.PrintDefaults() - fmt.Fprint(os.Stderr, "\n\nRULES:\n\n") - - // sorted rule list for eas of reading - rl := GetFullRuleList() - keys := make([]string, 0, len(rl)) - for key := range rl { - keys = append(keys, key) - } - sort.Strings(keys) - for _, k := range keys { - v := rl[k] - fmt.Fprintf(os.Stderr, "\t%s: %s\n", k, v.description) - } - fmt.Fprint(os.Stderr, "\n") -} - -func main() { - - // Setup usage description - flag.Usage = usage - - // Exclude files - excluded := newFileList("*_test.go") - flag.Var(excluded, "skip", "File pattern to exclude from scan. Uses simple * globs and requires full or partial match") - - incRules := "" - flag.StringVar(&incRules, "include", "", "Comma separated list of rules IDs to include. 
(see rule list)") - - excRules := "" - flag.StringVar(&excRules, "exclude", "", "Comma separated list of rules IDs to exclude. (see rule list)") - - // Custom commands / utilities to run instead of default analyzer - tools := newUtils() - flag.Var(tools, "tool", "GAS utilities to assist with rule development") - - // Setup logging - logger = log.New(os.Stderr, "[gas] ", log.LstdFlags) - - // Parse command line arguments - flag.Parse() - - // Ensure at least one file was specified - if flag.NArg() == 0 { - - fmt.Fprintf(os.Stderr, "\nError: FILE [FILE...] or './...' expected\n") - flag.Usage() - os.Exit(1) - } - - // Run utils instead of analysis - if len(tools.call) > 0 { - tools.run(flag.Args()...) - os.Exit(0) - } - - // Setup analyzer - config := buildConfig(incRules, excRules) - analyzer := gas.NewAnalyzer(config, logger) - AddRules(&analyzer, config) - - toAnalyze := getFilesToAnalyze(flag.Args(), excluded) - - for _, file := range toAnalyze { - logger.Printf(`Processing "%s"...`, file) - if err := analyzer.Process(file); err != nil { - logger.Printf(`Failed to process: "%s"`, file) - logger.Println(err) - logger.Fatalf(`Halting execution.`) - } - } - - issuesFound := len(analyzer.Issues) > 0 - // Exit quietly if nothing was found - if !issuesFound && *flagQuiet { - os.Exit(0) - } - - // Create output report - if *flagOutput != "" { - outfile, err := os.Create(*flagOutput) - if err != nil { - logger.Fatalf("Couldn't open: %s for writing. Reason - %s", *flagOutput, err) - } - defer outfile.Close() - output.CreateReport(outfile, *flagFormat, &analyzer) - } else { - output.CreateReport(os.Stdout, *flagFormat, &analyzer) - } - - // Do we have an issue? 
If so exit 1 - if issuesFound { - os.Exit(1) - } -} - -// getFilesToAnalyze lists all files -func getFilesToAnalyze(paths []string, excluded *fileList) []string { - //log.Println("getFilesToAnalyze: start") - var toAnalyze []string - for _, relativePath := range paths { - //log.Printf("getFilesToAnalyze: processing \"%s\"\n", path) - // get the absolute path before doing anything else - path, err := filepath.Abs(relativePath) - if err != nil { - log.Fatal(err) - } - if filepath.Base(relativePath) == "..." { - toAnalyze = append( - toAnalyze, - listFiles(filepath.Dir(path), recurse, excluded)..., - ) - } else { - var ( - finfo os.FileInfo - err error - ) - if finfo, err = os.Stat(path); err != nil { - logger.Fatal(err) - } - if !finfo.IsDir() { - if shouldInclude(path, excluded) { - toAnalyze = append(toAnalyze, path) - } - } else { - toAnalyze = listFiles(path, noRecurse, excluded) - } - } - } - //log.Println("getFilesToAnalyze: end") - return toAnalyze -} - -// listFiles returns a list of all files found that pass the shouldInclude check. -// If doRecursiveWalk it true, it will walk the tree rooted at absPath, otherwise it -// will only include files directly within the dir referenced by absPath. 
-func listFiles(absPath string, doRecursiveWalk recursion, excluded *fileList) []string { - var files []string - - walk := func(path string, info os.FileInfo, err error) error { - if info.IsDir() && doRecursiveWalk == noRecurse { - return filepath.SkipDir - } - if shouldInclude(path, excluded) { - files = append(files, path) - } - return nil - } - - if err := filepath.Walk(absPath, walk); err != nil { - log.Fatal(err) - } - return files -} - -// shouldInclude checks if a specific path which is expected to reference -// a regular file should be included -func shouldInclude(path string, excluded *fileList) bool { - return filepath.Ext(path) == ".go" && !excluded.Contains(path) -} diff --git a/tools/vendor/github.com/GoASTScanner/gas/output/formatter.go b/tools/vendor/github.com/GoASTScanner/gas/output/formatter.go index f10a973d..04851615 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/output/formatter.go +++ b/tools/vendor/github.com/GoASTScanner/gas/output/formatter.go @@ -17,21 +17,30 @@ package output import ( "encoding/csv" "encoding/json" + "encoding/xml" htmlTemplate "html/template" "io" - "strconv" plainTemplate "text/template" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" + "gopkg.in/yaml.v2" ) -// The output format for reported issues +// ReportFormat enumrates the output format for reported issues type ReportFormat int const ( + // ReportText is the default format that writes to stdout ReportText ReportFormat = iota // Plain text format - ReportJSON // Json format - ReportCSV // CSV format + + // ReportJSON set the output format to json + ReportJSON // Json format + + // ReportCSV set the output format to csv + ReportCSV // CSV format + + // ReportJUnitXML set the output format to junit xml + ReportJUnitXML // JUnit XML format ) var text = `Results: @@ -48,13 +57,28 @@ Summary: ` -func CreateReport(w io.Writer, format string, data *gas.Analyzer) error { +type reportInfo struct { + Issues []*gas.Issue + Stats *gas.Metrics +} 
+ +// CreateReport generates a report based for the supplied issues and metrics given +// the specified format. The formats currently accepted are: json, csv, html and text. +func CreateReport(w io.Writer, format string, issues []*gas.Issue, metrics *gas.Metrics) error { + data := &reportInfo{ + Issues: issues, + Stats: metrics, + } var err error switch format { case "json": err = reportJSON(w, data) + case "yaml": + err = reportYAML(w, data) case "csv": err = reportCSV(w, data) + case "junit-xml": + err = reportJUnitXML(w, data) case "html": err = reportFromHTMLTemplate(w, html, data) case "text": @@ -65,7 +89,7 @@ func CreateReport(w io.Writer, format string, data *gas.Analyzer) error { return err } -func reportJSON(w io.Writer, data *gas.Analyzer) error { +func reportJSON(w io.Writer, data *reportInfo) error { raw, err := json.MarshalIndent(data, "", "\t") if err != nil { panic(err) @@ -78,13 +102,22 @@ func reportJSON(w io.Writer, data *gas.Analyzer) error { return err } -func reportCSV(w io.Writer, data *gas.Analyzer) error { +func reportYAML(w io.Writer, data *reportInfo) error { + raw, err := yaml.Marshal(data) + if err != nil { + return err + } + _, err = w.Write(raw) + return err +} + +func reportCSV(w io.Writer, data *reportInfo) error { out := csv.NewWriter(w) defer out.Flush() for _, issue := range data.Issues { err := out.Write([]string{ issue.File, - strconv.Itoa(issue.Line), + issue.Line, issue.What, issue.Severity.String(), issue.Confidence.String(), @@ -97,7 +130,26 @@ func reportCSV(w io.Writer, data *gas.Analyzer) error { return nil } -func reportFromPlaintextTemplate(w io.Writer, reportTemplate string, data *gas.Analyzer) error { +func reportJUnitXML(w io.Writer, data *reportInfo) error { + groupedData := groupDataByRules(data) + junitXMLStruct := createJUnitXMLStruct(groupedData) + + raw, err := xml.MarshalIndent(junitXMLStruct, "", "\t") + if err != nil { + return err + } + + xmlHeader := []byte("\n") + raw = append(xmlHeader, raw...) 
+ _, err = w.Write(raw) + if err != nil { + return err + } + + return nil +} + +func reportFromPlaintextTemplate(w io.Writer, reportTemplate string, data *reportInfo) error { t, e := plainTemplate.New("gas").Parse(reportTemplate) if e != nil { return e @@ -106,7 +158,7 @@ func reportFromPlaintextTemplate(w io.Writer, reportTemplate string, data *gas.A return t.Execute(w, data) } -func reportFromHTMLTemplate(w io.Writer, reportTemplate string, data *gas.Analyzer) error { +func reportFromHTMLTemplate(w io.Writer, reportTemplate string, data *reportInfo) error { t, e := htmlTemplate.New("gas").Parse(reportTemplate) if e != nil { return e diff --git a/tools/vendor/github.com/GoASTScanner/gas/output/junit_xml_format.go b/tools/vendor/github.com/GoASTScanner/gas/output/junit_xml_format.go new file mode 100644 index 00000000..2fd5c39a --- /dev/null +++ b/tools/vendor/github.com/GoASTScanner/gas/output/junit_xml_format.go @@ -0,0 +1,74 @@ +package output + +import ( + "encoding/xml" + htmlLib "html" + "strconv" + + "github.com/GoASTScanner/gas" +) + +type junitXMLReport struct { + XMLName xml.Name `xml:"testsuites"` + Testsuites []testsuite `xml:"testsuite"` +} + +type testsuite struct { + XMLName xml.Name `xml:"testsuite"` + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Testcases []testcase `xml:"testcase"` +} + +type testcase struct { + XMLName xml.Name `xml:"testcase"` + Name string `xml:"name,attr"` + Failure failure `xml:"failure"` +} + +type failure struct { + XMLName xml.Name `xml:"failure"` + Message string `xml:"message,attr"` + Text string `xml:",innerxml"` +} + +func generatePlaintext(issue *gas.Issue) string { + return "Results:\n" + + "[" + issue.File + ":" + issue.Line + "] - " + + issue.What + " (Confidence: " + strconv.Itoa(int(issue.Confidence)) + + ", Severity: " + strconv.Itoa(int(issue.Severity)) + ")\n" + "> " + htmlLib.EscapeString(issue.Code) +} + +func groupDataByRules(data *reportInfo) map[string][]*gas.Issue { + groupedData := 
make(map[string][]*gas.Issue) + for _, issue := range data.Issues { + if _, ok := groupedData[issue.What]; ok { + groupedData[issue.What] = append(groupedData[issue.What], issue) + } else { + groupedData[issue.What] = []*gas.Issue{issue} + } + } + return groupedData +} + +func createJUnitXMLStruct(groupedData map[string][]*gas.Issue) junitXMLReport { + var xmlReport junitXMLReport + for what, issues := range groupedData { + testsuite := testsuite{ + Name: what, + Tests: len(issues), + } + for _, issue := range issues { + testcase := testcase{ + Name: issue.File, + Failure: failure{ + Message: "Found 1 vulnerability. See stacktrace for details.", + Text: generatePlaintext(issue), + }, + } + testsuite.Testcases = append(testsuite.Testcases, testcase) + } + xmlReport.Testsuites = append(xmlReport.Testsuites, testsuite) + } + return xmlReport +} diff --git a/tools/vendor/github.com/GoASTScanner/gas/core/resolve.go b/tools/vendor/github.com/GoASTScanner/gas/resolve.go similarity index 99% rename from tools/vendor/github.com/GoASTScanner/gas/core/resolve.go rename to tools/vendor/github.com/GoASTScanner/gas/resolve.go index 185df7ac..d7c6dce7 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/core/resolve.go +++ b/tools/vendor/github.com/GoASTScanner/gas/resolve.go @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package core +package gas import "go/ast" func resolveIdent(n *ast.Ident, c *Context) bool { + if n.Obj == nil || n.Obj.Kind != ast.Var { return true } diff --git a/tools/vendor/github.com/GoASTScanner/gas/rule.go b/tools/vendor/github.com/GoASTScanner/gas/rule.go new file mode 100644 index 00000000..58e1ce94 --- /dev/null +++ b/tools/vendor/github.com/GoASTScanner/gas/rule.go @@ -0,0 +1,58 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gas + +import ( + "go/ast" + "reflect" +) + +// The Rule interface used by all rules supported by GAS. +type Rule interface { + Match(ast.Node, *Context) (*Issue, error) +} + +// RuleBuilder is used to register a rule definition with the analyzer +type RuleBuilder func(c Config) (Rule, []ast.Node) + +// A RuleSet maps lists of rules to the type of AST node they should be run on. +// The anaylzer will only invoke rules contained in the list associated with the +// type of AST node it is currently visiting. +type RuleSet map[reflect.Type][]Rule + +// NewRuleSet constructs a new RuleSet +func NewRuleSet() RuleSet { + return make(RuleSet) +} + +// Register adds a trigger for the supplied rule for the the +// specified ast nodes. +func (r RuleSet) Register(rule Rule, nodes ...ast.Node) { + for _, n := range nodes { + t := reflect.TypeOf(n) + if rules, ok := r[t]; ok { + r[t] = append(rules, rule) + } else { + r[t] = []Rule{rule} + } + } +} + +// RegisteredFor will return all rules that are registered for a +// specified ast node. 
+func (r RuleSet) RegisteredFor(n ast.Node) []Rule { + if rules, found := r[reflect.TypeOf(n)]; found { + return rules + } + return []Rule{} +} diff --git a/tools/vendor/github.com/GoASTScanner/gas/rulelist.go b/tools/vendor/github.com/GoASTScanner/gas/rulelist.go deleted file mode 100644 index 285c1a1b..00000000 --- a/tools/vendor/github.com/GoASTScanner/gas/rulelist.go +++ /dev/null @@ -1,91 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "go/ast" - - gas "github.com/GoASTScanner/gas/core" - "github.com/GoASTScanner/gas/rules" -) - -type RuleInfo struct { - description string - build func(map[string]interface{}) (gas.Rule, []ast.Node) -} - -// GetFullRuleList get the full list of all rules available to GAS -func GetFullRuleList() map[string]RuleInfo { - return map[string]RuleInfo{ - // misc - "G101": RuleInfo{"Look for hardcoded credentials", rules.NewHardcodedCredentials}, - "G102": RuleInfo{"Bind to all interfaces", rules.NewBindsToAllNetworkInterfaces}, - "G103": RuleInfo{"Audit the use of unsafe block", rules.NewUsingUnsafe}, - "G104": RuleInfo{"Audit errors not checked", rules.NewNoErrorCheck}, - "G105": RuleInfo{"Audit the use of big.Exp function", rules.NewUsingBigExp}, - - // injection - "G201": RuleInfo{"SQL query construction using format string", rules.NewSqlStrFormat}, - "G202": RuleInfo{"SQL query construction using string concatenation", rules.NewSqlStrConcat}, - "G203": RuleInfo{"Use of unescaped data in HTML templates", rules.NewTemplateCheck}, - "G204": RuleInfo{"Audit use of command execution", rules.NewSubproc}, - - // filesystem - "G301": RuleInfo{"Poor file permissions used when creating a directory", rules.NewMkdirPerms}, - "G302": RuleInfo{"Poor file permisions used when creation file or using chmod", rules.NewFilePerms}, - "G303": RuleInfo{"Creating tempfile using a predictable path", rules.NewBadTempFile}, - - // crypto - "G401": RuleInfo{"Detect the usage of DES, RC4, or MD5", rules.NewUsesWeakCryptography}, - "G402": RuleInfo{"Look for bad TLS connection settings", rules.NewIntermediateTlsCheck}, - "G403": RuleInfo{"Ensure minimum RSA key length of 2048 bits", rules.NewWeakKeyStrength}, - "G404": RuleInfo{"Insecure random number source (rand)", rules.NewWeakRandCheck}, - - // blacklist - "G501": RuleInfo{"Import blacklist: crypto/md5", rules.NewBlacklist_crypto_md5}, - "G502": RuleInfo{"Import blacklist: crypto/des", 
rules.NewBlacklist_crypto_des}, - "G503": RuleInfo{"Import blacklist: crypto/rc4", rules.NewBlacklist_crypto_rc4}, - "G504": RuleInfo{"Import blacklist: net/http/cgi", rules.NewBlacklist_net_http_cgi}, - } -} - -func AddRules(analyzer *gas.Analyzer, conf map[string]interface{}) { - var all map[string]RuleInfo - - inc := conf["include"].([]string) - exc := conf["exclude"].([]string) - - // add included rules - if len(inc) == 0 { - all = GetFullRuleList() - } else { - all = map[string]RuleInfo{} - tmp := GetFullRuleList() - for _, v := range inc { - if val, ok := tmp[v]; ok { - all[v] = val - } - } - } - - // remove excluded rules - for _, v := range exc { - delete(all, v) - } - - for _, v := range all { - analyzer.AddRule(v.build(conf)) - } -} diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/big.go b/tools/vendor/github.com/GoASTScanner/gas/rules/big.go index 8ac0d421..00c31620 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/big.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/big.go @@ -15,24 +15,27 @@ package rules import ( - gas "github.com/GoASTScanner/gas/core" "go/ast" + + "github.com/GoASTScanner/gas" ) -type UsingBigExp struct { +type usingBigExp struct { gas.MetaData pkg string calls []string } -func (r *UsingBigExp) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { +func (r *usingBigExp) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { if _, matched := gas.MatchCallByType(n, c, r.pkg, r.calls...); matched { return gas.NewIssue(c, n, r.What, r.Severity, r.Confidence), nil } return nil, nil } -func NewUsingBigExp(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &UsingBigExp{ + +// NewUsingBigExp detects issues with modulus == 0 for Bignum +func NewUsingBigExp(conf gas.Config) (gas.Rule, []ast.Node) { + return &usingBigExp{ pkg: "*math/big.Int", calls: []string{"Exp"}, MetaData: gas.MetaData{ diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/bind.go 
b/tools/vendor/github.com/GoASTScanner/gas/rules/bind.go index ba91ba65..62518ebe 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/bind.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/bind.go @@ -18,30 +18,37 @@ import ( "go/ast" "regexp" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) // Looks for net.Listen("0.0.0.0") or net.Listen(":8080") -type BindsToAllNetworkInterfaces struct { +type bindsToAllNetworkInterfaces struct { gas.MetaData - call *regexp.Regexp + calls gas.CallList pattern *regexp.Regexp } -func (r *BindsToAllNetworkInterfaces) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { - if node := gas.MatchCall(n, r.call); node != nil { - if arg, err := gas.GetString(node.Args[1]); err == nil { - if r.pattern.MatchString(arg) { - return gas.NewIssue(c, n, r.What, r.Severity, r.Confidence), nil - } +func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { + callExpr := r.calls.ContainsCallExpr(n, c) + if callExpr == nil { + return nil, nil + } + if arg, err := gas.GetString(callExpr.Args[1]); err == nil { + if r.pattern.MatchString(arg) { + return gas.NewIssue(c, n, r.What, r.Severity, r.Confidence), nil } } - return + return nil, nil } -func NewBindsToAllNetworkInterfaces(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &BindsToAllNetworkInterfaces{ - call: regexp.MustCompile(`^(net|tls)\.Listen$`), +// NewBindsToAllNetworkInterfaces detects socket connections that are setup to +// listen on all network interfaces. 
+func NewBindsToAllNetworkInterfaces(conf gas.Config) (gas.Rule, []ast.Node) { + calls := gas.NewCallList() + calls.Add("net", "Listen") + calls.Add("crypto/tls", "Listen") + return &bindsToAllNetworkInterfaces{ + calls: calls, pattern: regexp.MustCompile(`^(0.0.0.0|:).*$`), MetaData: gas.MetaData{ Severity: gas.Medium, diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/blacklist.go b/tools/vendor/github.com/GoASTScanner/gas/rules/blacklist.go index 747eb4bc..fbcfff02 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/blacklist.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/blacklist.go @@ -16,64 +16,67 @@ package rules import ( "go/ast" + "strings" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type BlacklistImport struct { +type blacklistedImport struct { gas.MetaData - Path string + Blacklisted map[string]string } -func (r *BlacklistImport) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { +func unquote(original string) string { + copy := strings.TrimSpace(original) + copy = strings.TrimLeft(copy, `"`) + return strings.TrimRight(copy, `"`) +} + +func (r *blacklistedImport) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { if node, ok := n.(*ast.ImportSpec); ok { - if r.Path == node.Path.Value && node.Name.String() != "_" { - return gas.NewIssue(c, n, r.What, r.Severity, r.Confidence), nil + if description, ok := r.Blacklisted[unquote(node.Path.Value)]; ok { + return gas.NewIssue(c, node, description, r.Severity, r.Confidence), nil } } return nil, nil } -func NewBlacklist_crypto_md5(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &BlacklistImport{ +// NewBlacklistedImports reports when a blacklisted import is being used. +// Typically when a deprecated technology is being used. 
+func NewBlacklistedImports(conf gas.Config, blacklist map[string]string) (gas.Rule, []ast.Node) { + return &blacklistedImport{ MetaData: gas.MetaData{ - Severity: gas.High, + Severity: gas.Medium, Confidence: gas.High, - What: "Use of weak cryptographic primitive", }, - Path: `"crypto/md5"`, + Blacklisted: blacklist, }, []ast.Node{(*ast.ImportSpec)(nil)} } -func NewBlacklist_crypto_des(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &BlacklistImport{ - MetaData: gas.MetaData{ - Severity: gas.High, - Confidence: gas.High, - What: "Use of weak cryptographic primitive", - }, - Path: `"crypto/des"`, - }, []ast.Node{(*ast.ImportSpec)(nil)} +// NewBlacklistedImportMD5 fails if MD5 is imported +func NewBlacklistedImportMD5(conf gas.Config) (gas.Rule, []ast.Node) { + return NewBlacklistedImports(conf, map[string]string{ + "crypto/md5": "Blacklisted import crypto/md5: weak cryptographic primitive", + }) } -func NewBlacklist_crypto_rc4(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &BlacklistImport{ - MetaData: gas.MetaData{ - Severity: gas.High, - Confidence: gas.High, - What: "Use of weak cryptographic primitive", - }, - Path: `"crypto/rc4"`, - }, []ast.Node{(*ast.ImportSpec)(nil)} +// NewBlacklistedImportDES fails if DES is imported +func NewBlacklistedImportDES(conf gas.Config) (gas.Rule, []ast.Node) { + return NewBlacklistedImports(conf, map[string]string{ + "crypto/des": "Blacklisted import crypto/des: weak cryptographic primitive", + }) } -func NewBlacklist_net_http_cgi(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &BlacklistImport{ - MetaData: gas.MetaData{ - Severity: gas.High, - Confidence: gas.High, - What: "Go versions < 1.6.3 are vulnerable to Httpoxy attack: (CVE-2016-5386)", - }, - Path: `"net/http/cgi"`, - }, []ast.Node{(*ast.ImportSpec)(nil)} +// NewBlacklistedImportRC4 fails if DES is imported +func NewBlacklistedImportRC4(conf gas.Config) (gas.Rule, []ast.Node) { + return NewBlacklistedImports(conf, 
map[string]string{ + "crypto/rc4": "Blacklisted import crypto/rc4: weak cryptographic primitive", + }) +} + +// NewBlacklistedImportCGI fails if CGI is imported +func NewBlacklistedImportCGI(conf gas.Config) (gas.Rule, []ast.Node) { + return NewBlacklistedImports(conf, map[string]string{ + "net/http/cgi": "Blacklisted import net/http/cgi: Go versions < 1.6.3 are vulnerable to Httpoxy attack: (CVE-2016-5386)", + }) } diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/errors.go b/tools/vendor/github.com/GoASTScanner/gas/rules/errors.go index 2bf61c92..cda20874 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/errors.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/errors.go @@ -15,12 +15,13 @@ package rules import ( - gas "github.com/GoASTScanner/gas/core" "go/ast" "go/types" + + "github.com/GoASTScanner/gas" ) -type NoErrorCheck struct { +type noErrorCheck struct { gas.MetaData whitelist gas.CallList } @@ -29,7 +30,7 @@ func returnsError(callExpr *ast.CallExpr, ctx *gas.Context) int { if tv := ctx.Info.TypeOf(callExpr); tv != nil { switch t := tv.(type) { case *types.Tuple: - for pos := 0; pos < t.Len(); pos += 1 { + for pos := 0; pos < t.Len(); pos++ { variable := t.At(pos) if variable != nil && variable.Type().String() == "error" { return pos @@ -44,11 +45,11 @@ func returnsError(callExpr *ast.CallExpr, ctx *gas.Context) int { return -1 } -func (r *NoErrorCheck) Match(n ast.Node, ctx *gas.Context) (*gas.Issue, error) { +func (r *noErrorCheck) Match(n ast.Node, ctx *gas.Context) (*gas.Issue, error) { switch stmt := n.(type) { case *ast.AssignStmt: for _, expr := range stmt.Rhs { - if callExpr, ok := expr.(*ast.CallExpr); ok && !r.whitelist.ContainsCallExpr(callExpr, ctx) { + if callExpr, ok := expr.(*ast.CallExpr); ok && r.whitelist.ContainsCallExpr(expr, ctx) == nil { pos := returnsError(callExpr, ctx) if pos < 0 || pos >= len(stmt.Lhs) { return nil, nil @@ -59,7 +60,7 @@ func (r *NoErrorCheck) Match(n ast.Node, ctx *gas.Context) 
(*gas.Issue, error) { } } case *ast.ExprStmt: - if callExpr, ok := stmt.X.(*ast.CallExpr); ok && !r.whitelist.ContainsCallExpr(callExpr, ctx) { + if callExpr, ok := stmt.X.(*ast.CallExpr); ok && r.whitelist.ContainsCallExpr(stmt.X, ctx) == nil { pos := returnsError(callExpr, ctx) if pos >= 0 { return gas.NewIssue(ctx, n, r.What, r.Severity, r.Confidence), nil @@ -69,13 +70,14 @@ func (r *NoErrorCheck) Match(n ast.Node, ctx *gas.Context) (*gas.Issue, error) { return nil, nil } -func NewNoErrorCheck(conf map[string]interface{}) (gas.Rule, []ast.Node) { +// NewNoErrorCheck detects if the returned error is unchecked +func NewNoErrorCheck(conf gas.Config) (gas.Rule, []ast.Node) { // TODO(gm) Come up with sensible defaults here. Or flip it to use a // black list instead. whitelist := gas.NewCallList() whitelist.AddAll("bytes.Buffer", "Write", "WriteByte", "WriteRune", "WriteString") - whitelist.AddAll("fmt", "Print", "Printf", "Println") + whitelist.AddAll("fmt", "Print", "Printf", "Println", "Fprint", "Fprintf", "Fprintln") whitelist.Add("io.PipeWriter", "CloseWithError") if configured, ok := conf["G104"]; ok { @@ -85,7 +87,7 @@ func NewNoErrorCheck(conf map[string]interface{}) (gas.Rule, []ast.Node) { } } } - return &NoErrorCheck{ + return &noErrorCheck{ MetaData: gas.MetaData{ Severity: gas.Low, Confidence: gas.High, diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/fileperms.go b/tools/vendor/github.com/GoASTScanner/gas/rules/fileperms.go index 101c7e2a..b48720f5 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/fileperms.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/fileperms.go @@ -19,10 +19,10 @@ import ( "go/ast" "strconv" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type FilePermissions struct { +type filePermissions struct { gas.MetaData mode int64 pkg string @@ -30,7 +30,7 @@ type FilePermissions struct { } func getConfiguredMode(conf map[string]interface{}, configKey string, defaultMode int64) int64 
{ - var mode int64 = defaultMode + var mode = defaultMode if value, ok := conf[configKey]; ok { switch value.(type) { case int64: @@ -46,7 +46,7 @@ func getConfiguredMode(conf map[string]interface{}, configKey string, defaultMod return mode } -func (r *FilePermissions) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { +func (r *filePermissions) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { if callexpr, matched := gas.MatchCallByPackage(n, c, r.pkg, r.calls...); matched { modeArg := callexpr.Args[len(callexpr.Args)-1] if mode, err := gas.GetInt(modeArg); err == nil && mode > r.mode { @@ -56,9 +56,11 @@ func (r *FilePermissions) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) return nil, nil } -func NewFilePerms(conf map[string]interface{}) (gas.Rule, []ast.Node) { +// NewFilePerms creates a rule to detect file creation with a more permissive than configured +// permission mask. +func NewFilePerms(conf gas.Config) (gas.Rule, []ast.Node) { mode := getConfiguredMode(conf, "G302", 0600) - return &FilePermissions{ + return &filePermissions{ mode: mode, pkg: "os", calls: []string{"OpenFile", "Chmod"}, @@ -70,9 +72,11 @@ func NewFilePerms(conf map[string]interface{}) (gas.Rule, []ast.Node) { }, []ast.Node{(*ast.CallExpr)(nil)} } -func NewMkdirPerms(conf map[string]interface{}) (gas.Rule, []ast.Node) { - mode := getConfiguredMode(conf, "G301", 0700) - return &FilePermissions{ +// NewMkdirPerms creates a rule to detect directory creation with more permissive than +// configured permission mask. 
+func NewMkdirPerms(conf gas.Config) (gas.Rule, []ast.Node) { + mode := getConfiguredMode(conf, "G301", 0750) + return &filePermissions{ mode: mode, pkg: "os", calls: []string{"Mkdir", "MkdirAll"}, diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/hardcoded_credentials.go b/tools/vendor/github.com/GoASTScanner/gas/rules/hardcoded_credentials.go index 1b4b85af..bbba8ca7 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/hardcoded_credentials.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/hardcoded_credentials.go @@ -15,16 +15,16 @@ package rules import ( - gas "github.com/GoASTScanner/gas/core" "go/ast" "go/token" "regexp" - - "github.com/nbutton23/zxcvbn-go" "strconv" + + "github.com/GoASTScanner/gas" + "github.com/nbutton23/zxcvbn-go" ) -type Credentials struct { +type credentials struct { gas.MetaData pattern *regexp.Regexp entropyThreshold float64 @@ -40,7 +40,7 @@ func truncate(s string, n int) string { return s[:n] } -func (r *Credentials) isHighEntropyString(str string) bool { +func (r *credentials) isHighEntropyString(str string) bool { s := truncate(str, r.truncate) info := zxcvbn.PasswordStrength(s, []string{}) entropyPerChar := info.Entropy / float64(len(s)) @@ -49,7 +49,7 @@ func (r *Credentials) isHighEntropyString(str string) bool { entropyPerChar >= r.perCharThreshold)) } -func (r *Credentials) Match(n ast.Node, ctx *gas.Context) (*gas.Issue, error) { +func (r *credentials) Match(n ast.Node, ctx *gas.Context) (*gas.Issue, error) { switch node := n.(type) { case *ast.AssignStmt: return r.matchAssign(node, ctx) @@ -59,7 +59,7 @@ func (r *Credentials) Match(n ast.Node, ctx *gas.Context) (*gas.Issue, error) { return nil, nil } -func (r *Credentials) matchAssign(assign *ast.AssignStmt, ctx *gas.Context) (*gas.Issue, error) { +func (r *credentials) matchAssign(assign *ast.AssignStmt, ctx *gas.Context) (*gas.Issue, error) { for _, i := range assign.Lhs { if ident, ok := i.(*ast.Ident); ok { if r.pattern.MatchString(ident.Name) { 
@@ -76,7 +76,7 @@ func (r *Credentials) matchAssign(assign *ast.AssignStmt, ctx *gas.Context) (*ga return nil, nil } -func (r *Credentials) matchGenDecl(decl *ast.GenDecl, ctx *gas.Context) (*gas.Issue, error) { +func (r *credentials) matchGenDecl(decl *ast.GenDecl, ctx *gas.Context) (*gas.Issue, error) { if decl.Tok != token.CONST && decl.Tok != token.VAR { return nil, nil } @@ -100,12 +100,14 @@ func (r *Credentials) matchGenDecl(decl *ast.GenDecl, ctx *gas.Context) (*gas.Is return nil, nil } -func NewHardcodedCredentials(conf map[string]interface{}) (gas.Rule, []ast.Node) { +// NewHardcodedCredentials attempts to find high entropy string constants being +// assigned to variables that appear to be related to credentials. +func NewHardcodedCredentials(conf gas.Config) (gas.Rule, []ast.Node) { pattern := `(?i)passwd|pass|password|pwd|secret|token` entropyThreshold := 80.0 perCharThreshold := 3.0 ignoreEntropy := false - var truncateString int = 16 + var truncateString = 16 if val, ok := conf["G101"]; ok { conf := val.(map[string]string) if configPattern, ok := conf["pattern"]; ok { @@ -133,7 +135,7 @@ func NewHardcodedCredentials(conf map[string]interface{}) (gas.Rule, []ast.Node) } } - return &Credentials{ + return &credentials{ pattern: regexp.MustCompile(pattern), entropyThreshold: entropyThreshold, perCharThreshold: perCharThreshold, diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/rand.go b/tools/vendor/github.com/GoASTScanner/gas/rules/rand.go index 9cc99e40..ace2e024 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/rand.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/rand.go @@ -17,16 +17,16 @@ package rules import ( "go/ast" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type WeakRand struct { +type weakRand struct { gas.MetaData funcNames []string packagePath string } -func (w *WeakRand) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { +func (w *weakRand) Match(n ast.Node, c *gas.Context) 
(*gas.Issue, error) { for _, funcName := range w.funcNames { if _, matched := gas.MatchCallByPackage(n, c, w.packagePath, funcName); matched { return gas.NewIssue(c, n, w.What, w.Severity, w.Confidence), nil @@ -36,8 +36,9 @@ func (w *WeakRand) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { return nil, nil } -func NewWeakRandCheck(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &WeakRand{ +// NewWeakRandCheck detects the use of random number generator that isn't cryptographically secure +func NewWeakRandCheck(conf gas.Config) (gas.Rule, []ast.Node) { + return &weakRand{ funcNames: []string{"Read", "Int"}, packagePath: "math/rand", MetaData: gas.MetaData{ diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/rsa.go b/tools/vendor/github.com/GoASTScanner/gas/rules/rsa.go index 510ca78c..1394da4d 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/rsa.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/rsa.go @@ -17,31 +17,33 @@ package rules import ( "fmt" "go/ast" - "regexp" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type WeakKeyStrength struct { +type weakKeyStrength struct { gas.MetaData - pattern *regexp.Regexp - bits int + calls gas.CallList + bits int } -func (w *WeakKeyStrength) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { - if node := gas.MatchCall(n, w.pattern); node != nil { - if bits, err := gas.GetInt(node.Args[1]); err == nil && bits < (int64)(w.bits) { +func (w *weakKeyStrength) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { + if callExpr := w.calls.ContainsCallExpr(n, c); callExpr != nil { + if bits, err := gas.GetInt(callExpr.Args[1]); err == nil && bits < (int64)(w.bits) { return gas.NewIssue(c, n, w.What, w.Severity, w.Confidence), nil } } return nil, nil } -func NewWeakKeyStrength(conf map[string]interface{}) (gas.Rule, []ast.Node) { +// NewWeakKeyStrength builds a rule that detects RSA keys < 2048 bits +func NewWeakKeyStrength(conf gas.Config) (gas.Rule, 
[]ast.Node) { + calls := gas.NewCallList() + calls.Add("crypto/rsa", "GenerateKey") bits := 2048 - return &WeakKeyStrength{ - pattern: regexp.MustCompile(`^rsa\.GenerateKey$`), - bits: bits, + return &weakKeyStrength{ + calls: calls, + bits: bits, MetaData: gas.MetaData{ Severity: gas.Medium, Confidence: gas.High, diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/rulelist.go b/tools/vendor/github.com/GoASTScanner/gas/rules/rulelist.go new file mode 100644 index 00000000..28463680 --- /dev/null +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/rulelist.go @@ -0,0 +1,102 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "github.com/GoASTScanner/gas" +) + +// RuleDefinition contains the description of a rule and a mechanism to +// create it. 
+type RuleDefinition struct { + Description string + Create gas.RuleBuilder +} + +// RuleList is a mapping of rule ID's to rule definitions +type RuleList map[string]RuleDefinition + +// Builders returns all the create methods for a given rule list +func (rl RuleList) Builders() []gas.RuleBuilder { + builders := make([]gas.RuleBuilder, 0, len(rl)) + for _, def := range rl { + builders = append(builders, def.Create) + } + return builders +} + +// RuleFilter can be used to include or exclude a rule depending on the return +// value of the function +type RuleFilter func(string) bool + +// NewRuleFilter is a closure that will include/exclude the rule ID's based on +// the supplied boolean value. +func NewRuleFilter(action bool, ruleIDs ...string) RuleFilter { + rulelist := make(map[string]bool) + for _, rule := range ruleIDs { + rulelist[rule] = true + } + return func(rule string) bool { + if _, found := rulelist[rule]; found { + return action + } + return !action + } +} + +// Generate the list of rules to use +func Generate(filters ...RuleFilter) RuleList { + rules := map[string]RuleDefinition{ + // misc + "G101": {"Look for hardcoded credentials", NewHardcodedCredentials}, + "G102": {"Bind to all interfaces", NewBindsToAllNetworkInterfaces}, + "G103": {"Audit the use of unsafe block", NewUsingUnsafe}, + "G104": {"Audit errors not checked", NewNoErrorCheck}, + "G105": {"Audit the use of big.Exp function", NewUsingBigExp}, + "G106": {"Audit the use of ssh.InsecureIgnoreHostKey function", NewSSHHostKey}, + + // injection + "G201": {"SQL query construction using format string", NewSQLStrFormat}, + "G202": {"SQL query construction using string concatenation", NewSQLStrConcat}, + "G203": {"Use of unescaped data in HTML templates", NewTemplateCheck}, + "G204": {"Audit use of command execution", NewSubproc}, + + // filesystem + "G301": {"Poor file permissions used when creating a directory", NewMkdirPerms}, + "G302": {"Poor file permisions used when creation file or using 
chmod", NewFilePerms}, + "G303": {"Creating tempfile using a predictable path", NewBadTempFile}, + + // crypto + "G401": {"Detect the usage of DES, RC4, or MD5", NewUsesWeakCryptography}, + "G402": {"Look for bad TLS connection settings", NewIntermediateTLSCheck}, + "G403": {"Ensure minimum RSA key length of 2048 bits", NewWeakKeyStrength}, + "G404": {"Insecure random number source (rand)", NewWeakRandCheck}, + + // blacklist + "G501": {"Import blacklist: crypto/md5", NewBlacklistedImportMD5}, + "G502": {"Import blacklist: crypto/des", NewBlacklistedImportDES}, + "G503": {"Import blacklist: crypto/rc4", NewBlacklistedImportRC4}, + "G504": {"Import blacklist: net/http/cgi", NewBlacklistedImportCGI}, + } + + for rule := range rules { + for _, filter := range filters { + if filter(rule) { + delete(rules, rule) + } + } + } + return rules +} diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/sql.go b/tools/vendor/github.com/GoASTScanner/gas/rules/sql.go index 9b8b79f2..c6505e37 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/sql.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/sql.go @@ -18,20 +18,32 @@ import ( "go/ast" "regexp" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type SqlStatement struct { +type sqlStatement struct { gas.MetaData - pattern *regexp.Regexp + + // Contains a list of patterns which must all match for the rule to match. + patterns []*regexp.Regexp } -type SqlStrConcat struct { - SqlStatement +// See if the string matches the patterns for the statement. 
+func (s sqlStatement) MatchPatterns(str string) bool { + for _, pattern := range s.patterns { + if !pattern.MatchString(str) { + return false + } + } + return true +} + +type sqlStrConcat struct { + sqlStatement } // see if we can figure out what it is -func (s *SqlStrConcat) checkObject(n *ast.Ident) bool { +func (s *sqlStrConcat) checkObject(n *ast.Ident) bool { if n.Obj != nil { return n.Obj.Kind != ast.Var && n.Obj.Kind != ast.Fun } @@ -39,10 +51,13 @@ func (s *SqlStrConcat) checkObject(n *ast.Ident) bool { } // Look for "SELECT * FROM table WHERE " + " ' OR 1=1" -func (s *SqlStrConcat) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { +func (s *sqlStrConcat) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { if node, ok := n.(*ast.BinaryExpr); ok { if start, ok := node.X.(*ast.BasicLit); ok { - if str, e := gas.GetString(start); s.pattern.MatchString(str) && e == nil { + if str, e := gas.GetString(start); e == nil { + if !s.MatchPatterns(str) { + return nil, nil + } if _, ok := node.Y.(*ast.BasicLit); ok { return nil, nil // string cat OK } @@ -56,10 +71,13 @@ func (s *SqlStrConcat) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { return nil, nil } -func NewSqlStrConcat(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &SqlStrConcat{ - SqlStatement: SqlStatement{ - pattern: regexp.MustCompile(`(?)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) `), +// NewSQLStrConcat looks for cases where we are building SQL strings via concatenation +func NewSQLStrConcat(conf gas.Config) (gas.Rule, []ast.Node) { + return &sqlStrConcat{ + sqlStatement: sqlStatement{ + patterns: []*regexp.Regexp{ + regexp.MustCompile(`(?)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) `), + }, MetaData: gas.MetaData{ Severity: gas.Medium, Confidence: gas.High, @@ -69,31 +87,39 @@ func NewSqlStrConcat(conf map[string]interface{}) (gas.Rule, []ast.Node) { }, []ast.Node{(*ast.BinaryExpr)(nil)} } -type SqlStrFormat struct { - SqlStatement - call *regexp.Regexp +type 
sqlStrFormat struct { + sqlStatement + calls gas.CallList } // Looks for "fmt.Sprintf("SELECT * FROM foo where '%s', userInput)" -func (s *SqlStrFormat) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { - if node := gas.MatchCall(n, s.call); node != nil { - if arg, e := gas.GetString(node.Args[0]); s.pattern.MatchString(arg) && e == nil { +func (s *sqlStrFormat) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { + + // TODO(gm) improve confidence if database/sql is being used + if node := s.calls.ContainsCallExpr(n, c); node != nil { + if arg, e := gas.GetString(node.Args[0]); s.MatchPatterns(arg) && e == nil { return gas.NewIssue(c, n, s.What, s.Severity, s.Confidence), nil } } return nil, nil } -func NewSqlStrFormat(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &SqlStrFormat{ - call: regexp.MustCompile(`^fmt\.Sprintf$`), - SqlStatement: SqlStatement{ - pattern: regexp.MustCompile("(?)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) "), +// NewSQLStrFormat looks for cases where we're building SQL query strings using format strings +func NewSQLStrFormat(conf gas.Config) (gas.Rule, []ast.Node) { + rule := &sqlStrFormat{ + calls: gas.NewCallList(), + sqlStatement: sqlStatement{ + patterns: []*regexp.Regexp{ + regexp.MustCompile("(?)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) "), + regexp.MustCompile("%[^bdoxXfFp]"), + }, MetaData: gas.MetaData{ Severity: gas.Medium, Confidence: gas.High, What: "SQL string formatting", }, }, - }, []ast.Node{(*ast.CallExpr)(nil)} + } + rule.calls.AddAll("fmt", "Sprint", "Sprintf", "Sprintln") + return rule, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/ssh.go b/tools/vendor/github.com/GoASTScanner/gas/rules/ssh.go new file mode 100644 index 00000000..99b7e279 --- /dev/null +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/ssh.go @@ -0,0 +1,33 @@ +package rules + +import ( + "go/ast" + + "github.com/GoASTScanner/gas" +) + +type sshHostKey struct { + 
gas.MetaData + pkg string + calls []string +} + +func (r *sshHostKey) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { + if _, matches := gas.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { + return gas.NewIssue(c, n, r.What, r.Severity, r.Confidence), nil + } + return nil, nil +} + +// NewSSHHostKey rule detects the use of insecure ssh HostKeyCallback. +func NewSSHHostKey(conf gas.Config) (gas.Rule, []ast.Node) { + return &sshHostKey{ + pkg: "golang.org/x/crypto/ssh", + calls: []string{"InsecureIgnoreHostKey"}, + MetaData: gas.MetaData{ + What: "Use of ssh InsecureIgnoreHostKey should be audited", + Severity: gas.Medium, + Confidence: gas.High, + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/subproc.go b/tools/vendor/github.com/GoASTScanner/gas/rules/subproc.go index b5a6fa2f..4ddd8bd4 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/subproc.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/subproc.go @@ -16,41 +16,43 @@ package rules import ( "go/ast" - "regexp" - "strings" + "go/types" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type Subprocess struct { - pattern *regexp.Regexp +type subprocess struct { + gas.CallList } -func (r *Subprocess) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { - if node := gas.MatchCall(n, r.pattern); node != nil { +// TODO(gm) The only real potential for command injection with a Go project +// is something like this: +// +// syscall.Exec("/bin/sh", []string{"-c", tainted}) +// +// E.g. Input is correctly escaped but the execution context being used +// is unsafe. For example: +// +// syscall.Exec("echo", "foobar" + tainted) +func (r *subprocess) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { + if node := r.ContainsCallExpr(n, c); node != nil { for _, arg := range node.Args { - if !gas.TryResolve(arg, c) { - what := "Subprocess launching with variable." 
- return gas.NewIssue(c, n, what, gas.High, gas.High), nil + if ident, ok := arg.(*ast.Ident); ok { + obj := c.Info.ObjectOf(ident) + if _, ok := obj.(*types.Var); ok && !gas.TryResolve(ident, c) { + return gas.NewIssue(c, n, "Subprocess launched with variable", gas.Medium, gas.High), nil + } } } - - // call with partially qualified command - if str, err := gas.GetString(node.Args[0]); err == nil { - if !strings.HasPrefix(str, "/") { - what := "Subprocess launching with partial path." - return gas.NewIssue(c, n, what, gas.Medium, gas.High), nil - } - } - - what := "Subprocess launching should be audited." - return gas.NewIssue(c, n, what, gas.Low, gas.High), nil + return gas.NewIssue(c, n, "Subprocess launching should be audited", gas.Low, gas.High), nil } return nil, nil } -func NewSubproc(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &Subprocess{ - pattern: regexp.MustCompile(`^exec\.Command|syscall\.Exec$`), - }, []ast.Node{(*ast.CallExpr)(nil)} +// NewSubproc detects cases where we are forking out to an external process +func NewSubproc(conf gas.Config) (gas.Rule, []ast.Node) { + rule := &subprocess{gas.NewCallList()} + rule.Add("os/exec", "Command") + rule.Add("syscall", "Exec") + return rule, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/tempfiles.go b/tools/vendor/github.com/GoASTScanner/gas/rules/tempfiles.go index 8cbd55af..9af500dd 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/tempfiles.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/tempfiles.go @@ -18,17 +18,17 @@ import ( "go/ast" "regexp" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type BadTempFile struct { +type badTempFile struct { gas.MetaData - args *regexp.Regexp - call *regexp.Regexp + calls gas.CallList + args *regexp.Regexp } -func (t *BadTempFile) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { - if node := gas.MatchCall(n, t.call); node != nil { +func (t 
*badTempFile) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { + if node := t.calls.ContainsCallExpr(n, c); node != nil { if arg, e := gas.GetString(node.Args[0]); t.args.MatchString(arg) && e == nil { return gas.NewIssue(c, n, t.What, t.Severity, t.Confidence), nil } @@ -36,10 +36,14 @@ func (t *BadTempFile) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err erro return nil, nil } -func NewBadTempFile(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &BadTempFile{ - call: regexp.MustCompile(`ioutil\.WriteFile|os\.Create`), - args: regexp.MustCompile(`^/tmp/.*$|^/var/tmp/.*$`), +// NewBadTempFile detects direct writes to predictable path in temporary directory +func NewBadTempFile(conf gas.Config) (gas.Rule, []ast.Node) { + calls := gas.NewCallList() + calls.Add("io/ioutil", "WriteFile") + calls.Add("os", "Create") + return &badTempFile{ + calls: calls, + args: regexp.MustCompile(`^/tmp/.*$|^/var/tmp/.*$`), MetaData: gas.MetaData{ Severity: gas.Medium, Confidence: gas.High, diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/templates.go b/tools/vendor/github.com/GoASTScanner/gas/rules/templates.go index 0f1dc240..4c09ad93 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/templates.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/templates.go @@ -16,18 +16,17 @@ package rules import ( "go/ast" - "regexp" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type TemplateCheck struct { +type templateCheck struct { gas.MetaData - call *regexp.Regexp + calls gas.CallList } -func (t *TemplateCheck) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { - if node := gas.MatchCall(n, t.call); node != nil { +func (t *templateCheck) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { + if node := t.calls.ContainsCallExpr(n, c); node != nil { for _, arg := range node.Args { if _, ok := arg.(*ast.BasicLit); !ok { // basic lits are safe return gas.NewIssue(c, n, t.What, t.Severity, 
t.Confidence), nil @@ -37,9 +36,17 @@ func (t *TemplateCheck) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err er return nil, nil } -func NewTemplateCheck(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &TemplateCheck{ - call: regexp.MustCompile(`^template\.(HTML|JS|URL)$`), +// NewTemplateCheck constructs the template check rule. This rule is used to +// find use of tempaltes where HTML/JS escaping is not being used +func NewTemplateCheck(conf gas.Config) (gas.Rule, []ast.Node) { + + calls := gas.NewCallList() + calls.Add("html/template", "HTML") + calls.Add("html/template", "HTMLAttr") + calls.Add("html/template", "JS") + calls.Add("html/template", "URL") + return &templateCheck{ + calls: calls, MetaData: gas.MetaData{ Severity: gas.Medium, Confidence: gas.Low, diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/tls.go b/tools/vendor/github.com/GoASTScanner/gas/rules/tls.go index cbcca56c..2dce11df 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/tls.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/tls.go @@ -12,22 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:generate tlsconfig + package rules import ( "fmt" "go/ast" - "reflect" - "regexp" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type InsecureConfigTLS struct { - MinVersion int16 - MaxVersion int16 - pattern *regexp.Regexp - goodCiphers []string +type insecureConfigTLS struct { + MinVersion int16 + MaxVersion int16 + requiredType string + goodCiphers []string } func stringInSlice(a string, list []string) bool { @@ -39,15 +39,14 @@ func stringInSlice(a string, list []string) bool { return false } -func (t *InsecureConfigTLS) processTlsCipherSuites(n ast.Node, c *gas.Context) *gas.Issue { - a := reflect.TypeOf(&ast.KeyValueExpr{}) - b := reflect.TypeOf(&ast.CompositeLit{}) - if node, ok := gas.SimpleSelect(n, a, b).(*ast.CompositeLit); ok { - for _, elt := range node.Elts { - if ident, ok := elt.(*ast.SelectorExpr); ok { +func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gas.Context) *gas.Issue { + + if ciphers, ok := n.(*ast.CompositeLit); ok { + for _, cipher := range ciphers.Elts { + if ident, ok := cipher.(*ast.SelectorExpr); ok { if !stringInSlice(ident.Sel.Name, t.goodCiphers) { - str := fmt.Sprintf("TLS Bad Cipher Suite: %s", ident.Sel.Name) - return gas.NewIssue(c, n, str, gas.High, gas.High) + err := fmt.Sprintf("TLS Bad Cipher Suite: %s", ident.Sel.Name) + return gas.NewIssue(c, ident, err, gas.High, gas.High) } } } @@ -55,9 +54,10 @@ func (t *InsecureConfigTLS) processTlsCipherSuites(n ast.Node, c *gas.Context) * return nil } -func (t *InsecureConfigTLS) processTlsConfVal(n *ast.KeyValueExpr, c *gas.Context) *gas.Issue { +func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gas.Context) *gas.Issue { if ident, ok := n.Key.(*ast.Ident); ok { switch ident.Name { + case "InsecureSkipVerify": if node, ok := n.Value.(*ast.Ident); ok { if node.Name != "false" { @@ -97,7 +97,7 @@ func (t *InsecureConfigTLS) processTlsConfVal(n *ast.KeyValueExpr, c *gas.Contex } case "CipherSuites": - if ret := 
t.processTlsCipherSuites(n, c); ret != nil { + if ret := t.processTLSCipherSuites(n.Value, c); ret != nil { return ret } @@ -107,85 +107,19 @@ func (t *InsecureConfigTLS) processTlsConfVal(n *ast.KeyValueExpr, c *gas.Contex return nil } -func (t *InsecureConfigTLS) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { - if node := gas.MatchCompLit(n, t.pattern); node != nil { - for _, elt := range node.Elts { - if kve, ok := elt.(*ast.KeyValueExpr); ok { - gi = t.processTlsConfVal(kve, c) - if gi != nil { - break +func (t *insecureConfigTLS) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { + if complit, ok := n.(*ast.CompositeLit); ok && complit.Type != nil { + actualType := c.Info.TypeOf(complit.Type) + if actualType != nil && actualType.String() == t.requiredType { + for _, elt := range complit.Elts { + if kve, ok := elt.(*ast.KeyValueExpr); ok { + issue := t.processTLSConfVal(kve, c) + if issue != nil { + return issue, nil + } } } } } - return -} - -func NewModernTlsCheck(conf map[string]interface{}) (gas.Rule, []ast.Node) { - // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - return &InsecureConfigTLS{ - pattern: regexp.MustCompile(`^tls\.Config$`), - MinVersion: 0x0303, // TLS 1.2 only - MaxVersion: 0x0303, - goodCiphers: []string{ - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - }, - }, []ast.Node{(*ast.CompositeLit)(nil)} -} - -func NewIntermediateTlsCheck(conf map[string]interface{}) (gas.Rule, []ast.Node) { - // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 - return &InsecureConfigTLS{ - pattern: regexp.MustCompile(`^tls\.Config$`), - MinVersion: 0x0301, // TLS 1.2, 1.1, 1.0 - MaxVersion: 0x0303, - goodCiphers: []string{ - "TLS_RSA_WITH_AES_128_CBC_SHA", - "TLS_RSA_WITH_AES_256_CBC_SHA", - "TLS_RSA_WITH_AES_128_GCM_SHA256", - 
"TLS_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - }, - }, []ast.Node{(*ast.CompositeLit)(nil)} -} - -func NewCompatTlsCheck(conf map[string]interface{}) (gas.Rule, []ast.Node) { - // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_compatibility_.28default.29 - return &InsecureConfigTLS{ - pattern: regexp.MustCompile(`^tls\.Config$`), - MinVersion: 0x0301, // TLS 1.2, 1.1, 1.0 - MaxVersion: 0x0303, - goodCiphers: []string{ - "TLS_RSA_WITH_RC4_128_SHA", - "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - "TLS_RSA_WITH_AES_128_CBC_SHA", - "TLS_RSA_WITH_AES_256_CBC_SHA", - "TLS_RSA_WITH_AES_128_GCM_SHA256", - "TLS_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - }, - }, []ast.Node{(*ast.CompositeLit)(nil)} + return nil, nil } diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/tls_config.go b/tools/vendor/github.com/GoASTScanner/gas/rules/tls_config.go new file mode 100644 index 00000000..4f7afd3e --- /dev/null +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/tls_config.go @@ -0,0 +1,132 @@ +package rules + +import ( + "go/ast" + + "github.com/GoASTScanner/gas" +) + +// NewModernTLSCheck creates a check 
for Modern TLS ciphers +// DO NOT EDIT - generated by tlsconfig tool +func NewModernTLSCheck(conf gas.Config) (gas.Rule, []ast.Node) { + return &insecureConfigTLS{ + requiredType: "crypto/tls.Config", + MinVersion: 0x0303, + MaxVersion: 0x0303, + goodCiphers: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + }, + }, []ast.Node{(*ast.CompositeLit)(nil)} +} + +// NewIntermediateTLSCheck creates a check for Intermediate TLS ciphers +// DO NOT EDIT - generated by tlsconfig tool +func NewIntermediateTLSCheck(conf gas.Config) (gas.Rule, []ast.Node) { + return &insecureConfigTLS{ + requiredType: "crypto/tls.Config", + MinVersion: 0x0301, + MaxVersion: 0x0303, + goodCiphers: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + 
"TLS_RSA_WITH_AES_256_CBC_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + }, + }, []ast.Node{(*ast.CompositeLit)(nil)} +} + +// NewOldTLSCheck creates a check for Old TLS ciphers +// DO NOT EDIT - generated by tlsconfig tool +func NewOldTLSCheck(conf gas.Config) (gas.Rule, []ast.Node) { + return &insecureConfigTLS{ + requiredType: "crypto/tls.Config", + MinVersion: 0x0300, + MaxVersion: 0x0303, + goodCiphers: []string{ + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256", + "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384", + "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256", + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", + "TLS_DHE_DSS_WITH_AES_256_CBC_SHA", + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA", + "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_AES_256_CBC_SHA", + "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256", + "TLS_DHE_DSS_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384", + "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384", + 
"TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256", + "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256", + "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA", + "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA", + "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256", + "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256", + "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256", + "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256", + "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA", + "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA", + "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256", + "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA", + "TLS_DHE_RSA_WITH_SEED_CBC_SHA", + "TLS_DHE_DSS_WITH_SEED_CBC_SHA", + "TLS_RSA_WITH_SEED_CBC_SHA", + }, + }, []ast.Node{(*ast.CompositeLit)(nil)} +} diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/unsafe.go b/tools/vendor/github.com/GoASTScanner/gas/rules/unsafe.go index 861f77cd..81b41c61 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/unsafe.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/unsafe.go @@ -15,25 +15,28 @@ package rules import ( - gas "github.com/GoASTScanner/gas/core" "go/ast" + + "github.com/GoASTScanner/gas" ) -type UsingUnsafe struct { +type usingUnsafe struct { gas.MetaData pkg string calls []string } -func (r *UsingUnsafe) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { +func (r *usingUnsafe) Match(n ast.Node, c *gas.Context) (gi *gas.Issue, err error) { if _, matches := gas.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { return gas.NewIssue(c, n, r.What, r.Severity, r.Confidence), nil } return nil, nil } -func NewUsingUnsafe(conf map[string]interface{}) (gas.Rule, []ast.Node) { - return &UsingUnsafe{ +// NewUsingUnsafe rule detects the use of the unsafe package. This is only +// really useful for auditing purposes. 
+func NewUsingUnsafe(conf gas.Config) (gas.Rule, []ast.Node) { + return &usingUnsafe{ pkg: "unsafe", calls: []string{"Alignof", "Offsetof", "Sizeof", "Pointer"}, MetaData: gas.MetaData{ diff --git a/tools/vendor/github.com/GoASTScanner/gas/rules/weakcrypto.go b/tools/vendor/github.com/GoASTScanner/gas/rules/weakcrypto.go index 1c859e9b..d3adfdc8 100644 --- a/tools/vendor/github.com/GoASTScanner/gas/rules/weakcrypto.go +++ b/tools/vendor/github.com/GoASTScanner/gas/rules/weakcrypto.go @@ -17,15 +17,15 @@ package rules import ( "go/ast" - gas "github.com/GoASTScanner/gas/core" + "github.com/GoASTScanner/gas" ) -type UsesWeakCryptography struct { +type usesWeakCryptography struct { gas.MetaData blacklist map[string][]string } -func (r *UsesWeakCryptography) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { +func (r *usesWeakCryptography) Match(n ast.Node, c *gas.Context) (*gas.Issue, error) { for pkg, funcs := range r.blacklist { if _, matched := gas.MatchCallByPackage(n, c, pkg, funcs...); matched { @@ -35,13 +35,13 @@ func (r *UsesWeakCryptography) Match(n ast.Node, c *gas.Context) (*gas.Issue, er return nil, nil } -// Uses des.* md5.* or rc4.* -func NewUsesWeakCryptography(conf map[string]interface{}) (gas.Rule, []ast.Node) { +// NewUsesWeakCryptography detects uses of des.* md5.* or rc4.* +func NewUsesWeakCryptography(conf gas.Config) (gas.Rule, []ast.Node) { calls := make(map[string][]string) calls["crypto/des"] = []string{"NewCipher", "NewTripleDESCipher"} calls["crypto/md5"] = []string{"New", "Sum"} calls["crypto/rc4"] = []string{"NewCipher"} - rule := &UsesWeakCryptography{ + rule := &usesWeakCryptography{ blacklist: calls, MetaData: gas.MetaData{ Severity: gas.Medium, diff --git a/tools/vendor/github.com/GoASTScanner/gas/tools.go b/tools/vendor/github.com/GoASTScanner/gas/tools.go deleted file mode 100644 index c2658191..00000000 --- a/tools/vendor/github.com/GoASTScanner/gas/tools.go +++ /dev/null @@ -1,276 +0,0 @@ -// (c) Copyright 2016 Hewlett 
Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "go/ast" - "go/importer" - "go/parser" - "go/token" - "go/types" - "os" - "strings" -) - -type command func(args ...string) -type utilities struct { - commands map[string]command - call []string -} - -// Custom commands / utilities to run instead of default analyzer -func newUtils() *utilities { - utils := make(map[string]command) - utils["ast"] = dumpAst - utils["callobj"] = dumpCallObj - utils["uses"] = dumpUses - utils["types"] = dumpTypes - utils["defs"] = dumpDefs - utils["comments"] = dumpComments - utils["imports"] = dumpImports - return &utilities{utils, make([]string, 0)} -} - -func (u *utilities) String() string { - i := 0 - keys := make([]string, len(u.commands)) - for k := range u.commands { - keys[i] = k - i++ - } - return strings.Join(keys, ", ") -} - -func (u *utilities) Set(opt string) error { - if _, ok := u.commands[opt]; !ok { - return fmt.Errorf("valid tools are: %s", u.String()) - - } - u.call = append(u.call, opt) - return nil -} - -func (u *utilities) run(args ...string) { - for _, util := range u.call { - if cmd, ok := u.commands[util]; ok { - cmd(args...) 
- } - } -} - -func shouldSkip(path string) bool { - st, e := os.Stat(path) - if e != nil { - // #nosec - fmt.Fprintf(os.Stderr, "Skipping: %s - %s\n", path, e) - return true - } - if st.IsDir() { - // #nosec - fmt.Fprintf(os.Stderr, "Skipping: %s - directory\n", path) - return true - } - return false -} - -func dumpAst(files ...string) { - for _, arg := range files { - // Ensure file exists and not a directory - if shouldSkip(arg) { - continue - } - - // Create the AST by parsing src. - fset := token.NewFileSet() // positions are relative to fset - f, err := parser.ParseFile(fset, arg, nil, 0) - if err != nil { - // #nosec - fmt.Fprintf(os.Stderr, "Unable to parse file %s\n", err) - continue - } - - // Print the AST. #nosec - ast.Print(fset, f) - } -} - -type context struct { - fileset *token.FileSet - comments ast.CommentMap - info *types.Info - pkg *types.Package - config *types.Config - root *ast.File -} - -func createContext(filename string) *context { - fileset := token.NewFileSet() - root, e := parser.ParseFile(fileset, filename, nil, parser.ParseComments) - if e != nil { - // #nosec - fmt.Fprintf(os.Stderr, "Unable to parse file: %s. Reason: %s\n", filename, e) - return nil - } - comments := ast.NewCommentMap(fileset, root, root.Comments) - info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - Scopes: make(map[ast.Node]*types.Scope), - Implicits: make(map[ast.Node]types.Object), - } - config := types.Config{Importer: importer.Default()} - pkg, e := config.Check("main.go", fileset, []*ast.File{root}, info) - if e != nil { - // #nosec - fmt.Fprintf(os.Stderr, "Type check failed for file: %s. 
Reason: %s\n", filename, e) - return nil - } - return &context{fileset, comments, info, pkg, &config, root} -} - -func printObject(obj types.Object) { - fmt.Println("OBJECT") - if obj == nil { - fmt.Println("object is nil") - return - } - fmt.Printf(" Package = %v\n", obj.Pkg()) - if obj.Pkg() != nil { - fmt.Println(" Path = ", obj.Pkg().Path()) - fmt.Println(" Name = ", obj.Pkg().Name()) - fmt.Println(" String = ", obj.Pkg().String()) - } - fmt.Printf(" Name = %v\n", obj.Name()) - fmt.Printf(" Type = %v\n", obj.Type()) - fmt.Printf(" Id = %v\n", obj.Id()) -} - -func checkContext(ctx *context, file string) bool { - // #nosec - if ctx == nil { - fmt.Fprintln(os.Stderr, "Failed to create context for file: ", file) - return false - } - return true -} - -func dumpCallObj(files ...string) { - - for _, file := range files { - if shouldSkip(file) { - continue - } - context := createContext(file) - if !checkContext(context, file) { - return - } - ast.Inspect(context.root, func(n ast.Node) bool { - var obj types.Object - switch node := n.(type) { - case *ast.Ident: - obj = context.info.ObjectOf(node) //context.info.Uses[node] - case *ast.SelectorExpr: - obj = context.info.ObjectOf(node.Sel) //context.info.Uses[node.Sel] - default: - obj = nil - } - if obj != nil { - printObject(obj) - } - return true - }) - } -} - -func dumpUses(files ...string) { - for _, file := range files { - if shouldSkip(file) { - continue - } - context := createContext(file) - if !checkContext(context, file) { - return - } - for ident, obj := range context.info.Uses { - fmt.Printf("IDENT: %v, OBJECT: %v\n", ident, obj) - } - } -} - -func dumpTypes(files ...string) { - for _, file := range files { - if shouldSkip(file) { - continue - } - context := createContext(file) - if !checkContext(context, file) { - return - } - for expr, tv := range context.info.Types { - fmt.Printf("EXPR: %v, TYPE: %v\n", expr, tv) - } - } -} - -func dumpDefs(files ...string) { - for _, file := range files { - if 
shouldSkip(file) { - continue - } - context := createContext(file) - if !checkContext(context, file) { - return - } - for ident, obj := range context.info.Defs { - fmt.Printf("IDENT: %v, OBJ: %v\n", ident, obj) - } - } -} - -func dumpComments(files ...string) { - for _, file := range files { - if shouldSkip(file) { - continue - } - context := createContext(file) - if !checkContext(context, file) { - return - } - for _, group := range context.comments.Comments() { - fmt.Println(group.Text()) - } - } -} - -func dumpImports(files ...string) { - for _, file := range files { - if shouldSkip(file) { - continue - } - context := createContext(file) - if !checkContext(context, file) { - return - } - for _, pkg := range context.pkg.Imports() { - fmt.Println(pkg.Path(), pkg.Name()) - for _, name := range pkg.Scope().Names() { - fmt.Println(" => ", name) - } - } - } -} diff --git a/tools/vendor/github.com/GoASTScanner/gas/vendor.conf b/tools/vendor/github.com/GoASTScanner/gas/vendor.conf deleted file mode 100644 index 5f5b814f..00000000 --- a/tools/vendor/github.com/GoASTScanner/gas/vendor.conf +++ /dev/null @@ -1,7 +0,0 @@ -# package -github.com/GoAstScanner/gas - -# import -github.com/GoASTScanner/gas cc52ef5 -github.com/nbutton23/zxcvbn-go a22cb81 -github.com/ryanuber/go-glob v0.1 diff --git a/tools/vendor/github.com/alecthomas/gometalinter/README.md b/tools/vendor/github.com/alecthomas/gometalinter/README.md index 809a60ad..2ec15d26 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/README.md +++ b/tools/vendor/github.com/alecthomas/gometalinter/README.md @@ -3,10 +3,13 @@ +- [Installing](#installing) - [Editor integration](#editor-integration) - [Supported linters](#supported-linters) - [Configuration file](#configuration-file) -- [Installing](#installing) + - [`Format` key](#format-key) + - [Format Methods](#format-methods) + - [Adding Custom linters](#adding-custom-linters) - [Comment directives](#comment-directives) - [Quickstart](#quickstart) - [FAQ](#faq) 
@@ -39,6 +42,16 @@ eg. It is intended for use with editor/IDE integration. +## Installing + +There are two options for installing gometalinter. + +1. Install a stable version, eg. `go get -u gopkg.in/alecthomas/gometalinter.v2`. + I will generally only tag a new stable version when it has passed the Travis + regression tests. The downside is that the binary will be called `gometalinter.v2`. +2. Install from HEAD with: `go get -u github.com/alecthomas/gometalinter`. + This has the downside that changes to gometalinter may break. + ## Editor integration - [SublimeLinter plugin](https://github.com/alecthomas/SublimeLinter-contrib-gometalinter). @@ -91,9 +104,11 @@ Additional linters can be added through the command line with `--linter=NAME:COM ## Configuration file -gometalinter now supports a JSON configuration file which can be loaded via -`--config=`. The format of this file is determined by the `Config` struct -in [config.go](https://github.com/alecthomas/gometalinter/blob/master/config.go). +gometalinter now supports a JSON configuration file called `.gometalinter.json` that can +be placed at the root of your project. The configuration file will be automatically loaded +from the working directory or any parent directory and can be overridden by passing +`--config=` or ignored with `--no-config`. The format of this file is determined by +the `Config` struct in [config.go](https://github.com/alecthomas/gometalinter/blob/master/config.go). The configuration file mostly corresponds to command-line flags, with the following exceptions: @@ -110,6 +125,27 @@ Here is an example configuration file: } ``` +If a `.gometalinter.json` file is loaded, individual options can still be overridden by +passing command-line flags. All flags are parsed in order, meaning configuration passed +with the `--config` flag will override any command-line flags passed before and be +overridden by flags passed after. 
+ + +#### `Format` key + +The default `Format` key places the different fields of an `Issue` into a template. this +corresponds to the `--format` option command-line flag. + +Default `Format`: +``` +Format: "{{.Path}}:{{.Line}}:{{if .Col}}{{.Col}}{{end}}:{{.Severity}}: {{.Message}} ({{.Linter}})" +``` + +#### Format Methods + +* `{{.Path.Relative}}` - equivalent to `{{.Path}}` which outputs a relative path to the file +* `{{.Path.Abs}}` - outputs an absolute path to the file + ### Adding Custom linters Linters can be added and customized from the config file using the `Linters` field. @@ -138,16 +174,6 @@ Example: $ gometalinter --linter='vet:go tool vet -printfuncs=Infof,Debugf,Warningf,Errorf:PATH:LINE:MESSAGE' . ``` -## Installing - -There are two options for installing gometalinter. - -1. Install a stable version, eg. `go get -u gopkg.in/alecthomas/gometalinter.v1`. - I will generally only tag a new stable version when it has passed the Travis - regression tests. The downside is that the binary will be called `gometalinter.v1`. -2. Install from HEAD with: `go get -u github.com/alecthomas/gometalinter`. - This has the downside that changes to gometalinter may break. - ## Comment directives gometalinter supports suppression of linter messages via comment directives. 
The diff --git a/tools/vendor/github.com/alecthomas/gometalinter/aggregate.go b/tools/vendor/github.com/alecthomas/gometalinter/aggregate.go index f4b44e4b..1017891c 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/aggregate.go +++ b/tools/vendor/github.com/alecthomas/gometalinter/aggregate.go @@ -25,7 +25,7 @@ func AggregateIssueChan(issues chan *Issue) chan *Issue { go func() { for issue := range issues { key := issueKey{ - path: issue.Path, + path: issue.Path.String(), line: issue.Line, col: issue.Col, message: issue.Message, diff --git a/tools/vendor/github.com/alecthomas/gometalinter/checkstyle.go b/tools/vendor/github.com/alecthomas/gometalinter/checkstyle.go index 5122604e..52ff2396 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/checkstyle.go +++ b/tools/vendor/github.com/alecthomas/gometalinter/checkstyle.go @@ -33,14 +33,13 @@ func outputToCheckstyle(issues chan *Issue) int { } status := 0 for issue := range issues { - if lastFile != nil && lastFile.Name != issue.Path { + path := issue.Path.Relative() + if lastFile != nil && lastFile.Name != path { out.Files = append(out.Files, lastFile) lastFile = nil } if lastFile == nil { - lastFile = &checkstyleFile{ - Name: issue.Path, - } + lastFile = &checkstyleFile{Name: path} } if config.Errors && issue.Severity != Error { diff --git a/tools/vendor/github.com/alecthomas/gometalinter/config.go b/tools/vendor/github.com/alecthomas/gometalinter/config.go index bdcb4d04..7790a10a 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/config.go +++ b/tools/vendor/github.com/alecthomas/gometalinter/config.go @@ -2,6 +2,8 @@ package main import ( "encoding/json" + "os" + "path/filepath" "runtime" "text/template" "time" @@ -38,6 +40,7 @@ type Config struct { // nolint: maligned Vendor bool Cyclo int LineLength int + MisspellLocale string MinConfidence float64 MinOccurrences int MinConstLength int @@ -128,6 +131,7 @@ var config = &Config{ Concurrency: runtime.NumCPU(), Cyclo: 10, LineLength: 
80, + MisspellLocale: "", MinConfidence: 0.8, MinOccurrences: 3, MinConstLength: 3, @@ -135,3 +139,54 @@ var config = &Config{ Sort: []string{"none"}, Deadline: jsonDuration(time.Second * 30), } + +func loadConfigFile(filename string) error { + r, err := os.Open(filename) + if err != nil { + return err + } + defer r.Close() // nolint: errcheck + err = json.NewDecoder(r).Decode(config) + if err != nil { + return err + } + for _, disable := range config.Disable { + for i, enable := range config.Enable { + if enable == disable { + config.Enable = append(config.Enable[:i], config.Enable[i+1:]...) + break + } + } + } + return err +} + +func findDefaultConfigFile() (fullPath string, found bool, err error) { + prevPath := "" + dirPath, err := os.Getwd() + if err != nil { + return "", false, err + } + + for dirPath != prevPath { + fullPath, found, err = findConfigFileInDir(dirPath) + if err != nil || found { + return fullPath, found, err + } + prevPath, dirPath = dirPath, filepath.Dir(dirPath) + } + + return "", false, nil +} + +func findConfigFileInDir(dirPath string) (fullPath string, found bool, err error) { + fullPath = filepath.Join(dirPath, defaultConfigPath) + if _, err := os.Stat(fullPath); err != nil { + if os.IsNotExist(err) { + return "", false, nil + } + return "", false, err + } + + return fullPath, true, nil +} diff --git a/tools/vendor/github.com/alecthomas/gometalinter/directives.go b/tools/vendor/github.com/alecthomas/gometalinter/directives.go index 10f2903d..45d322e1 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/directives.go +++ b/tools/vendor/github.com/alecthomas/gometalinter/directives.go @@ -5,6 +5,7 @@ import ( "go/ast" "go/parser" "go/token" + "os" "sort" "strings" "sync" @@ -67,11 +68,12 @@ func newDirectiveParser() *directiveParser { // IsIgnored returns true if the given linter issue is ignored by a linter directive. 
func (d *directiveParser) IsIgnored(issue *Issue) bool { d.lock.Lock() - ranges, ok := d.files[issue.Path] + path := issue.Path.Relative() + ranges, ok := d.files[path] if !ok { - ranges = d.parseFile(issue.Path) + ranges = d.parseFile(path) sort.Sort(ranges) - d.files[issue.Path] = ranges + d.files[path] = ranges } d.lock.Unlock() for _, r := range ranges { @@ -204,10 +206,16 @@ func filterIssuesViaDirectives(directives *directiveParser, issues chan *Issue) func warnOnUnusedDirective(directives *directiveParser) []*Issue { out := []*Issue{} + + cwd, err := os.Getwd() + if err != nil { + warning("failed to get working directory %s", err) + } + for path, ranges := range directives.Unmatched() { for _, ignore := range ranges { issue, _ := NewIssue("nolint", config.formatTemplate) - issue.Path = path + issue.Path = newIssuePath(cwd, path) issue.Line = ignore.start issue.Col = ignore.col issue.Message = "nolint directive did not match any issue" diff --git a/tools/vendor/github.com/alecthomas/gometalinter/execute.go b/tools/vendor/github.com/alecthomas/gometalinter/execute.go index 57267a5d..da9eb5f3 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/execute.go +++ b/tools/vendor/github.com/alecthomas/gometalinter/execute.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "os/exec" - "path/filepath" "reflect" "regexp" "strconv" @@ -82,6 +81,7 @@ func runLinters(linters map[string]*Linter, paths []string, concurrency int, exc "duplthreshold": fmt.Sprintf("%d", config.DuplThreshold), "mincyclo": fmt.Sprintf("%d", config.Cyclo), "maxlinelength": fmt.Sprintf("%d", config.LineLength), + "misspelllocale": fmt.Sprintf("%s", config.MisspellLocale), "min_confidence": fmt.Sprintf("%f", config.MinConfidence), "min_occurrences": fmt.Sprintf("%d", config.MinOccurrences), "min_const_length": fmt.Sprintf("%d", config.MinConstLength), @@ -237,8 +237,10 @@ func processOutput(dbg debugFunction, state *linterState, out []byte) { } switch name { case "path": - issue.Path = relativePath(cwd, 
part) - + issue.Path, err = newIssuePathFromAbsPath(cwd, part) + if err != nil { + warning("failed to make %s a relative path: %s", part, err) + } case "line": n, err := strconv.ParseInt(part, 10, 32) kingpin.FatalIfError(err, "line matched invalid integer") @@ -273,37 +275,6 @@ func processOutput(dbg debugFunction, state *linterState, out []byte) { } } -func relativePath(root, path string) string { - fallback := path - root = resolvePath(root) - path = resolvePath(path) - var err error - path, err = filepath.Rel(root, path) - if err != nil { - warning("failed to make %s a relative path: %s", fallback, err) - return fallback - } - return path -} - -func resolvePath(path string) string { - var err error - fallback := path - if !filepath.IsAbs(path) { - path, err = filepath.Abs(path) - if err != nil { - warning("failed to make %s an absolute path: %s", fallback, err) - return fallback - } - } - path, err = filepath.EvalSymlinks(path) - if err != nil { - warning("failed to resolve symlinks in %s: %s", fallback, err) - return fallback - } - return path -} - func maybeSortIssues(issues chan *Issue) chan *Issue { if reflect.DeepEqual([]string{"none"}, config.Sort) { return issues diff --git a/tools/vendor/github.com/alecthomas/gometalinter/issue.go b/tools/vendor/github.com/alecthomas/gometalinter/issue.go index 982f4504..46c609a7 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/issue.go +++ b/tools/vendor/github.com/alecthomas/gometalinter/issue.go @@ -2,8 +2,10 @@ package main import ( "bytes" + "encoding/json" "fmt" "io/ioutil" + "path/filepath" "sort" "strings" "text/template" @@ -21,13 +23,59 @@ const ( Warning Severity = "warning" ) +type IssuePath struct { + root string + path string +} + +func (i IssuePath) String() string { + return i.Relative() +} + +func (i IssuePath) Relative() string { + return i.path +} + +func (i IssuePath) Abs() string { + return filepath.Join(i.root, i.path) +} + +func (i IssuePath) MarshalJSON() ([]byte, error) { + return 
json.Marshal(i.String()) +} + +func newIssuePath(root, path string) IssuePath { + return IssuePath{root: root, path: path} +} + +// newIssuePathFromAbsPath returns a new issuePath from a path that may be +// an absolute path. root must be an absolute path. +func newIssuePathFromAbsPath(root, path string) (IssuePath, error) { + resolvedRoot, err := filepath.EvalSymlinks(root) + if err != nil { + return newIssuePath(root, path), err + } + + resolvedPath, err := filepath.EvalSymlinks(path) + if err != nil { + return newIssuePath(root, path), err + } + + if !filepath.IsAbs(path) { + return newIssuePath(resolvedRoot, resolvedPath), nil + } + + relPath, err := filepath.Rel(resolvedRoot, resolvedPath) + return newIssuePath(resolvedRoot, relPath), err +} + type Issue struct { - Linter string `json:"linter"` - Severity Severity `json:"severity"` - Path string `json:"path"` - Line int `json:"line"` - Col int `json:"col"` - Message string `json:"message"` + Linter string `json:"linter"` + Severity Severity `json:"severity"` + Path IssuePath `json:"path"` + Line int `json:"line"` + Col int `json:"col"` + Message string `json:"message"` formatTmpl *template.Template } @@ -50,7 +98,11 @@ func (i *Issue) String() string { if i.Col != 0 { col = fmt.Sprintf("%d", i.Col) } - return fmt.Sprintf("%s:%d:%s:%s: %s (%s)", strings.TrimSpace(i.Path), i.Line, col, i.Severity, strings.TrimSpace(i.Message), i.Linter) + return fmt.Sprintf("%s:%d:%s:%s: %s (%s)", + strings.TrimSpace(i.Path.Relative()), + i.Line, col, i.Severity, + strings.TrimSpace(i.Message), + i.Linter) } buf := new(bytes.Buffer) _ = i.formatTmpl.Execute(buf, i) @@ -76,7 +128,7 @@ func CompareIssue(l, r Issue, order []string) bool { for _, key := range order { switch { case key == "path" && l.Path != r.Path: - return l.Path < r.Path + return l.Path.String() < r.Path.String() case key == "line" && l.Line != r.Line: return l.Line < r.Line case key == "column" && l.Col != r.Col: diff --git 
a/tools/vendor/github.com/alecthomas/gometalinter/linters.go b/tools/vendor/github.com/alecthomas/gometalinter/linters.go index c779b8c1..53373a93 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/linters.go +++ b/tools/vendor/github.com/alecthomas/gometalinter/linters.go @@ -328,7 +328,7 @@ var defaultLinters = map[string]LinterConfig{ defaultEnabled: true, }, "misspell": { - Command: `misspell -j 1`, + Command: `misspell -j 1 --locale "{misspelllocale}"`, Pattern: `PATH:LINE:COL:MESSAGE`, InstallFrom: "github.com/client9/misspell/cmd/misspell", PartitionStrategy: partitionPathsAsFiles, diff --git a/tools/vendor/github.com/alecthomas/gometalinter/main.go b/tools/vendor/github.com/alecthomas/gometalinter/main.go index 3de9534f..8ec3ea21 100644 --- a/tools/vendor/github.com/alecthomas/gometalinter/main.go +++ b/tools/vendor/github.com/alecthomas/gometalinter/main.go @@ -21,12 +21,19 @@ var ( // Locations to look for vendored linters. vendoredSearchPaths = [][]string{ {"github.com", "alecthomas", "gometalinter", "_linters"}, - {"gopkg.in", "alecthomas", "gometalinter.v1", "_linters"}, + {"gopkg.in", "alecthomas", "gometalinter.v2", "_linters"}, } + defaultConfigPath = ".gometalinter.json" + + // Populated by goreleaser. + version = "master" + commit = "?" 
+ date = "" ) func setupFlags(app *kingpin.Application) { app.Flag("config", "Load JSON configuration from file.").Envar("GOMETALINTER_CONFIG").Action(loadConfig).String() + app.Flag("no-config", "Disable automatic loading of config file.").Bool() app.Flag("disable", "Disable previously enabled linters.").PlaceHolder("LINTER").Short('D').Action(disableAction).Strings() app.Flag("enable", "Enable previously disabled linters.").PlaceHolder("LINTER").Short('E').Action(enableAction).Strings() app.Flag("linter", "Define a linter.").PlaceHolder("NAME:COMMAND:PATTERN").Action(cliLinterOverrides).StringMap() @@ -35,12 +42,12 @@ func setupFlags(app *kingpin.Application) { app.Flag("disable-all", "Disable all linters.").Action(disableAllAction).Bool() app.Flag("enable-all", "Enable all linters.").Action(enableAllAction).Bool() app.Flag("format", "Output format.").PlaceHolder(config.Format).StringVar(&config.Format) - app.Flag("vendored-linters", "Use vendored linters (recommended).").BoolVar(&config.VendoredLinters) + app.Flag("vendored-linters", "Use vendored linters (recommended) (DEPRECATED - use binary packages).").BoolVar(&config.VendoredLinters) app.Flag("fast", "Only run fast linters.").BoolVar(&config.Fast) - app.Flag("install", "Attempt to install all known linters.").Short('i').BoolVar(&config.Install) - app.Flag("update", "Pass -u to go tool when installing.").Short('u').BoolVar(&config.Update) - app.Flag("force", "Pass -f to go tool when installing.").Short('f').BoolVar(&config.Force) - app.Flag("download-only", "Pass -d to go tool when installing.").BoolVar(&config.DownloadOnly) + app.Flag("install", "Attempt to install all known linters (DEPRECATED - use binary packages).").Short('i').BoolVar(&config.Install) + app.Flag("update", "Pass -u to go tool when installing (DEPRECATED - use binary packages).").Short('u').BoolVar(&config.Update) + app.Flag("force", "Pass -f to go tool when installing (DEPRECATED - use binary 
packages).").Short('f').BoolVar(&config.Force) + app.Flag("download-only", "Pass -d to go tool when installing (DEPRECATED - use binary packages).").BoolVar(&config.DownloadOnly) app.Flag("debug", "Display messages for failed linters, etc.").Short('d').BoolVar(&config.Debug) app.Flag("concurrency", "Number of concurrent linters to run.").PlaceHolder(fmt.Sprintf("%d", runtime.NumCPU())).Short('j').IntVar(&config.Concurrency) app.Flag("exclude", "Exclude messages matching these regular expressions.").Short('e').PlaceHolder("REGEXP").StringsVar(&config.Exclude) @@ -49,6 +56,7 @@ func setupFlags(app *kingpin.Application) { app.Flag("vendor", "Enable vendoring support (skips 'vendor' directories and sets GO15VENDOREXPERIMENT=1).").BoolVar(&config.Vendor) app.Flag("cyclo-over", "Report functions with cyclomatic complexity over N (using gocyclo).").PlaceHolder("10").IntVar(&config.Cyclo) app.Flag("line-length", "Report lines longer than N (using lll).").PlaceHolder("80").IntVar(&config.LineLength) + app.Flag("misspell-locale", "Specify locale to use (using misspell).").PlaceHolder("").StringVar(&config.MisspellLocale) app.Flag("min-confidence", "Minimum confidence interval to pass to golint.").PlaceHolder(".80").FloatVar(&config.MinConfidence) app.Flag("min-occurrences", "Minimum occurrences to pass to goconst.").PlaceHolder("3").IntVar(&config.MinOccurrences) app.Flag("min-const-length", "Minimum constant length.").PlaceHolder("3").IntVar(&config.MinConstLength) @@ -81,25 +89,27 @@ func cliLinterOverrides(app *kingpin.Application, element *kingpin.ParseElement, return nil } -func loadConfig(app *kingpin.Application, element *kingpin.ParseElement, ctx *kingpin.ParseContext) error { - r, err := os.Open(*element.Value) - if err != nil { - return err +func loadDefaultConfig(app *kingpin.Application, element *kingpin.ParseElement, ctx *kingpin.ParseContext) error { + if element != nil { + return nil } - defer r.Close() // nolint: errcheck - err = 
json.NewDecoder(r).Decode(config) - if err != nil { - return err - } - for _, disable := range config.Disable { - for i, enable := range config.Enable { - if enable == disable { - config.Enable = append(config.Enable[:i], config.Enable[i+1:]...) - break - } + + for _, elem := range ctx.Elements { + if f := elem.OneOf.Flag; f == app.GetFlag("config") || f == app.GetFlag("no-config") { + return nil } } - return err + + configFile, found, err := findDefaultConfigFile() + if err != nil || !found { + return err + } + + return loadConfigFile(configFile) +} + +func loadConfig(app *kingpin.Application, element *kingpin.ParseElement, ctx *kingpin.ParseContext) error { + return loadConfigFile(*element.Value) } func disableAction(app *kingpin.Application, element *kingpin.ParseElement, ctx *kingpin.ParseContext) error { @@ -135,7 +145,9 @@ type debugFunction func(format string, args ...interface{}) func debug(format string, args ...interface{}) { if config.Debug { - fmt.Fprintf(os.Stderr, "DEBUG: "+format+"\n", args...) + t := time.Now().UTC() + fmt.Fprintf(os.Stderr, "DEBUG: [%s] ", t.Format(time.StampMilli)) + fmt.Fprintf(os.Stderr, format+"\n", args...) } } @@ -171,8 +183,10 @@ func formatSeverity() string { } func main() { + kingpin.Version(fmt.Sprintf("gometalinter version %s built from %s on %s", version, commit, date)) pathsArg := kingpin.Arg("path", "Directories to lint. Defaults to \".\". /... will recurse.").Strings() app := kingpin.CommandLine + app.Action(loadDefaultConfig) setupFlags(app) app.Help = fmt.Sprintf(`Aggregate and normalise the output of a whole bunch of Go linters. @@ -233,15 +247,6 @@ func processConfig(config *Config) (include *regexp.Regexp, exclude *regexp.Rege if !config.EnableGC { _ = os.Setenv("GOGC", "off") } - if config.VendoredLinters && config.Install && config.Update { - warning(`Linters are now vendored by default, --update ignored. The original -behaviour can be re-enabled with --no-vendored-linters. 
- -To request an update for a vendored linter file an issue at: -https://github.com/alecthomas/gometalinter/issues/new -`) - config.Update = false - } // Force sorting by path if checkstyle mode is selected // !jsonFlag check is required to handle: // gometalinter --json --checkstyle --sort=severity @@ -476,6 +481,14 @@ func addGoBinsToPath(gopaths []string) []string { // configureEnvironmentForInstall sets GOPATH and GOBIN so that vendored linters // can be installed func configureEnvironmentForInstall() { + if config.Update { + warning(`Linters are now vendored by default, --update ignored. The original +behaviour can be re-enabled with --no-vendored-linters. + +To request an update for a vendored linter file an issue at: +https://github.com/alecthomas/gometalinter/issues/new +`) + } gopaths := getGoPathList() vendorRoot := findVendoredLinters() if vendorRoot == "" { diff --git a/tools/vendor/github.com/golang/lint/golint/golint.go b/tools/vendor/github.com/golang/lint/golint/golint.go index d8360ad3..ac024b6d 100644 --- a/tools/vendor/github.com/golang/lint/golint/golint.go +++ b/tools/vendor/github.com/golang/lint/golint/golint.go @@ -16,7 +16,7 @@ import ( "path/filepath" "strings" - "github.com/golang/lint" + "golang.org/x/lint" ) var ( diff --git a/tools/vendor/github.com/golang/lint/golint/import.go b/tools/vendor/github.com/golang/lint/golint/import.go index 02a0daa6..2ba9dea7 100644 --- a/tools/vendor/github.com/golang/lint/golint/import.go +++ b/tools/vendor/github.com/golang/lint/golint/import.go @@ -22,11 +22,10 @@ import ( "strings" ) -var buildContext = build.Default - var ( - goroot = filepath.Clean(runtime.GOROOT()) - gorootSrc = filepath.Join(goroot, "src") + buildContext = build.Default + goroot = filepath.Clean(runtime.GOROOT()) + gorootSrc = filepath.Join(goroot, "src") ) // importPathsNoDotExpansion returns the import paths to use for the given diff --git a/tools/vendor/github.com/kisielk/errcheck/go.mod 
b/tools/vendor/github.com/kisielk/errcheck/go.mod new file mode 100644 index 00000000..56c9a975 --- /dev/null +++ b/tools/vendor/github.com/kisielk/errcheck/go.mod @@ -0,0 +1,6 @@ +module "github.com/kisielk/errcheck" + +require ( + "github.com/kisielk/gotool" v1.0.0 + "golang.org/x/tools" v0.0.0-20180221164845-07fd8470d635 +) diff --git a/tools/vendor/github.com/mattn/goveralls/README.md b/tools/vendor/github.com/mattn/goveralls/README.md index bc5608b5..c946f26f 100644 --- a/tools/vendor/github.com/mattn/goveralls/README.md +++ b/tools/vendor/github.com/mattn/goveralls/README.md @@ -137,6 +137,32 @@ test: For more information, See https://coveralls.zendesk.com/hc/en-us/articles/201342809-Go +## Sempahore + +Store your Coveralls API token in `Environment Variables`: + +``` +COVERALLS_TOKEN=your_token_goes_here +``` + +More instructions on how to do this can be found in the [Semahore documentation](https://semaphoreci.com/docs/exporting-environment-variables.html). + +Replace the `go test` line in your `Commands` with these lines: + +``` +$ go get github.com/mattn/goveralls +$ goveralls -service semaphore +``` + +`goveralls` automatically use the environment variable `COVERALLS_TOKEN` as the +default value for `-repotoken`. + +You can use the `-v` flag to see verbose output from the test suite: + +``` +$ goveralls -v -service semaphore +``` + # Authors * Yasuhiro Matsumoto (a.k.a. mattn) diff --git a/tools/vendor/github.com/mattn/goveralls/goveralls.go b/tools/vendor/github.com/mattn/goveralls/goveralls.go index 3be972db..a9889a52 100644 --- a/tools/vendor/github.com/mattn/goveralls/goveralls.go +++ b/tools/vendor/github.com/mattn/goveralls/goveralls.go @@ -54,6 +54,7 @@ var ( service = flag.String("service", "travis-ci", "The CI service or other environment in which the test suite was run. 
") shallow = flag.Bool("shallow", false, "Shallow coveralls internal server errors") ignore = flag.String("ignore", "", "Comma separated files to ignore") + show = flag.Bool("show", false, "Show which package is being tested") ) // usage supplants package flag's Usage variable @@ -149,6 +150,9 @@ func getCoverage() ([]*SourceFile, error) { args = append(args, line) cmd.Args = args + if *show { + fmt.Println("goveralls:", line) + } err = cmd.Run() if err != nil { return nil, fmt.Errorf("%v: %v", err, outBuf.String()) @@ -234,6 +238,8 @@ func process() error { jobId = circleCiJobId } else if appveyorJobId := os.Getenv("APPVEYOR_JOB_ID"); appveyorJobId != "" { jobId = appveyorJobId + } else if semaphoreJobId := os.Getenv("SEMPAHORE_BUILD_NUMBER"); semaphoreJobId != "" { + jobId = semaphoreJobId } if *repotoken == "" { @@ -250,6 +256,8 @@ func process() error { pullRequest = regexp.MustCompile(`[0-9]+$`).FindString(prURL) } else if prNumber := os.Getenv("APPVEYOR_PULL_REQUEST_NUMBER"); prNumber != "" { pullRequest = prNumber + } else if prNumber := os.Getenv("PULL_REQUEST_NUMBER"); prNumber != "" { + pullRequest = prNumber } sourceFiles, err := getCoverage() diff --git a/tools/vendor/github.com/opennota/check/cmd/varcheck/varcheck.go b/tools/vendor/github.com/opennota/check/cmd/varcheck/varcheck.go index 45df4e6c..a0defb7b 100644 --- a/tools/vendor/github.com/opennota/check/cmd/varcheck/varcheck.go +++ b/tools/vendor/github.com/opennota/check/cmd/varcheck/varcheck.go @@ -24,13 +24,15 @@ import ( "sort" "strings" + "go/types" + "github.com/kisielk/gotool" "golang.org/x/tools/go/loader" - "go/types" ) var ( reportExported = flag.Bool("e", false, "Report exported variables and constants") + buildTags = flag.String("tags", "", "Build tags") ) type object struct { @@ -102,6 +104,9 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { for _, val := range node.Values { ast.Walk(v, val) } + if node.Type != nil { + ast.Walk(v, node.Type) + } return nil case *ast.FuncDecl: @@ 
-133,6 +138,9 @@ func main() { } ctx := build.Default + if *buildTags != "" { + ctx.BuildTags = strings.Fields(*buildTags) + } loadcfg := loader.Config{ Build: &ctx, } diff --git a/tools/vendor/github.com/stripe/safesql/README.md b/tools/vendor/github.com/stripe/safesql/README.md index c3e74784..b36f4223 100644 --- a/tools/vendor/github.com/stripe/safesql/README.md +++ b/tools/vendor/github.com/stripe/safesql/README.md @@ -31,8 +31,8 @@ How does it work? ----------------- SafeSQL uses the static analysis utilities in [go/tools][tools] to search for -all call sites of each of the `query` functions in package [database/sql][sql] -(i.e., functions which accept a `string` parameter named `query`). It then makes +all call sites of each of the `query` functions in packages ([database/sql][sql],[github.com/jinzhu/gorm][gorm],[github.com/jmoiron/sqlx][sqlx]) +(i.e., functions which accept a parameter named `query`,`sql`). It then makes sure that every such call site uses a query that is a compile-time constant. The principle behind SafeSQL's safety guarantees is that queries that are @@ -44,6 +44,8 @@ will not be allowed. [tools]: https://godoc.org/golang.org/x/tools/go [sql]: http://golang.org/pkg/database/sql/ +[sqlx]: https://github.com/jmoiron/sqlx +[gorm]: https://github.com/jinzhu/gorm False positives --------------- @@ -66,8 +68,6 @@ a fundamental limitation: SafeSQL could recursively trace the `query` argument through every intervening helper function to ensure that its argument is always constant, but this code has yet to be written. -If you use a wrapper for `database/sql` (e.g., [`sqlx`][sqlx]), it's likely -SafeSQL will not work for you because of this. 
The second sort of false positive is based on a limitation in the sort of analysis SafeSQL performs: there are many safe SQL statements which are not @@ -76,4 +76,3 @@ static analysis techniques (such as taint analysis) or user-provided safety annotations would be able to reduce the number of false positives, but this is expected to be a significant undertaking. -[sqlx]: https://github.com/jmoiron/sqlx diff --git a/tools/vendor/github.com/stripe/safesql/safesql.go b/tools/vendor/github.com/stripe/safesql/safesql.go index 30718798..adf8bb89 100644 --- a/tools/vendor/github.com/stripe/safesql/safesql.go +++ b/tools/vendor/github.com/stripe/safesql/safesql.go @@ -9,6 +9,7 @@ import ( "go/build" "go/types" "os" + "path/filepath" "strings" @@ -19,6 +20,27 @@ import ( "golang.org/x/tools/go/ssa/ssautil" ) +type sqlPackage struct { + packageName string + paramNames []string + enable bool +} + +var sqlPackages = []sqlPackage{ + { + packageName: "database/sql", + paramNames: []string{"query"}, + }, + { + packageName: "github.com/jinzhu/gorm", + paramNames: []string{"sql", "query"}, + }, + { + packageName: "github.com/jmoiron/sqlx", + paramNames: []string{"query"}, + }, +} + func main() { var verbose, quiet bool flag.BoolVar(&verbose, "v", false, "Verbose mode") @@ -38,21 +60,45 @@ func main() { c := loader.Config{ FindPackage: FindPackage, } - c.Import("database/sql") for _, pkg := range pkgs { c.Import(pkg) } p, err := c.Load() + if err != nil { fmt.Printf("error loading packages %v: %v\n", pkgs, err) os.Exit(2) } + + imports := getImports(p) + existOne := false + for i := range sqlPackages { + if _, exist := imports[sqlPackages[i].packageName]; exist { + if verbose { + fmt.Printf("Enabling support for %s\n", sqlPackages[i].packageName) + } + sqlPackages[i].enable = true + existOne = true + } + } + if !existOne { + fmt.Printf("No packages in %v include a supported database driver", pkgs) + os.Exit(2) + } + s := ssautil.CreateProgram(p, 0) s.Build() - qms := 
FindQueryMethods(p.Package("database/sql").Pkg, s) + qms := make([]*QueryMethod, 0) + + for i := range sqlPackages { + if sqlPackages[i].enable { + qms = append(qms, FindQueryMethods(sqlPackages[i], p.Package(sqlPackages[i].packageName).Pkg, s)...) + } + } + if verbose { - fmt.Println("database/sql functions that accept queries:") + fmt.Println("database driver functions that accept queries:") for _, m := range qms { fmt.Printf("- %s (param %d)\n", m.Func, m.Param) } @@ -75,6 +121,7 @@ func main() { } bad := FindNonConstCalls(res.CallGraph, qms) + if len(bad) == 0 { if !quiet { fmt.Println(`You're safe from SQL injection! Yay \o/`) @@ -82,14 +129,19 @@ func main() { return } - fmt.Printf("Found %d potentially unsafe SQL statements:\n", len(bad)) + if verbose { + fmt.Printf("Found %d potentially unsafe SQL statements:\n", len(bad)) + } + for _, ci := range bad { pos := p.Fset.Position(ci.Pos()) fmt.Printf("- %s\n", pos) } - fmt.Println("Please ensure that all SQL queries you use are compile-time constants.") - fmt.Println("You should always use parameterized queries or prepared statements") - fmt.Println("instead of building queries from strings.") + if verbose { + fmt.Println("Please ensure that all SQL queries you use are compile-time constants.") + fmt.Println("You should always use parameterized queries or prepared statements") + fmt.Println("instead of building queries from strings.") + } os.Exit(1) } @@ -104,7 +156,7 @@ type QueryMethod struct { // FindQueryMethods locates all methods in the given package (assumed to be // package database/sql) with a string parameter named "query". 
-func FindQueryMethods(sql *types.Package, ssa *ssa.Program) []*QueryMethod { +func FindQueryMethods(sqlPackages sqlPackage, sql *types.Package, ssa *ssa.Program) []*QueryMethod { methods := make([]*QueryMethod, 0) scope := sql.Scope() for _, name := range scope.Names() { @@ -122,7 +174,7 @@ func FindQueryMethods(sql *types.Package, ssa *ssa.Program) []*QueryMethod { continue } s := m.Type().(*types.Signature) - if num, ok := FuncHasQuery(s); ok { + if num, ok := FuncHasQuery(sqlPackages, s); ok { methods = append(methods, &QueryMethod{ Func: m, SSA: ssa.FuncValue(m), @@ -135,16 +187,16 @@ func FindQueryMethods(sql *types.Package, ssa *ssa.Program) []*QueryMethod { return methods } -var stringType types.Type = types.Typ[types.String] - // FuncHasQuery returns the offset of the string parameter named "query", or // none if no such parameter exists. -func FuncHasQuery(s *types.Signature) (offset int, ok bool) { +func FuncHasQuery(sqlPackages sqlPackage, s *types.Signature) (offset int, ok bool) { params := s.Params() for i := 0; i < params.Len(); i++ { v := params.At(i) - if v.Name() == "query" && v.Type() == stringType { - return i, true + for _, paramName := range sqlPackages.paramNames { + if v.Name() == paramName { + return i, true + } } } return 0, false @@ -164,6 +216,16 @@ func FindMains(p *loader.Program, s *ssa.Program) []*ssa.Package { return mains } +func getImports(p *loader.Program) map[string]interface{} { + pkgs := make(map[string]interface{}) + for _, pkg := range p.AllPackages { + if pkg.Importable { + pkgs[pkg.Pkg.Path()] = nil + } + } + return pkgs +} + // FindNonConstCalls returns the set of callsites of the given set of methods // for which the "query" parameter is not a compile-time constant. 
func FindNonConstCalls(cg *callgraph.Graph, qms []*QueryMethod) []ssa.CallInstruction { @@ -186,6 +248,18 @@ func FindNonConstCalls(cg *callgraph.Graph, qms []*QueryMethod) []ssa.CallInstru if _, ok := okFuncs[edge.Site.Parent()]; ok { continue } + + isInternalSQLPkg := false + for _, pkg := range sqlPackages { + if pkg.packageName == edge.Caller.Func.Pkg.Pkg.Path() { + isInternalSQLPkg = true + break + } + } + if isInternalSQLPkg { + continue + } + cc := edge.Site.Common() args := cc.Args // The first parameter is occasionally the receiver. @@ -195,7 +269,14 @@ func FindNonConstCalls(cg *callgraph.Graph, qms []*QueryMethod) []ssa.CallInstru panic("arg count mismatch") } v := args[m.Param] + if _, ok := v.(*ssa.Const); !ok { + if inter, ok := v.(*ssa.MakeInterface); ok && types.IsInterface(v.(*ssa.MakeInterface).Type()) { + if inter.X.Referrers() == nil || inter.X.Type() != types.Typ[types.String] { + continue + } + } + bad = append(bad, edge.Site) } } diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/CONTRIBUTING.md b/tools/vendor/github.com/tmthrgd/go-bindata/CONTRIBUTING.md new file mode 100644 index 00000000..e0732f54 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/CONTRIBUTING.md @@ -0,0 +1,79 @@ +## Contribution guidelines. + +So you wish to contribute to this project? Fantastic! +Here are a few guidelines to help you do this in a +streamlined fashion. + + +## Bug reports + +When supplying a bug report, please consider the following guidelines. +These serve to make it easier for us to address the issue and find a solution. +Most of these are pretty self-evident, but sometimes it is still necessary +to reiterate them. + +* Be clear in the way you express the problem. Use simple language and + just enough of it to clearly define the issue. Not everyone is a native + English speaker. And while most can handle themselves pretty well, + it helps to stay away from more esoteric vocabulary. + + Be patient with non-native English speakers. 
If their bug reports + or comments are hard to understand, just ask for clarification. + Do not start guessing at their meaning, as this may just lead to + more confusion and misunderstandings. +* Clearly define any information which is relevant to the problem. + This includes library versions, operating system and any other + external dependencies which may be needed. +* Where applicable, provide a step-by-step listing of the way to + reproduce the problem. Make sure this is the simplest possible + way to do so. Omit any and all unneccesary steps, because they may + just complicate our understanding of the real problem. + If need be, create a whole new code project on your local machine, + which specifically tries to create the problem you are running into; + nothing more, nothing less. + + Include this program in the bug report. It often suffices to paste + the code in a [Gist](https://gist.github.com) or on the + [Go playground](http://play.golang.org). +* If possible, provide us with a listing of the steps you have already + undertaken to solve the problem. This can save us a great deal of + wasted time, trying out solutions you have already covered. + + +## Pull requests + +Bug reports are great. Supplying fixes to bugs is even better. +When submitting a pull request, the following guidelines are +good to keep in mind: + +* `go fmt`: **Always** run your code through `go fmt`, before + committing it. Code has to be readable by many different + people. And the only way this will be as painless as possible, + is if we all stick to the same code style. + + Some of our projects may have automated build-servers hooked up + to commit hooks. These will vet any submitted code and determine + if it meets a set of properties. One of which is code formatting. + These servers will outright deny a submission which has not been + run through `go fmt`, even if the code itself is correct. 
+ + We try to maintain a zero-tolerance policy on this matter, + because consistently formatted code makes life a great deal + easier for everyone involved. +* Commit log messages: When committing changes, do so often and + clearly -- Even if you have changed only 1 character in a code + comment. This means that commit log messages should clearly state + exactly what the change does and why. If it fixes a known issue, + then mention the issue number in the commit log. E.g.: + + > Fixes return value for `foo/boo.Baz()` to be consistent with + > the rest of the API. This addresses issue #32 + + Do not pile a lot of unrelated changes into a single commit. + Pick and choose only those changes for a single commit, which are + directly related. We would much rather see a hundred commits + saying nothing but `"Runs go fmt"` in between any real fixes + than have these style changes embedded in those real fixes. + It creates a lot of noise when trying to review code. + + diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/LICENSE b/tools/vendor/github.com/tmthrgd/go-bindata/LICENSE new file mode 100644 index 00000000..62c39157 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/LICENSE @@ -0,0 +1,54 @@ +Copyright (c) 2017, Tom Thorogood. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Tom Thorogood nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- Portions of the source code are also covered by the following license: ---- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/README.md b/tools/vendor/github.com/tmthrgd/go-bindata/README.md new file mode 100644 index 00000000..37249f2a --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/README.md @@ -0,0 +1,189 @@ +# go-bindata (forked by tmthrgd) + +[![GoDoc](https://godoc.org/github.com/tmthrgd/go-bindata?status.svg)](https://godoc.org/github.com/tmthrgd/go-bindata) +[![Build Status](https://travis-ci.org/tmthrgd/go-bindata.svg?branch=master)](https://travis-ci.org/tmthrgd/go-bindata) +[![Go Report Card](https://goreportcard.com/badge/github.com/tmthrgd/go-bindata)](https://goreportcard.com/report/github.com/tmthrgd/go-bindata) + +This is a rewrite of go-bindata that started life as fork of a fork. It +was forked by [lestrrat](https://github.com/lestrrat/go-bindata) as +[jteeuwen](https://github.com/jteeuwen/go-bindata) seemed to have +abandoned the project. + +Since that fork, go-bindata has been +[largely rewritten](https://github.com/tmthrgd/go-bindata/compare/3adb6a8b66f07a123c3d44e8f6c7e78bbdd029c2...master) +and has become a standalone project. While the generated code has changed, +the generated API remains backwards compatible. The +[package API](https://godoc.org/github.com/tmthrgd/go-bindata) is not +backwards compatible. The CLI remains backwards compatible, but may not be +as feature complete as the package API. 
+ +The suggested way of using go-bindata is from a single .go file with an +ignore build tag (`// +build ignore`) run with +`//go:generate go run path/to/generate.go`. (See +[issue #2](https://github.com/tmthrgd/go-bindata/issues/2#issuecomment-290957538) +for reference). + +*Nota bene*: Most of the README that follows has not been updated to match +the changes made since rewriting go-bindata and likely does not accurately +represent the state of go-bindata. + +## bindata + +This package converts any file into manageable Go source code. Useful for +embedding binary data into a go program. The file data is optionally gzip +compressed before being converted to a raw byte slice. + +### Installation + +To install the library, use the following: + + go get -u github.com/tmthrgd/go-bindata/... + +### Accessing an asset + +To access asset data, we use the `Asset(string) ([]byte, error)` function which +is included in the generated output. + + data, err := Asset("pub/style/foo.css") + if err != nil { + // Asset was not found. + } + + // use asset data + + +### Debug vs Release builds + +When invoking the program with the `-debug` flag, the generated code does +not actually include the asset data. Instead, it generates function stubs +which load the data from the original file on disk. The asset API remains +identical between debug and release builds, so your code will not have to +change. + +This is useful during development when you expect the assets to change often. +The host application using these assets uses the same API in both cases and +will not have to care where the actual data comes from. + +An example is a Go webserver with some embedded, static web content like +HTML, JS and CSS files. While developing it, you do not want to rebuild the +whole server and restart it every time you make a change to a bit of +javascript. You just want to build and launch the server once. Then just press +refresh in the browser to see those changes. 
Embedding the assets with the +`debug` flag allows you to do just that. When you are finished developing and +ready for deployment, just re-invoke `go-bindata` without the `-debug` flag. +It will now embed the latest version of the assets. + + +### Lower memory footprint + +Using the `-nomemcopy` flag, will alter the way the output file is generated. +It will employ a hack that allows us to read the file data directly from +the compiled program's `.rodata` section. This ensures that when we +call our generated function, we omit unnecessary memcopies. + +The downside of this, is that it requires dependencies on the `reflect` and +`unsafe` packages. These may be restricted on platforms like AppEngine and +thus prevent you from using this mode. + +Another disadvantage is that the byte slice we create, is strictly read-only. +For most use-cases this is not a problem, but if you ever try to alter the +returned byte slice, a runtime panic is thrown. Use this mode only on target +platforms where memory constraints are an issue. + +The default behaviour is to use the old code generation method. This +prevents the two previously mentioned issues, but will employ at least one +extra memcopy and thus increase memory requirements. + +For instance, consider the following two examples: + +This would be the default mode, using an extra memcopy but gives a safe +implementation without dependencies on `reflect` and `unsafe`: + +```go +func myfile() []byte { + return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a} +} +``` + +Here is the same functionality, but uses the `.rodata` hack. +The byte slice returned from this example can not be written to without +generating a runtime error. 
+ +```go +var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a" + +func myfile() []byte { + var empty [0]byte + sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile)) + b := empty[:] + bx := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bx.Data = sx.Data + bx.Len = len(_myfile) + bx.Cap = bx.Len + return b +} +``` + + +### Optional compression + +When the `-nocompress` flag is given, the supplied resource is *not* GZIP +compressed before being turned into Go code. The data should still be accessed +through a function call, so nothing changes in the usage of the generated file. + +This feature is useful if you do not care for compression, or the supplied +resource is already compressed. Doing it again would not add any value and may +even increase the size of the data. + +The default behaviour of the program is to use compression. + + +### Path prefix stripping + +The keys used in the `_bindata` map, are the same as the input file name +passed to `go-bindata`. This includes the path. In most cases, this is not +desirable, as it puts potentially sensitive information in your code base. +For this purpose, the tool supplies another command line flag `-prefix`. +This accepts a portion of a path name, which should be stripped off from +the map keys and function names. + +For example, running without the `-prefix` flag, we get: + + $ go-bindata /path/to/templates/ + + _bindata["/path/to/templates/foo.html"] = path_to_templates_foo_html + +Running with the `-prefix` flag, we get: + + $ go-bindata -prefix "/path/to/" /path/to/templates/ + + _bindata["templates/foo.html"] = templates_foo_html + + +### Build tags + +With the optional `-tags` flag, you can specify any go build tags that +must be fulfilled for the output file to be included in a build. This +is useful when including binary data in multiple formats, where the desired +format is specified at build time with the appropriate tags. 
+ +The tags are appended to a `// +build` line in the beginning of the output file +and must follow the build tags syntax specified by the go tool. + +## Testing + +To execute the test case, run the following commands: + + go get -t -u github.com/tmthrgd/go-bindata + go test github.com/tmthrgd/go-bindata + +### Test corpus + +To generate the corpus-sha256sums needed for travis, run the following commands: + + [ -d .testcorpus ] && rm -r .testcorpus + go test -run TestCorpus -randtests 50 -corpus .testcorpus -gencorpus . + cd .testcorpus && sha256sum * > ../corpus-sha256sums; cd .. + +This must be done every time the generated code changes, but can be skipped while working +on a pull request until it is ready to merge. \ No newline at end of file diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/base32_compat.go b/tools/vendor/github.com/tmthrgd/go-bindata/base32_compat.go new file mode 100644 index 00000000..79eccced --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/base32_compat.go @@ -0,0 +1,22 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +// +build !go1.9 + +package bindata + +import ( + "encoding/base32" + "strings" +) + +var base32Enc = base32EncodingCompat{ + base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567"), +} + +type base32EncodingCompat struct{ *base32.Encoding } + +func (enc base32EncodingCompat) EncodeToString(src []byte) string { + return strings.TrimSuffix(enc.Encoding.EncodeToString(src), "=") +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/base32_go19.go b/tools/vendor/github.com/tmthrgd/go-bindata/base32_go19.go new file mode 100644 index 00000000..f374e5ef --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/base32_go19.go @@ -0,0 +1,11 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. 
+// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +// +build go1.9 + +package bindata + +import "encoding/base32" + +var base32Enc = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567").WithPadding(base32.NoPadding) diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/buffers.go b/tools/vendor/github.com/tmthrgd/go-bindata/buffers.go new file mode 100644 index 00000000..a32e0d98 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/buffers.go @@ -0,0 +1,49 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package bindata + +import ( + "bytes" + "io" + "os" + "sync" +) + +var bufPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func (asset *binAsset) copy(w io.Writer) error { + rc, err := asset.Open() + if err != nil { + return err + } + + n := 4 * 1024 + if s, ok := rc.(interface { + Stat() (os.FileInfo, error) + }); ok { + if fi, err := s.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = int(size) + bytes.MinRead + } + } + } + + buf := bufPool.Get().(*bytes.Buffer) + buf.Grow(n) + + _, err = io.CopyBuffer(w, rc, buf.Bytes()[:buf.Cap()]) + + if closeErr := rc.Close(); err == nil { + err = closeErr + } + + bufPool.Put(buf) + return err +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/common.go b/tools/vendor/github.com/tmthrgd/go-bindata/common.go new file mode 100644 index 00000000..4f6bc6fd --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/common.go @@ -0,0 +1,51 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. 
+ +package bindata + +import "text/template" + +func init() { + template.Must(baseTemplate.New("common").Parse(`// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + data, _, err := AssetAndInfo(name) + return data, err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + + return names +} + +{{- if $.Restore}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + return restore.Asset(dir, name, AssetAndInfo) +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + return restore.Assets(dir, name, AssetDir, AssetAndInfo) +} +{{- end}}`)) +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/config.go b/tools/vendor/github.com/tmthrgd/go-bindata/config.go new file mode 100644 index 00000000..78c7533d --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/config.go @@ -0,0 +1,209 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package bindata + +import ( + "errors" + "hash" + "os" + + "github.com/tmthrgd/go-bindata/internal/identifier" +) + +// HashFormat specifies which format to use when hashing names. +type HashFormat int + +const ( + // NameUnchanged leaves the file name unchanged. 
+ NameUnchanged HashFormat = iota + // DirHash formats names like path/to/hash/name.ext. + DirHash + // NameHashSuffix formats names like path/to/name-hash.ext. + NameHashSuffix + // HashWithExt formats names like path/to/hash.ext. + HashWithExt +) + +func (hf HashFormat) String() string { + switch hf { + case NameUnchanged: + return "unchanged" + case DirHash: + return "dir" + case NameHashSuffix: + return "namesuffix" + case HashWithExt: + return "hashext" + default: + return "unknown" + } +} + +// HashEncoding specifies which encoding to use when hashing names. +type HashEncoding int + +const ( + // HexHash uses hexadecimal encoding. + HexHash HashEncoding = iota + // Base32Hash uses unpadded, lowercase standard base32 + // encoding (see RFC 4648). + Base32Hash + // Base64Hash uses an unpadded URL-safe base64 encoding + // defined in RFC 4648. + Base64Hash +) + +func (he HashEncoding) String() string { + switch he { + case HexHash: + return "hex" + case Base32Hash: + return "base32" + case Base64Hash: + return "base64" + default: + return "unknown" + } +} + +// GenerateOptions defines a set of options to use +// when generating the Go code. +type GenerateOptions struct { + // Name of the package to use. + Package string + + // Tags specify a set of optional build tags, which should be + // included in the generated output. The tags are appended to a + // `// +build` line in the beginning of the output file + // and must follow the build tags syntax specified by the go tool. + Tags string + + // MemCopy will alter the way the output file is generated. + // + // If false, it will employ a hack that allows us to read the file data directly + // from the compiled program's `.rodata` section. This ensures that when we call + // call our generated function, we omit unnecessary mem copies. + // + // The downside of this, is that it requires dependencies on the `reflect` and + // `unsafe` packages. 
These may be restricted on platforms like AppEngine and + // thus prevent you from using this mode. + // + // Another disadvantage is that the byte slice we create, is strictly read-only. + // For most use-cases this is not a problem, but if you ever try to alter the + // returned byte slice, a runtime panic is thrown. Use this mode only on target + // platforms where memory constraints are an issue. + // + // The default behaviour is to use the old code generation method. This + // prevents the two previously mentioned issues, but will employ at least one + // extra memcopy and thus increase memory requirements. + // + // For instance, consider the following two examples: + // + // This would be the default mode, using an extra memcopy but gives a safe + // implementation without dependencies on `reflect` and `unsafe`: + // + // func myfile() []byte { + // return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a} + // } + // + // Here is the same functionality, but uses the `.rodata` hack. + // The byte slice returned from this example can not be written to without + // generating a runtime error. + // + // var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a" + // + // func myfile() []byte { + // var empty [0]byte + // sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile)) + // b := empty[:] + // bx := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + // bx.Data = sx.Data + // bx.Len = len(_myfile) + // bx.Cap = bx.Len + // return b + // } + MemCopy bool + + // Compress means the assets are GZIP compressed before being turned into + // Go code. The generated function will automatically unzip the file data + // when called. Defaults to true. + Compress bool + + // Perform a debug build. This generates an asset file, which + // loads the asset contents directly from disk at their original + // location, instead of embedding the contents in the code. + // + // This is mostly useful if you anticipate that the assets are + // going to change during your development cycle. 
You will always + // want your code to access the latest version of the asset. + // Only in release mode, will the assets actually be embedded + // in the code. The default behaviour is Release mode. + Debug bool + + // Perform a dev build, which is nearly identical to the debug option. The + // only difference is that instead of absolute file paths in generated code, + // it expects a variable, `rootDir`, to be set in the generated code's + // package (the author needs to do this manually), which it then prepends to + // an asset's name to construct the file path on disk. + // + // This is mainly so you can push the generated code file to a shared + // repository. + Dev bool + + // When true, the AssetDir API will be provided. + AssetDir bool + + // When true, only gzip decompress the data on first use. + DecompressOnce bool + + // [Deprecated]: use github.com/tmthrgd/go-bindata/restore. + Restore bool + + // When false, size, mode and modtime are not preserved from files + Metadata bool + // When nonzero, use this as mode for all files. + Mode os.FileMode + // When nonzero, use this as unix timestamp for all files. + ModTime int64 + + // Hash is used to produce a hash of the file. + Hash hash.Hash + // Which of the given name hashing formats to use. + HashFormat HashFormat + // The length of the hash to use, defaults to 16 characters. + HashLength uint + // The encoding to use to encode the name hash. + HashEncoding HashEncoding +} + +// validate ensures the config has sane values. +// Part of which means checking if certain file/directory paths exist. 
+func (opts *GenerateOptions) validate() error { + if len(opts.Package) == 0 { + return errors.New("go-bindata: missing package name") + } + + if identifier.Identifier(opts.Package) != opts.Package { + return errors.New("go-bindata: package name is not valid identifier") + } + + if opts.Metadata && (opts.Mode != 0 && opts.ModTime != 0) { + return errors.New("go-bindata: if Metadata is true, one of Mode or ModTime must be zero") + } + + if opts.Mode&^os.ModePerm != 0 { + return errors.New("go-bindata: invalid mode specified") + } + + if opts.Hash != nil && (opts.Debug || opts.Dev) { + return errors.New("go-bindata: Hash is not compatible with Debug and Dev") + } + + if opts.Restore && !opts.AssetDir { + return errors.New("go-bindata: Restore cannot be used without AssetDir") + } + + return nil +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/corpus-sha256sums b/tools/vendor/github.com/tmthrgd/go-bindata/corpus-sha256sums new file mode 100644 index 00000000..d918b6ab --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/corpus-sha256sums @@ -0,0 +1,68 @@ +fc42e0de8a85c266577354c445ac1c1e442518a5b4c8ee9805412e43122f71fb asset-dir.go +e75b6c1141dc6b286c9fe93d6a94ff417af042e3b619db1a7d8f58451e984e34 compress.go +528410b942177ea42119c5f23e39085cbf9dfce8ba96d509f5345609e59ce309 copy.go +4b6c823a2574f6c5f1cdc3437092b06297c0c6708e6e418c336a704a3bd6d3eb debug.go +1c0b2f7a7f97f82a4d89ce82b20a851772ddc7c8865528867bf6ca1977c7ac45 decompress-once.go +036697010d259574c5a15edb33f1e286af58a40e1ab91200b2a4b7d9fc40f187 default.go +63cb3ba6ffc51486843abe7eb2370c07a0d05146fc34a2379b364d68a31b49ef dev.go +b3d63994930c24602ebbe40437d07bdda9720d1cde48df43befd8a553e90504b hash-copy.go +33f7e152f69bc9e9881d74c82e3e27a2c130d1592b96727d3467615ee700788e hash-dir.go +3b8ff973c806b675eb75c227a0fe5d708dd2b4b8e3174b98391cbf3480877ece hash-enc-b32.go +38395de6c76cbf57eb6fc028b3d317ba2b2a5d9a843fb970df3a6f62b09a1fce hash-enc-b64.go 
+cbb867c075721ae0b7f85d00227e81f607f6310135379a5f4d98fb298909f56a hash-hashext.go +a9af7ffafc51642ef48af1d7622ff6a88c0c0c1b831cae2509ed769b12b52097 hash-suffix.go +362b96b80fae7b41934a33d8514d71664acb041624c981c61c172d4037502eaf hash-unchanged.go +e0512fb269ace4db9df83c075d9f82fceb63e3551a3c25e2dc551f67dc7c72f4 metadata.go +6cf3e7ac27ab257f89f37b9353ccb10cab8011072e9209cdf1700af82b0b1ac4 old-default.go +bf6f8e4f11b730a5ac3b13040d2ba50785296b6f993f1e0640fdc3c3e53f4d85 package.go +a59cbba67616477980ca99dc678a8e66270494235f84a203372841737535f824 random-#10.go +3ef50db7180413665ea96ebb63b6656168937a106fa9dbf0120cc2c40e166b20 random-#11.go +ac6d298a012ddf45ff33655d37b9e64c6a75ee2756038b0896b3f98a38a29c66 random-#12.go +a65c8e35b650cd922b85804314234415adc0732d9b46a2a95be5616e725a9341 random-#13.go +fad9e724379617bddb4131bcd93e2608f867782acae1439d9419dc356701ab0d random-#14.go +e96ccf210c4b1b8e51942ae8674d9dd3f38f71920bc54dc3590362e6ec8b7faf random-#15.go +d8a1df711301ba25dfb90204a953aa0722e7c821e03b90f84797dfafec418de3 random-#16.go +9256675f720083bf23aa380a0365a35b8719591bfb2f51b7e306e74c7a34d38a random-#17.go +f50b6362f47eeb7cfdadd4ab2f309fd2c99c76031c9e7335ff6ce06ab3c70285 random-#18.go +cc477c7647957306931d72b521a8d6919edd41f2bccf2dd8508381bf9f5105fe random-#19.go +5bd5a4569b63a4a3d2399c943fa5d6d813a0e1e97b36b009503549e34668fb81 random-#1.go +8a8a0fc1a2e417cba60eb025d3ce741897e69906b9133cdc10b7ac61bbb27bf4 random-#20.go +43d34aaad7e9a6c6192126bb941f94305bca8b61c77581662bc7ce5f2dbcbdc9 random-#21.go +b4798deab1655ad7b5e8b5b7cd1f51b444ff4a2560107fd9787535ab1d3d7f80 random-#22.go +6ee3706961f8d6e80ed40e8603678371524259dc30cfeb4d335a354f4ebf7f07 random-#23.go +4d74619c90d2c9385e28ef2321964878a88395f3b6968d67c003e3cb16a822b1 random-#24.go +34742bb4fc1edaea5a4fe679d1c0717b54445c52421e31b06fe73e8744bf0734 random-#25.go +a61cf74fdcd8b6bbbcbf02a3c58a52be164603fc6082facf7158c132bf5824aa random-#26.go +2d78d2006d49fcdd45d7b8f6b4318cd799f194067bf3fc635b33178161c7d63c 
random-#27.go +d348b22e3fdc3c98da7a7564483b08742cef36f54f3e30500ecf69ccd0b97777 random-#28.go +ebedf92840958164911c44045dfce0d3c7ed18973a3893bcfb6f9239e13b31b8 random-#29.go +c5ca8a9518e5f0926d48a621d595f48d76cb2891a40c0b22f3534a3a91031a4f random-#2.go +a0c77fa7246dd63e5f5dc4663d52cf6abf0e425a86726ebb96973f5b709f97c6 random-#30.go +d8d8ef43d718981ead2725cd15a17ef6fc33956dbca65bc9fd5ef5cf6b8da066 random-#31.go +254b340abe80d09fd7c6ba21bd7050b705462d7b6d640789489a3dfb3c1620de random-#32.go +d5dc83c6b344fd3374438c59fe1d2e42e6f5c15fcc11aeb2248f6a7f6882b596 random-#33.go +073ecfa891403d7a87d170fa6c9394ee2a2fff6d8fe621d68b3da0fdad2438e4 random-#34.go +7a293b4be49a2813643a74d32c91cc0a1cf1010a760d5d6ae40e3a94da70d70d random-#35.go +02fac0bed12bce3cf4ffb00376079f5b816634bc1b59a451643c1e8c0e860837 random-#36.go +201e6fab72a37d75665107b09dee6fb84722d4265fc8400dc855f6d5a449b27d random-#37.go +38d1db1022acb1e92725e6bb8caf8bcfbd5dea0ae85f8bd903c6c16993d51ee5 random-#38.go +a7ba47ad58d95821fede9cf11339b35e19c4173eb35131a459ed431dbb02a573 random-#39.go +42ad847d45c941ca674e660a581867c07f1f568242cac008589cdee8500474e2 random-#3.go +c38ad926d43afa044e784fc41a1f1576a1207713c390c1cc9d5f5c800ad7c056 random-#40.go +e300d3ccfbc656eefebd30e6b341a081163aea6b8e1e2d2bbad8ee9b5b82a1b5 random-#41.go +fc7d235f1f12d9d2d96be00810e66572a63adb86ff7cba17e77bc45018ade66a random-#42.go +56415a962fdd0a3453526620ad8dff8b90b5731f3b5f8247c0d33e35ae343002 random-#43.go +4ca790becea62b89cb687f81819f09f27274a2f64f6ca0ab919b07577d3ea12f random-#44.go +154ba30b7bd61dac5234e5616e2b5097722001234119442bcde4e4aa5db0a43a random-#45.go +86bbb3ad9cbba89cb1262677d490dd3a87ca091892010381fffdd2af8ad74761 random-#46.go +0d6b42a9702911ef1593047197dd19d0c6402a9b4542e53697281109b8eca566 random-#47.go +6384db969ed03ae0704657fbf3ad5b25a169f2166ee8cd643daa9d887af59aae random-#48.go +bf3ba0bc573208fdcc6a48067d4fbfb965da54f6b641752e192197a2db480120 random-#49.go +af77fcf2ac300945c9c726d0f1de13366522f4f6cb44190d036a86c140eb54d1 
random-#4.go +f672c3e1c8b3449695167bfad4b68dd7aff36b4d0148a1d82e429c3eb24be99c random-#50.go +7a9900d444ae8ec5e241a58dd2cca600d1dc2d1101d8f23e3dc65fcd2c2e560f random-#5.go +dd19e896cdb36b1e697f5e2e9e8ebf313c3ee2bd92412097f9ec1742635f11cf random-#6.go +f458f51ae48736aaa14fd0720322114c303ebab93d2dc6dddc33a1fb759407f1 random-#7.go +eb785847c9dbdd927b98e71376d00aca87e887a380b67ce8448c39b0caff5d98 random-#8.go +a27d2578f5aefa2086c3a296f1b1af989e126cce226a4a04172a37a7048f39eb random-#9.go +b1278bab5b1741bef1f32893397b8b1c61fa59182d95ca0c236c8995e24d9aa7 tags.go diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/corpus_test_travis.sh b/tools/vendor/github.com/tmthrgd/go-bindata/corpus_test_travis.sh new file mode 100755 index 00000000..e86f07b3 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/corpus_test_travis.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -ev + +if [ "${TRAVIS_PULL_REQUEST}" = "false" ]; then + go test -v -race -run TestCorpus -randtests 50 -corpus .travis-corpus -gencorpus . + cd .travis-corpus && sha256sum -c --quiet --strict ../corpus-sha256sums +fi \ No newline at end of file diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/debug.go b/tools/vendor/github.com/tmthrgd/go-bindata/debug.go new file mode 100644 index 00000000..0b430905 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/debug.go @@ -0,0 +1,84 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package bindata + +import "text/template" + +func init() { + template.Must(template.Must(template.Must(baseTemplate.New("debug").Funcs(template.FuncMap{ + "format": formatTemplate, + }).Parse(`import ( + "io/ioutil" + "os" + "path/filepath" +{{- if $.AssetDir}} + "strings" +{{- end}} +{{- if $.Restore}} + + "github.com/tmthrgd/go-bindata/restore" +{{- end}} +) + +// AssetAndInfo loads and returns the asset and asset info for the +// given name. 
It returns an error if the asset could not be found +// or could not be loaded. +func AssetAndInfo(name string) ([]byte, os.FileInfo, error) { + path, ok := _bindata[filepath.ToSlash(name)] + if !ok { + return nil, nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} + } + +{{- if $.Dev}} + + path = filepath.Join(rootDir, path) +{{- end}} + + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, nil, err + } + + fi, err := os.Stat(path) + if err != nil { + return nil, nil, err + } + + return data, fi, nil +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + path, ok := _bindata[filepath.ToSlash(name)] + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} + } + +{{- if $.Dev}} + + path = filepath.Join(rootDir, path) +{{- end}} + return os.Stat(path) +} + +// _bindata is a table, mapping each file to its path. +{{if $.Dev -}} + {{format "bindata-dev" $}} +{{- else -}} + {{format "bindata-debug" $}} +{{- end}}`)).New("bindata-debug").Parse(` +var _bindata = map[string]string{ +{{range .Assets -}} + {{printf "%q" .Name}}: {{printf "%q" .AbsolutePath}}, +{{end -}} +}`)).New("bindata-dev").Parse(` +var _bindata = map[string]string{ +{{range .Assets -}} + {{printf "%q" .Name}}: {{printf "%q" .Name}}, +{{end -}} +}`)) +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/doc.go b/tools/vendor/github.com/tmthrgd/go-bindata/doc.go new file mode 100644 index 00000000..894415fc --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/doc.go @@ -0,0 +1,128 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +/* +Package bindata converts any file into manageable Go source code. Useful for +embedding binary data into a go program. 
The file data is optionally gzip +compressed before being converted to a raw byte slice. + +The following paragraphs cover some of the customization options +which can be specified in the Config struct, which must be passed into +the Translate() call. + + +Debug vs Release builds + +When used with the `Debug` option, the generated code does not actually include +the asset data. Instead, it generates function stubs which load the data from +the original file on disk. The asset API remains identical between debug and +release builds, so your code will not have to change. + +This is useful during development when you expect the assets to change often. +The host application using these assets uses the same API in both cases and +will not have to care where the actual data comes from. + +An example is a Go webserver with some embedded, static web content like +HTML, JS and CSS files. While developing it, you do not want to rebuild the +whole server and restart it every time you make a change to a bit of +javascript. You just want to build and launch the server once. Then just press +refresh in the browser to see those changes. Embedding the assets with the +`debug` flag allows you to do just that. When you are finished developing and +ready for deployment, just re-invoke `go-bindata` without the `-debug` flag. +It will now embed the latest version of the assets. + + +Lower memory footprint + +The `MemCopy` option will alter the way the output file is generated. +If false, it will employ a hack that allows us to read the file data directly +from the compiled program's `.rodata` section. This ensures that when we call +call our generated function, we omit unnecessary memcopies. + +The downside of this, is that it requires dependencies on the `reflect` and +`unsafe` packages. These may be restricted on platforms like AppEngine and +thus prevent you from using this mode. + +Another disadvantage is that the byte slice we create, is strictly read-only. 
+For most use-cases this is not a problem, but if you ever try to alter the +returned byte slice, a runtime panic is thrown. Use this mode only on target +platforms where memory constraints are an issue. + +The default behaviour is to use the old code generation method. This +prevents the two previously mentioned issues, but will employ at least one +extra memcopy and thus increase memory requirements. + +For instance, consider the following two examples: + +This would be the default mode, using an extra memcopy but gives a safe +implementation without dependencies on `reflect` and `unsafe`: + + func myfile() []byte { + return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a} + } + +Here is the same functionality, but uses the `.rodata` hack. +The byte slice returned from this example can not be written to without +generating a runtime error. + + var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a" + + func myfile() []byte { + var empty [0]byte + sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile)) + b := empty[:] + bx := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bx.Data = sx.Data + bx.Len = len(_myfile) + bx.Cap = bx.Len + return b + } + + +Optional compression + +The Compress option indicates that the supplied assets are GZIP compressed before +being turned into Go code. The data should still be accessed through a function +call, so nothing changes in the API. + +This feature is useful if you do not care for compression, or the supplied +resource is already compressed. Doing it again would not add any value and may +even increase the size of the data. + +The default behaviour of the program is to use compression. + + +Path prefix stripping + +The keys used in the `_bindata` map are the same as the input file name +passed to `go-bindata`. This includes the path. In most cases, this is not +desirable, as it puts potentially sensitive information in your code base. +For this purpose, the tool supplies another command line flag `-prefix`. 
+This accepts a portion of a path name, which should be stripped off from +the map keys and function names. + +For example, running without the `-prefix` flag, we get: + + $ go-bindata /path/to/templates/ + + _bindata["/path/to/templates/foo.html"] = path_to_templates_foo_html + +Running with the `-prefix` flag, we get: + + $ go-bindata -prefix "/path/to/" /path/to/templates/ + + _bindata["templates/foo.html"] = templates_foo_html + + +Build tags + +With the optional Tags field, you can specify any go build tags that +must be fulfilled for the output file to be included in a build. This +is useful when including binary data in multiple formats, where the desired +format is specified at build time with the appropriate tags. + +The tags are appended to a `// +build` line in the beginning of the output file +and must follow the build tags syntax specified by the go tool. +*/ +package bindata diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/files.go b/tools/vendor/github.com/tmthrgd/go-bindata/files.go new file mode 100644 index 00000000..91ca9c64 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/files.go @@ -0,0 +1,130 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package bindata + +import ( + "io" + "os" + "path/filepath" + "regexp" + "strings" +) + +// File represents a single asset file. +type File interface { + // Name returns the name by which asset is referenced. + Name() string + // Path returns the relative path to the file. + Path() string + // AbsolutePath returns the absolute path to the file. + AbsolutePath() string + + // Open returns an io.ReadCloser for reading the file. + Open() (io.ReadCloser, error) + // Stat returns an os.FileInfo interface representing the file. + Stat() (os.FileInfo, error) +} + +// Files represents a collection of asset files. 
+type Files []File + +type osFile struct { + name string + path string +} + +func (f *osFile) Name() string { + return f.name +} + +func (f *osFile) Path() string { + return f.path +} + +func (f *osFile) AbsolutePath() string { + path, err := filepath.Abs(f.path) + if err != nil { + return f.path + } + + return path +} + +func (f *osFile) Open() (io.ReadCloser, error) { + return os.Open(f.path) +} + +func (f *osFile) Stat() (os.FileInfo, error) { + return os.Stat(f.path) +} + +// FindFilesOptions defines a set of options to use +// when searching for files. +type FindFilesOptions struct { + // Prefix defines a path prefix which should be stripped from all + // file names when generating the keys in the table of contents. + // For example, running without the `-prefix` flag, we get: + // + // $ go-bindata /path/to/templates + // go_bindata["/path/to/templates/foo.html"] = _path_to_templates_foo_html + // + // Running with the `-prefix` flag, we get: + // + // $ go-bindata -prefix "/path/to/" /path/to/templates/foo.html + // go_bindata["templates/foo.html"] = templates_foo_html + Prefix string + + // Recursive defines whether subdirectories of Path + // should be recursively included in the conversion. + Recursive bool + + // Ignores any filenames matching the regex pattern specified, e.g. + // path/to/file.ext will ignore only that file, or \\.gitignore + // will match any .gitignore file. + // + // This parameter can be provided multiple times. + Ignore []*regexp.Regexp +} + +// FindFiles adds all files inside a directory to the +// generated output. 
+func FindFiles(path string, opts *FindFilesOptions) (files Files, err error) { + if opts == nil { + opts = new(FindFilesOptions) + } + + if err = filepath.Walk(path, func(assetPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + if !opts.Recursive && assetPath != path { + return filepath.SkipDir + } + + return nil + } + + for _, re := range opts.Ignore { + if re.MatchString(assetPath) { + return nil + } + } + + name := strings.TrimPrefix(filepath.ToSlash( + strings.TrimPrefix(assetPath, opts.Prefix)), "/") + if name == "" { + panic("should be impossible") + } + + files = append(files, &osFile{name, assetPath}) + return nil + }); err != nil { + return nil, err + } + + return +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/format.go b/tools/vendor/github.com/tmthrgd/go-bindata/format.go new file mode 100644 index 00000000..d6bb30a1 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/format.go @@ -0,0 +1,47 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. 
+ +package bindata + +import ( + "bytes" + "go/parser" + "go/printer" + "go/token" +) + +var printerConfig = printer.Config{ + Mode: printer.UseSpaces | printer.TabIndent, + Tabwidth: 8, +} + +func formatTemplate(name string, data interface{}) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + bufPool.Put(buf) + }() + + buf.WriteString("package main;") + + if err := baseTemplate.ExecuteTemplate(buf, name, data); err != nil { + return "", err + } + + fset := token.NewFileSet() + + f, err := parser.ParseFile(fset, "", buf, parser.ParseComments) + if err != nil { + return "", err + } + + buf.Reset() + + if err = printerConfig.Fprint(buf, fset, f); err != nil { + return "", err + } + + out := string(bytes.TrimSpace(buf.Bytes()[len("package main\n"):])) + return out, nil +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/generate.go b/tools/vendor/github.com/tmthrgd/go-bindata/generate.go new file mode 100644 index 00000000..21785d26 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/generate.go @@ -0,0 +1,76 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package bindata + +import ( + "io" + "text/template" +) + +// binAsset holds information about a single asset to be processed. +type binAsset struct { + File + + opts *GenerateOptions + Hash []byte // Generated hash of file. + mangledName string +} + +// Generate writes the generated Go code to w. 
+func (f Files) Generate(w io.Writer, opts *GenerateOptions) error { + if opts == nil { + opts = &GenerateOptions{Package: "main"} + } + + err := opts.validate() + if err != nil { + return err + } + + assets := make([]binAsset, 0, len(f)) + for i, file := range f { + asset := binAsset{ + File: file, + + opts: opts, + } + + if opts.Hash != nil { + if i != 0 { + opts.Hash.Reset() + } + + if err = asset.copy(opts.Hash); err != nil { + return err + } + + asset.Hash = opts.Hash.Sum(nil) + } + + assets = append(assets, asset) + } + + return baseTemplate.Execute(w, struct { + *GenerateOptions + Assets []binAsset + }{opts, assets}) +} + +var baseTemplate = template.Must(template.New("base").Parse(` +{{- template "header" .}} + +{{if or $.Debug $.Dev -}} +{{- template "debug" . -}} +{{- else -}} +{{- template "release" . -}} +{{- end}} + +{{template "common" . -}} + +{{- if $.AssetDir}} + +{{template "tree" . -}} +{{- end}} +`)) diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/go-bindata/appendRegexValue.go b/tools/vendor/github.com/tmthrgd/go-bindata/go-bindata/appendRegexValue.go new file mode 100644 index 00000000..fbf4ac05 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/go-bindata/appendRegexValue.go @@ -0,0 +1,44 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "regexp" +) + +type appendRegexValue []*regexp.Regexp + +func (ar *appendRegexValue) String() string { + if ar == nil { + return "" + } + + var buf bytes.Buffer + + for i, r := range *ar { + if i != 0 { + buf.WriteString(", ") + } + + buf.WriteString(r.String()) + } + + return buf.String() +} + +func (ar *appendRegexValue) Set(value string) error { + r, err := regexp.Compile(value) + if err != nil { + return err + } + + if *ar == nil { + *ar = make([]*regexp.Regexp, 0, 1) + } + + *ar = append(*ar, r) + return nil +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/go-bindata/main.go b/tools/vendor/github.com/tmthrgd/go-bindata/go-bindata/main.go new file mode 100644 index 00000000..2f8af1a3 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/go-bindata/main.go @@ -0,0 +1,178 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package main + +import ( + "errors" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/tmthrgd/go-bindata" + "github.com/tmthrgd/go-bindata/internal/identifier" +) + +func must(err error) { + if err == nil { + return + } + + fmt.Fprintf(os.Stderr, "go-bindata: %v\n", err) + os.Exit(1) +} + +func main() { + genOpts, findOpts, output := parseArgs() + + var all bindata.Files + + for i := 0; i < flag.NArg(); i++ { + var path string + path, findOpts.Recursive = parseInput(flag.Arg(i)) + + files, err := bindata.FindFiles(path, findOpts) + must(err) + + all = append(all, files...) + } + + f, err := os.OpenFile(output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + must(err) + + defer f.Close() + + must(all.Generate(f, genOpts)) +} + +// parseArgs create s a new, filled configuration instance +// by reading and parsing command line options. 
+// +// This function exits the program with an error, if +// any of the command line options are incorrect. +func parseArgs() (genOpts *bindata.GenerateOptions, findOpts *bindata.FindFilesOptions, output string) { + flag.Usage = func() { + fmt.Printf("Usage: %s [options] \n\n", os.Args[0]) + flag.PrintDefaults() + } + + var version bool + flag.BoolVar(&version, "version", false, "Displays version information.") + + flag.StringVar(&output, "o", "./bindata.go", "Optional name of the output file to be generated.") + + genOpts = &bindata.GenerateOptions{ + Package: "main", + MemCopy: true, + Compress: true, + Metadata: true, + Restore: true, + AssetDir: true, + DecompressOnce: true, + } + findOpts = new(bindata.FindFilesOptions) + + var noMemCopy, noCompress, noMetadata bool + var mode uint + flag.BoolVar(&genOpts.Debug, "debug", genOpts.Debug, "Do not embed the assets, but provide the embedding API. Contents will still be loaded from disk.") + flag.BoolVar(&genOpts.Dev, "dev", genOpts.Dev, "Similar to debug, but does not emit absolute paths. Expects a rootDir variable to already exist in the generated code's package.") + flag.StringVar(&genOpts.Tags, "tags", genOpts.Tags, "Optional set of build tags to include.") + flag.StringVar(&findOpts.Prefix, "prefix", "", "Optional path prefix to strip off asset names.") + flag.StringVar(&genOpts.Package, "pkg", genOpts.Package, "Package name to use in the generated code.") + flag.BoolVar(&noMemCopy, "nomemcopy", !genOpts.MemCopy, "Use a .rodata hack to get rid of unnecessary memcopies. 
Refer to the documentation to see what implications this carries.") + flag.BoolVar(&noCompress, "nocompress", !genOpts.Compress, "Assets will *not* be GZIP compressed when this flag is specified.") + flag.BoolVar(&noMetadata, "nometadata", !genOpts.Metadata, "Assets will not preserve size, mode, and modtime info.") + flag.UintVar(&mode, "mode", uint(genOpts.Mode), "Optional file mode override for all files.") + flag.Int64Var(&genOpts.ModTime, "modtime", genOpts.ModTime, "Optional modification unix timestamp override for all files.") + flag.Var((*appendRegexValue)(&findOpts.Ignore), "ignore", "Regex pattern to ignore") + + flag.Parse() + + if version { + fmt.Fprintf(os.Stderr, "go-bindata (Go runtime %s).\n", runtime.Version()) + io.WriteString(os.Stderr, "Copyright (c) 2010-2013, Jim Teeuwen.\n") + io.WriteString(os.Stderr, "Copyright (c) 2017, Tom Thorogood.\n") + os.Exit(0) + } + + // Make sure we have input paths. + if flag.NArg() == 0 { + io.WriteString(os.Stderr, "Missing \n\n") + flag.Usage() + os.Exit(1) + } + + if output == "" { + var err error + output, err = filepath.Abs("bindata.go") + must(err) + } + + genOpts.MemCopy = !noMemCopy + genOpts.Compress = !noCompress + genOpts.Metadata = !noMetadata && (genOpts.Mode == 0 || genOpts.ModTime == 0) + + genOpts.Mode = os.FileMode(mode) + + var pkgSet, outputSet bool + flag.Visit(func(f *flag.Flag) { + switch f.Name { + case "pkg": + pkgSet = true + case "o": + outputSet = true + } + }) + + // Change pkg to containing directory of output. If output flag is set and package flag is not. 
+ if outputSet && !pkgSet { + pkg := identifier.Identifier(filepath.Base(filepath.Dir(output))) + if pkg != "" { + genOpts.Package = pkg + } + } + + if !genOpts.MemCopy && genOpts.Compress { + io.WriteString(os.Stderr, "The use of -nomemcopy without -nocompress is deprecated.\n") + } + + must(validateOutput(output)) + return +} + +func validateOutput(output string) error { + stat, err := os.Lstat(output) + if err == nil { + if stat.IsDir() { + return errors.New("output path is a directory") + } + + return nil + } else if !os.IsNotExist(err) { + return err + } + + // File does not exist. This is fine, just make + // sure the directory it is to be in exists. + if dir, _ := filepath.Split(output); dir != "" { + return os.MkdirAll(dir, 0744) + } + + return nil +} + +// parseInput determines whether the given path has a recursive indicator and +// returns a new path with the recursive indicator chopped off if it does. +// +// ex: +// /path/to/foo/... -> (/path/to/foo, true) +// /path/to/bar -> (/path/to/bar, false) +func parseInput(input string) (path string, recursive bool) { + return filepath.Clean(strings.TrimSuffix(input, "/...")), + strings.HasSuffix(input, "/...") +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/header.go b/tools/vendor/github.com/tmthrgd/go-bindata/header.go new file mode 100644 index 00000000..3e9e42d4 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/header.go @@ -0,0 +1,73 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package bindata + +import ( + "path/filepath" + "strings" + "text/template" + "unicode" +) + +func init() { + template.Must(baseTemplate.New("header").Funcs(template.FuncMap{ + "trimright": func(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) + }, + "toslash": filepath.ToSlash, + }).Parse(`{{- /* This makes e.g. Github ignore diffs in generated files. 
*/ -}} +// Code generated by go-bindata. DO NOT EDIT. +{{if $.Dev -}} + // debug: dev +{{else if $.Debug -}} + // debug: true +{{end -}} +{{- if $.MemCopy -}} + // memcopy: true +{{end -}} +{{- if $.Compress -}} + // compress: true +{{end -}} +{{- if and $.Compress $.DecompressOnce -}} + // decompress: once +{{end -}} +{{- if $.Metadata -}} + // metadata: true +{{end -}} +{{- if $.Mode -}} + // mode: {{printf "%04o" $.Mode}} +{{end -}} +{{- if $.ModTime -}} + // modtime: {{$.ModTime}} +{{end -}} +{{- if $.AssetDir -}} + // asset-dir: true +{{end -}} +{{- if $.Restore -}} + // restore: true +{{end -}} +{{- if $.Hash -}} +{{- if $.HashFormat -}} + // hash-format: {{$.HashFormat}} +{{else -}} + // hash-format: unchanged +{{end -}} +{{- if and $.HashFormat $.HashLength (ne $.HashLength 16) -}} + // hash-length: {{$.HashLength}} +{{end -}} +{{- if and $.HashFormat $.HashEncoding -}} + // hash-encoding: {{$.HashEncoding}} +{{end -}} +{{- end -}} +// sources: +{{range .Assets -}} + // {{toslash (trimright .Path)}} +{{end}} +{{if $.Tags -}} // +build {{$.Tags}} + +{{end -}} + +package {{$.Package}}`)) +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/internal/identifier/identifier.go b/tools/vendor/github.com/tmthrgd/go-bindata/internal/identifier/identifier.go new file mode 100644 index 00000000..ff7a7be5 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/internal/identifier/identifier.go @@ -0,0 +1,31 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package identifier + +import ( + "strings" + "unicode" + "unicode/utf8" +) + +// Identifier removes all characters from a string that are not valid in +// an identifier according to the Go Programming Language Specification. 
+// +// The logic in the switch statement was taken from go/source package: +// https://github.com/golang/go/blob/a1a688fa0012f7ce3a37e9ac0070461fe8e3f28e/src/go/scanner/scanner.go#L257-#L271 +func Identifier(val string) string { + return strings.TrimLeftFunc(strings.Map(func(ch rune) rune { + switch { + case 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || + ch >= utf8.RuneSelf && unicode.IsLetter(ch): + return ch + case '0' <= ch && ch <= '9' || + ch >= utf8.RuneSelf && unicode.IsDigit(ch): + return ch + default: + return -1 + } + }, val), unicode.IsDigit) +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/name.go b/tools/vendor/github.com/tmthrgd/go-bindata/name.go new file mode 100644 index 00000000..cd41369a --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/name.go @@ -0,0 +1,61 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +package bindata + +import ( + "encoding/base64" + "encoding/hex" + "path" + "strings" +) + +// Name applies name hashing if required. It returns the original +// name for NoHash and NameUnchanged and returns the mangledName +// otherwise. 
+func (asset *binAsset) Name() string { + if asset.Hash == nil || asset.opts.HashFormat == NameUnchanged { + return asset.File.Name() + } else if asset.mangledName != "" { + return asset.mangledName + } + + var enc string + switch asset.opts.HashEncoding { + case HexHash: + enc = hex.EncodeToString(asset.Hash) + case Base32Hash: + enc = base32Enc.EncodeToString(asset.Hash) + case Base64Hash: + enc = base64.RawURLEncoding.EncodeToString(asset.Hash) + default: + panic("unreachable") + } + + l := asset.opts.HashLength + if l == 0 { + l = 16 + } + + if l < uint(len(enc)) { + enc = enc[:l] + } + + dir, file := path.Split(asset.File.Name()) + ext := path.Ext(file) + + switch asset.opts.HashFormat { + case DirHash: + asset.mangledName = path.Join(dir, enc, file) + case NameHashSuffix: + file = strings.TrimSuffix(file, ext) + asset.mangledName = path.Join(dir, file+"-"+enc+ext) + case HashWithExt: + asset.mangledName = path.Join(dir, enc+ext) + default: + panic("unreachable") + } + + return asset.mangledName +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/release.go b/tools/vendor/github.com/tmthrgd/go-bindata/release.go new file mode 100644 index 00000000..c57dd9fe --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/release.go @@ -0,0 +1,328 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. 
+ +package bindata + +import ( + "bytes" + "compress/flate" + "io" + "path" + "strings" + "sync" + "text/template" +) + +var flatePool sync.Pool + +func writeWrappedString(write func(io.Writer) error, indent string, wrapAt int) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + bufPool.Put(buf) + }() + + buf.WriteString("(\"\" +\n") + buf.WriteString(indent) + buf.WriteByte('"') + + if err := write(&stringWriter{ + Writer: buf, + Indent: indent, + WrapAt: wrapAt, + }); err != nil { + return "", err + } + + buf.WriteString("\")") + + s := buf.String() + + if strings.IndexByte(s[1:], '(') == -1 { + s = s[1 : len(s)-1] + } + + return s, nil +} + +func init() { + template.Must(template.Must(baseTemplate.New("release").Funcs(template.FuncMap{ + "base": path.Base, + "wrap": func(data []byte, indent string, wrapAt int) (string, error) { + return writeWrappedString(func(w io.Writer) error { + _, err := w.Write(data) + return err + }, indent, wrapAt) + }, + "read": func(asset binAsset, indent string, wrapAt int) (string, error) { + return writeWrappedString(asset.copy, indent, wrapAt) + }, + "flate": func(asset binAsset, indent string, wrapAt int) (out string, err error) { + return writeWrappedString(func(w io.Writer) error { + fw, _ := flatePool.Get().(*flate.Writer) + if fw != nil { + fw.Reset(w) + } else if fw, err = flate.NewWriter(w, flate.BestCompression); err != nil { + return err + } + defer flatePool.Put(fw) + + if err := asset.copy(fw); err != nil { + return err + } + + return fw.Close() + }, indent, wrapAt) + }, + "format": formatTemplate, + }).Parse(` +{{- $unsafeRead := and (not $.Compress) (not $.MemCopy) -}} +import ( +{{- if $.Compress}} + "bytes" + "compress/flate" + "io" +{{- end}} + "os" + "path/filepath" +{{- if $unsafeRead}} + "reflect" +{{- end}} +{{- if or $.Compress $.AssetDir}} + "strings" +{{- end}} +{{- if and $.Compress $.DecompressOnce}} + "sync" +{{- end}} + "time" +{{- if $unsafeRead}} + "unsafe" +{{- 
end}} +{{- if $.Restore}} + + "github.com/tmthrgd/go-bindata/restore" +{{- end}} +) + +{{if $unsafeRead -}} +func bindataRead(data string) []byte { + var empty [0]byte + sx := (*reflect.StringHeader)(unsafe.Pointer(&data)) + b := empty[:] + bx := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bx.Data = sx.Data + bx.Len = len(data) + bx.Cap = bx.Len + return b +} + +{{end -}} + +type asset struct { + name string +{{- if and $.Hash $.HashFormat}} + orig string +{{- end}} + data string +{{- if $.Compress}} + size int64 +{{- end -}} +{{- if and $.Metadata (not $.Mode)}} + mode os.FileMode +{{- end -}} +{{- if and $.Metadata (not $.ModTime)}} + time time.Time +{{- end -}} +{{- if $.Hash}} + hash string +{{- end}} +{{- if and $.Compress $.DecompressOnce}} + + once sync.Once + bytes []byte + err error +{{- end}} +} + +func (a *asset) Name() string { + return a.name +} + +func (a *asset) Size() int64 { +{{- if $.Compress}} + return a.size +{{- else}} + return int64(len(a.data)) +{{- end}} +} + +func (a *asset) Mode() os.FileMode { +{{- if $.Mode}} + return {{printf "%04o" $.Mode}} +{{- else if $.Metadata}} + return a.mode +{{- else}} + return 0 +{{- end}} +} + +func (a *asset) ModTime() time.Time { +{{- if $.ModTime}} + return time.Unix({{$.ModTime}}, 0) +{{- else if $.Metadata}} + return a.time +{{- else}} + return time.Time{} +{{- end}} +} + +func (*asset) IsDir() bool { + return false +} + +func (*asset) Sys() interface{} { + return nil +} + +{{- if $.Hash}} + +func (a *asset) OriginalName() string { +{{- if $.HashFormat}} + return a.orig +{{- else}} + return a.name +{{- end}} +} + +func (a *asset) FileHash() []byte { +{{- if $unsafeRead}} + return bindataRead(a.hash) +{{- else}} + return []byte(a.hash) +{{- end}} +} + +type FileInfo interface { + os.FileInfo + + OriginalName() string + FileHash() []byte +} +{{- end}} + +// _bindata is a table, holding each asset generator, mapped to its name. 
+var _bindata = map[string]*asset{ +{{range $.Assets}} {{printf "%q" .Name}}: &asset{ + name: {{printf "%q" (base .Name)}}, + {{- if and $.Hash $.HashFormat}} + orig: {{printf "%q" .File.Name}}, + {{- end}} + data: {{if $.Compress -}} + {{flate . "\t\t\t" 24}} + {{- else -}} + {{read . "\t\t\t" 24}} + {{- end}}, + + {{- if or $.Metadata $.Compress -}} + {{- $info := .Stat -}} + + {{- if $.Compress}} + size: {{$info.Size}}, + {{- end -}} + + {{- if and $.Metadata (not $.Mode)}} + mode: {{printf "%04o" $info.Mode}}, + {{- end -}} + + {{- if and $.Metadata (not $.ModTime)}} + {{$mod := $info.ModTime -}} + time: time.Unix({{$mod.Unix}}, {{$mod.Nanosecond}}), + {{- end -}} + {{- end -}} + + {{- if $.Hash}} + hash: {{wrap .Hash "\t\t\t" 24}}, + {{- end}} + }, +{{end -}} +} + +// AssetAndInfo loads and returns the asset and asset info for the +// given name. It returns an error if the asset could not be found +// or could not be loaded. +func AssetAndInfo(name string) ([]byte, os.FileInfo, error) { + a, ok := _bindata[filepath.ToSlash(name)] + if !ok { + return nil, nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} + } +{{if and $.Compress $.DecompressOnce}} + a.once.Do(func() { + fr := flate.NewReader(strings.NewReader(a.data)) + + var buf bytes.Buffer + if _, a.err = io.Copy(&buf, fr); a.err != nil { + return + } + + if a.err = fr.Close(); a.err == nil { + a.bytes = buf.Bytes() + } + }) + if a.err != nil { + return nil, nil, &os.PathError{Op: "read", Path: name, Err: a.err} + } + + return a.bytes, a, nil +{{- else if $.Compress}} + fr := flate.NewReader(strings.NewReader(a.data)) + + var buf bytes.Buffer + if _, err := io.Copy(&buf, fr); err != nil { + return nil, nil, &os.PathError{Op: "read", Path: name, Err: err} + } + + if err := fr.Close(); err != nil { + return nil, nil, &os.PathError{Op: "read", Path: name, Err: err} + } + + return buf.Bytes(), a, nil +{{- else if $unsafeRead}} + return bindataRead(a.data), a, nil +{{- else}} + return 
[]byte(a.data), a, nil +{{- end}} +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + a, ok := _bindata[filepath.ToSlash(name)] + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} + } + + return a, nil +} + +{{- if and $.Hash $.HashFormat}} + +{{format "hashnames" $}} + +// AssetName returns the hashed name associated with an asset of a +// given name. +func AssetName(name string) (string, error) { + if name, ok := _hashNames[filepath.ToSlash(name)]; ok { + return name, nil + } + + return "", &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} +} +{{- end}}`)).New("hashnames").Parse(` +var _hashNames = map[string]string{ +{{range .Assets -}} + {{printf "%q" .File.Name}}: {{printf "%q" .Name}}, +{{end -}} +}`)) +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/stringwriter.go b/tools/vendor/github.com/tmthrgd/go-bindata/stringwriter.go new file mode 100644 index 00000000..132fe876 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/stringwriter.go @@ -0,0 +1,71 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. 
+ +package bindata + +import "io" + +var ( + stringWriterLinePrefix = []byte(`"`) + stringWriterLineSuffix = []byte("\" +\n") + stringWriterParensLineSuffix = []byte("\") + (\"\" +\n") +) + +type stringWriter struct { + io.Writer + Indent string + WrapAt int + c, l int +} + +func (w *stringWriter) Write(p []byte) (n int, err error) { + buf := [4]byte{'\\', 'x', 0, 0} + + for _, b := range p { + const lowerHex = "0123456789abcdef" + buf[2] = lowerHex[b/16] + buf[3] = lowerHex[b%16] + + if _, err = w.Writer.Write(buf[:]); err != nil { + return + } + + n++ + w.c++ + + if w.WrapAt == 0 || w.c%w.WrapAt != 0 { + continue + } + + w.l++ + + suffix := stringWriterLineSuffix + if w.l%500 == 0 { + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s499) + (s500 + + // ... + s1999) + etc + (etc + ... + sN). + // + // This fix was taken from the fix applied to x/text in + // https://github.com/golang/text/commit/5c6cf4f9a2. + + suffix = stringWriterParensLineSuffix + } + + if _, err = w.Writer.Write(suffix); err != nil { + return + } + + if _, err = io.WriteString(w.Writer, w.Indent); err != nil { + return + } + + if _, err = w.Writer.Write(stringWriterLinePrefix); err != nil { + return + } + } + + return +} diff --git a/tools/vendor/github.com/tmthrgd/go-bindata/tree.go b/tools/vendor/github.com/tmthrgd/go-bindata/tree.go new file mode 100644 index 00000000..daf19ce2 --- /dev/null +++ b/tools/vendor/github.com/tmthrgd/go-bindata/tree.go @@ -0,0 +1,98 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. 
+ +package bindata + +import ( + "strings" + "text/template" +) + +type assetTree struct { + Asset binAsset + Children map[string]*assetTree + Depth int +} + +func newAssetTree() *assetTree { + return &assetTree{ + Children: make(map[string]*assetTree), + } +} + +func (node *assetTree) child(name string) *assetTree { + rv, ok := node.Children[name] + if !ok { + rv = newAssetTree() + rv.Depth = node.Depth + 1 + node.Children[name] = rv + } + + return rv +} + +func init() { + template.Must(template.Must(baseTemplate.New("tree").Funcs(template.FuncMap{ + "tree": func(toc []binAsset) *assetTree { + tree := newAssetTree() + for _, asset := range toc { + node := tree + for _, name := range strings.Split(asset.Name(), "/") { + node = node.child(name) + } + + node.Asset = asset + } + + return tree + }, + "format": formatTemplate, + }).Parse(`// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + + if name != "" { + var ok bool + for _, p := range strings.Split(filepath.ToSlash(name), "/") { + if node, ok = node[p]; !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} + } + } + } + + if len(node) == 0 { + return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} + } + + rv := make([]string, 0, len(node)) + for name := range node { + rv = append(rv, name) + } + + return rv, nil +} + +type bintree map[string]bintree + +{{format "bintree" (tree .Assets)}}`)).New("bintree").Parse(` +{{- if not .Depth -}} +var _bintree = {{end -}} +bintree{ +{{range $k, $v := .Children -}} + {{printf "%q" $k}}: {{template "bintree" $v}}, +{{end -}} +}`)) +} diff --git a/tools/vendor/golang.org/x/lint/CONTRIBUTING.md b/tools/vendor/golang.org/x/lint/CONTRIBUTING.md new file mode 100644 index 00000000..971da126 --- /dev/null +++ b/tools/vendor/golang.org/x/lint/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing to Golint + +## Before filing an issue: + +### Are you having trouble building golint? + +Check you have the latest version of its dependencies. Run +``` +go get -u github.com/golang/lint +``` +If you still have problems, consider searching for existing issues before filing a new issue. + +## Before sending a pull request: + +Have you understood the purpose of golint? Make sure to carefully read `README`. diff --git a/tools/vendor/golang.org/x/lint/LICENSE b/tools/vendor/golang.org/x/lint/LICENSE new file mode 100644 index 00000000..65d761bc --- /dev/null +++ b/tools/vendor/golang.org/x/lint/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tools/vendor/golang.org/x/lint/README.md b/tools/vendor/golang.org/x/lint/README.md new file mode 100644 index 00000000..d622617c --- /dev/null +++ b/tools/vendor/golang.org/x/lint/README.md @@ -0,0 +1,85 @@ +Golint is a linter for Go source code. + +[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) + +## Installation + +Golint requires Go 1.6 or later. + + go get -u golang.org/x/lint/golint + +## Usage + +Invoke `golint` with one or more filenames, directories, or packages named +by its import path. Golint uses the same +[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as +the `go` command and therefore +also supports relative import paths like `./...`. 
Additionally the `...` +wildcard can be used as suffix on relative and absolute file paths to recurse +into them. + +The output of this tool is a list of suggestions in Vim quickfix format, +which is accepted by lots of different editors. + +## Purpose + +Golint differs from gofmt. Gofmt reformats Go source code, whereas +golint prints out style mistakes. + +Golint differs from govet. Govet is concerned with correctness, whereas +golint is concerned with coding style. Golint is in use at Google, and it +seeks to match the accepted style of the open source Go project. + +The suggestions made by golint are exactly that: suggestions. +Golint is not perfect, and has both false positives and false negatives. +Do not treat its output as a gold standard. We will not be adding pragmas +or other knobs to suppress specific warnings, so do not expect or require +code to be completely "lint-free". +In short, this tool is not, and will never be, trustworthy enough for its +suggestions to be enforced automatically, for example as part of a build process. +Golint makes suggestions for many of the mechanically checkable items listed in +[Effective Go](https://golang.org/doc/effective_go.html) and the +[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). + +## Scope + +Golint is meant to carry out the stylistic conventions put forth in +[Effective Go](https://golang.org/doc/effective_go.html) and +[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). +Changes that are not aligned with those documents will not be considered. + +## Contributions + +Contributions to this project are welcome provided they are [in scope](#scope), +though please send mail before starting work on anything major. +Contributors retain their copyright, so we need you to fill out +[a short form](https://developers.google.com/open-source/cla/individual) +before we can accept your contribution. 
+ +## Vim + +Add this to your ~/.vimrc: + + set rtp+=$GOPATH/src/github.com/golang/lint/misc/vim + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running `:Lint` will run golint on the current file and populate the quickfix list. + +Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` + + autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow + + +## Emacs + +Add this to your `.emacs` file: + + (add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs")) + (require 'golint) + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running M-x golint will run golint on the current file. + +For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). diff --git a/tools/vendor/golang.org/x/lint/lint.go b/tools/vendor/golang.org/x/lint/lint.go new file mode 100644 index 00000000..cc6fef2d --- /dev/null +++ b/tools/vendor/golang.org/x/lint/lint.go @@ -0,0 +1,1708 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package lint contains a linter for Go source code. +package lint + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/gcexportdata" +) + +const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" + +// A Linter lints Go source code. +type Linter struct { +} + +// Problem represents a problem in some source code. 
+type Problem struct { + Position token.Position // position in source file + Text string // the prose that describes the problem + Link string // (optional) the link to the style guide for the problem + Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness + LineText string // the source line + Category string // a short name for the general category of the problem + + // If the problem has a suggested fix (the minority case), + // ReplacementLine is a full replacement for the relevant line of the source file. + ReplacementLine string +} + +func (p *Problem) String() string { + if p.Link != "" { + return p.Text + "\n\n" + p.Link + } + return p.Text +} + +type byPosition []Problem + +func (p byPosition) Len() int { return len(p) } +func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p byPosition) Less(i, j int) bool { + pi, pj := p[i].Position, p[j].Position + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return p[i].Text < p[j].Text +} + +// Lint lints src. +func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { + return l.LintFiles(map[string][]byte{filename: src}) +} + +// LintFiles lints a set of files of a single package. +// The argument is a map of filename to source. 
+func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { + pkg := &pkg{ + fset: token.NewFileSet(), + files: make(map[string]*file), + } + var pkgName string + for filename, src := range files { + if isGenerated(src) { + continue // See issue #239 + } + f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) + if err != nil { + return nil, err + } + if pkgName == "" { + pkgName = f.Name.Name + } else if f.Name.Name != pkgName { + return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) + } + pkg.files[filename] = &file{ + pkg: pkg, + f: f, + fset: pkg.fset, + src: src, + filename: filename, + } + } + if len(pkg.files) == 0 { + return nil, nil + } + return pkg.lint(), nil +} + +var ( + genHdr = []byte("// Code generated ") + genFtr = []byte(" DO NOT EDIT.") +) + +// isGenerated reports whether the source file is generated code +// according the rules from https://golang.org/s/generatedcode. +func isGenerated(src []byte) bool { + sc := bufio.NewScanner(bytes.NewReader(src)) + for sc.Scan() { + b := sc.Bytes() + if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { + return true + } + } + return false +} + +// pkg represents a package being linted. +type pkg struct { + fset *token.FileSet + files map[string]*file + + typesPkg *types.Package + typesInfo *types.Info + + // sortable is the set of types in the package that implement sort.Interface. + sortable map[string]bool + // main is whether this is a "main" package. + main bool + + problems []Problem +} + +func (p *pkg) lint() []Problem { + if err := p.typeCheck(); err != nil { + /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. + if e, ok := err.(types.Error); ok { + pos := p.fset.Position(e.Pos) + conf := 1.0 + if strings.Contains(e.Msg, "can't find import: ") { + // Golint is probably being run in a context that doesn't support + // typechecking (e.g. 
package files aren't found), so don't warn about it. + conf = 0 + } + if conf > 0 { + p.errorfAt(pos, conf, category("typechecking"), e.Msg) + } + + // TODO(dsymonds): Abort if !e.Soft? + } + */ + } + + p.scanSortable() + p.main = p.isMain() + + for _, f := range p.files { + f.lint() + } + + sort.Sort(byPosition(p.problems)) + + return p.problems +} + +// file represents a file being linted. +type file struct { + pkg *pkg + f *ast.File + fset *token.FileSet + src []byte + filename string +} + +func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } + +func (f *file) lint() { + f.lintPackageComment() + f.lintImports() + f.lintBlankImports() + f.lintExported() + f.lintNames() + f.lintVarDecls() + f.lintElses() + f.lintIfError() + f.lintRanges() + f.lintErrorf() + f.lintErrors() + f.lintErrorStrings() + f.lintReceiverNames() + f.lintIncDec() + f.lintErrorReturn() + f.lintUnexportedReturn() + f.lintTimeNames() + f.lintContextKeyTypes() + f.lintContextArgs() +} + +type link string +type category string + +// The variadic arguments may start with link and category types, +// and must end with a format string and any arguments. +// It returns the new Problem. +func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { + pos := f.fset.Position(n.Pos()) + if pos.Filename == "" { + pos.Filename = f.filename + } + return f.pkg.errorfAt(pos, confidence, args...) +} + +func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { + problem := Problem{ + Position: pos, + Confidence: confidence, + } + if pos.Filename != "" { + // The file might not exist in our mapping if a //line directive was encountered. 
+ if f, ok := p.files[pos.Filename]; ok { + problem.LineText = srcLine(f.src, pos) + } + } + +argLoop: + for len(args) > 1 { // always leave at least the format string in args + switch v := args[0].(type) { + case link: + problem.Link = string(v) + case category: + problem.Category = string(v) + default: + break argLoop + } + args = args[1:] + } + + problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) + + p.problems = append(p.problems, problem) + return &p.problems[len(p.problems)-1] +} + +var newImporter = func(fset *token.FileSet) types.ImporterFrom { + return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) +} + +func (p *pkg) typeCheck() error { + config := &types.Config{ + // By setting a no-op error reporter, the type checker does as much work as possible. + Error: func(error) {}, + Importer: newImporter(p.fset), + } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + } + var anyFile *file + var astFiles []*ast.File + for _, f := range p.files { + anyFile = f + astFiles = append(astFiles, f.f) + } + pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) + // Remember the typechecking info, even if config.Check failed, + // since we will get partial information. + p.typesPkg = pkg + p.typesInfo = info + return err +} + +func (p *pkg) typeOf(expr ast.Expr) types.Type { + if p.typesInfo == nil { + return nil + } + return p.typesInfo.TypeOf(expr) +} + +func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { + n, ok := typ.(*types.Named) + if !ok { + return false + } + tn := n.Obj() + return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name +} + +// scopeOf returns the tightest scope encompassing id. 
+func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { + var scope *types.Scope + if obj := p.typesInfo.ObjectOf(id); obj != nil { + scope = obj.Parent() + } + if scope == p.typesPkg.Scope() { + // We were given a top-level identifier. + // Use the file-level scope instead of the package-level scope. + pos := id.Pos() + for _, f := range p.files { + if f.f.Pos() <= pos && pos < f.f.End() { + scope = p.typesInfo.Scopes[f.f] + break + } + } + } + return scope +} + +func (p *pkg) scanSortable() { + p.sortable = make(map[string]bool) + + // bitfield for which methods exist on each type. + const ( + Len = 1 << iota + Less + Swap + ) + nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} + has := make(map[string]int) + for _, f := range p.files { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + // TODO(dsymonds): We could check the signature to be more precise. + recv := receiverType(fn) + if i, ok := nmap[fn.Name.Name]; ok { + has[recv] |= i + } + return false + }) + } + for typ, ms := range has { + if ms == Len|Less|Swap { + p.sortable[typ] = true + } + } +} + +func (p *pkg) isMain() bool { + for _, f := range p.files { + if f.isMain() { + return true + } + } + return false +} + +func (f *file) isMain() bool { + if f.f.Name.Name == "main" { + return true + } + return false +} + +// lintPackageComment checks package comments. It complains if +// there is no package comment, or if it is not of the right form. +// This has a notable false positive in that a package comment +// could rightfully appear in a different file of the same package, +// but that's not easy to fix since this linter is file-oriented. +func (f *file) lintPackageComment() { + if f.isTest() { + return + } + + const ref = styleGuideBase + "#package-comments" + prefix := "Package " + f.f.Name.Name + " " + + // Look for a detached package comment. 
+ // First, scan for the last comment that occurs before the "package" keyword. + var lastCG *ast.CommentGroup + for _, cg := range f.f.Comments { + if cg.Pos() > f.f.Package { + // Gone past "package" keyword. + break + } + lastCG = cg + } + if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { + endPos := f.fset.Position(lastCG.End()) + pkgPos := f.fset.Position(f.f.Package) + if endPos.Line+1 < pkgPos.Line { + // There isn't a great place to anchor this error; + // the start of the blank lines between the doc and the package statement + // is at least pointing at the location of the problem. + pos := token.Position{ + Filename: endPos.Filename, + // Offset not set; it is non-trivial, and doesn't appear to be needed. + Line: endPos.Line + 1, + Column: 1, + } + f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") + return + } + } + + if f.f.Doc == nil { + f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") + return + } + s := f.f.Doc.Text() + if ts := strings.TrimLeft(s, " \t"); ts != s { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") + s = ts + } + // Only non-main packages need to keep to this form. + if !f.pkg.main && !strings.HasPrefix(s, prefix) { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) + } +} + +// lintBlankImports complains if a non-main package has blank imports that are +// not documented. +func (f *file) lintBlankImports() { + // In package main and in tests, we don't complain about blank imports. + if f.pkg.main || f.isTest() { + return + } + + // The first element of each contiguous group of blank imports should have + // an explanatory comment of some kind. 
+ for i, imp := range f.f.Imports { + pos := f.fset.Position(imp.Pos()) + + if !isBlank(imp.Name) { + continue // Ignore non-blank imports. + } + if i > 0 { + prev := f.f.Imports[i-1] + prevPos := f.fset.Position(prev.Pos()) + if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { + continue // A subsequent blank in a group. + } + } + + // This is the first blank import of a group. + if imp.Doc == nil && imp.Comment == nil { + ref := "" + f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") + } + } +} + +// lintImports examines import blocks. +func (f *file) lintImports() { + for i, is := range f.f.Imports { + _ = i + if is.Name != nil && is.Name.Name == "." && !f.isTest() { + f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") + } + + } +} + +const docCommentsLink = styleGuideBase + "#doc-comments" + +// lintExported examines the exported names. +// It complains if any required doc comments are missing, +// or if they are not of the right form. The exact rules are in +// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function +// also tracks the GenDecl structure being traversed to permit +// doc comments for constants to be on top of the const block. +// It also complains if the names stutter when combined with +// the package name. +func (f *file) lintExported() { + if f.isTest() { + return + } + + var lastGen *ast.GenDecl // last GenDecl entered. + + // Set of GenDecls that have already had missing comments flagged. + genDeclMissingComments := make(map[*ast.GenDecl]bool) + + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return false + } + // token.CONST, token.TYPE or token.VAR + lastGen = v + return true + case *ast.FuncDecl: + f.lintFuncDoc(v) + if v.Recv == nil { + // Only check for stutter on functions, not methods. 
+ // Method names are not used package-qualified. + f.checkStutter(v.Name, "func") + } + // Don't proceed inside funcs. + return false + case *ast.TypeSpec: + // inside a GenDecl, which usually has the doc + doc := v.Doc + if doc == nil { + doc = lastGen.Doc + } + f.lintTypeDoc(v, doc) + f.checkStutter(v.Name, "type") + // Don't proceed inside types. + return false + case *ast.ValueSpec: + f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) + return false + } + return true + }) +} + +var ( + allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) + anyCapsRE = regexp.MustCompile(`[A-Z]`) +) + +// knownNameExceptions is a set of names that are known to be exempt from naming checks. +// This is usually because they are constrained by having to match names in the +// standard library. +var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +// lintNames examines all names in the file. +// It complains if any use underscores or incorrect known initialisms. +func (f *file) lintNames() { + // Package names need slightly different handling than other names. + if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") + } + if anyCapsRE.MatchString(f.f.Name.Name) { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) + } + + check := func(id *ast.Ident, thing string) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. 
+ if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") + return + } + if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { + should := string(id.Name[1]+'a'-'A') + id.Name[2:] + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) + } + + should := lintName(id.Name) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { + f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) + return + } + f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) + } + checkList := func(fl *ast.FieldList, thing string) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing) + } + } + } + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.AssignStmt: + if v.Tok == token.ASSIGN { + return true + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var") + } + } + case *ast.FuncDecl: + if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return true + } + + thing := "func" + if v.Recv != nil { + thing = "method" + } + + // Exclude naming warnings for functions that are exported to C but + // not exported in the Go API. + // See https://github.com/golang/lint/issues/144. 
+ if ast.IsExported(v.Name.Name) || !isCgoExported(v) { + check(v.Name, thing) + } + + checkList(v.Type.Params, thing+" parameter") + checkList(v.Type.Results, thing+" result") + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return true + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrainted by the method names of concrete types. + for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter") + checkList(ft.Results, "interface method result") + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return true + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var") + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var") + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field") + } + } + } + return true + }) +} + +// lintName returns a different name if it should be different. +func lintName(name string) (should string) { + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + allLower := true + for _, r := range name { + if !unicode.IsLower(r) { + allLower = false + break + } + } + if allLower { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. 
+ runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + if u := strings.ToUpper(word); commonInitialisms[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +// commonInitialisms is a set of common initialisms. +// Only add entries that are highly unlikely to be non-initialisms. +// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. 
+var commonInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, +} + +// lintTypeDoc examines the doc comment on a type. +// It complains if they are missing from an exported type, +// or if they are not of the standard form. +func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { + if !ast.IsExported(t.Name.Name) { + return + } + if doc == nil { + f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) + return + } + + s := doc.Text() + articles := [...]string{"A", "An", "The"} + for _, a := range articles { + if strings.HasPrefix(s, a+" ") { + s = s[len(a)+1:] + break + } + } + if !strings.HasPrefix(s, t.Name.Name+" ") { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) + } +} + +var commonMethods = map[string]bool{ + "Error": true, + "Read": true, + "ServeHTTP": true, + "String": true, + "Write": true, +} + +// lintFuncDoc examines doc comments on functions and methods. +// It complains if they are missing, or not of the right form. +// It has specific exclusions for well-known methods (see commonMethods above). 
+func (f *file) lintFuncDoc(fn *ast.FuncDecl) { + if !ast.IsExported(fn.Name.Name) { + // func is unexported + return + } + kind := "function" + name := fn.Name.Name + if fn.Recv != nil && len(fn.Recv.List) > 0 { + // method + kind = "method" + recv := receiverType(fn) + if !ast.IsExported(recv) { + // receiver is unexported + return + } + if commonMethods[name] { + return + } + switch name { + case "Len", "Less", "Swap": + if f.pkg.sortable[recv] { + return + } + } + name = recv + "." + name + } + if fn.Doc == nil { + f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name) + return + } + s := fn.Doc.Text() + prefix := fn.Name.Name + " " + if !strings.HasPrefix(s, prefix) { + f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +// lintValueSpecDoc examines package-global variables and constants. +// It complains if they are not individually declared, +// or if they are not suitably documented in the right form (unless they are in a block that is commented). +func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { + kind := "var" + if gd.Tok == token.CONST { + kind = "const" + } + + if len(vs.Names) > 1 { + // Check that none are exported except for the first. + for _, n := range vs.Names[1:] { + if ast.IsExported(n.Name) { + f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name) + return + } + } + } + + // Only one name. 
+ name := vs.Names[0].Name + if !ast.IsExported(name) { + return + } + + if vs.Doc == nil && gd.Doc == nil { + if genDeclMissingComments[gd] { + return + } + block := "" + if kind == "const" && gd.Lparen.IsValid() { + block = " (or a comment on this block)" + } + f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block) + genDeclMissingComments[gd] = true + return + } + // If this GenDecl has parens and a comment, we don't check its comment form. + if gd.Lparen.IsValid() && gd.Doc != nil { + return + } + // The relevant text to check will be on either vs.Doc or gd.Doc. + // Use vs.Doc preferentially. + doc := vs.Doc + if doc == nil { + doc = gd.Doc + } + prefix := name + " " + if !strings.HasPrefix(doc.Text(), prefix) { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +func (f *file) checkStutter(id *ast.Ident, thing string) { + pkg, name := f.f.Name.Name, id.Name + if !ast.IsExported(name) { + // unexported name + return + } + // A name stutters if the package name is a strict prefix + // and the next character of the name starts a new word. + if len(name) <= len(pkg) { + // name is too short to stutter. + // This permits the name to be the same as the package name. + return + } + if !strings.EqualFold(pkg, name[:len(pkg)]) { + return + } + // We can assume the name is well-formed UTF-8. + // If the next rune after the package name is uppercase or an underscore + // the it's starting a new word and thus this name stutters. 
+ rem := name[len(pkg):] + if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { + f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem) + } +} + +// zeroLiteral is a set of ast.BasicLit values that are zero values. +// It is not exhaustive. +var zeroLiteral = map[string]bool{ + "false": true, // bool + // runes + `'\x00'`: true, + `'\000'`: true, + // strings + `""`: true, + "``": true, + // numerics + "0": true, + "0.": true, + "0.0": true, + "0i": true, +} + +// lintVarDecls examines variable declarations. It complains about declarations with +// redundant LHS types that can be inferred from the RHS. +func (f *file) lintVarDecls() { + var lastGen *ast.GenDecl // last GenDecl entered. + + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.GenDecl: + if v.Tok != token.CONST && v.Tok != token.VAR { + return false + } + lastGen = v + return true + case *ast.ValueSpec: + if lastGen.Tok == token.CONST { + return false + } + if len(v.Names) > 1 || v.Type == nil || len(v.Values) == 0 { + return false + } + rhs := v.Values[0] + // An underscore var appears in a common idiom for compile-time interface satisfaction, + // as in "var _ Interface = (*Concrete)(nil)". + if isIdent(v.Names[0], "_") { + return false + } + // If the RHS is a zero value, suggest dropping it. + zero := false + if lit, ok := rhs.(*ast.BasicLit); ok { + zero = zeroLiteral[lit.Value] + } else if isIdent(rhs, "nil") { + zero = true + } + if zero { + f.errorf(rhs, 0.9, category("zero-value"), "should drop = %s from declaration of var %s; it is the zero value", f.render(rhs), v.Names[0]) + return false + } + lhsTyp := f.pkg.typeOf(v.Type) + rhsTyp := f.pkg.typeOf(rhs) + + if !validType(lhsTyp) || !validType(rhsTyp) { + // Type checking failed (often due to missing imports). 
+ return false + } + + if !types.Identical(lhsTyp, rhsTyp) { + // Assignment to a different type is not redundant. + return false + } + + // The next three conditions are for suppressing the warning in situations + // where we were unable to typecheck. + + // If the LHS type is an interface, don't warn, since it is probably a + // concrete type on the RHS. Note that our feeble lexical check here + // will only pick up interface{} and other literal interface types; + // that covers most of the cases we care to exclude right now. + if _, ok := v.Type.(*ast.InterfaceType); ok { + return false + } + // If the RHS is an untyped const, only warn if the LHS type is its default type. + if defType, ok := f.isUntypedConst(rhs); ok && !isIdent(v.Type, defType) { + return false + } + + f.errorf(v.Type, 0.8, category("type-inference"), "should omit type %s from declaration of var %s; it will be inferred from the right-hand side", f.render(v.Type), v.Names[0]) + return false + } + return true + }) +} + +func validType(T types.Type) bool { + return T != nil && + T != types.Typ[types.Invalid] && + !strings.Contains(T.String(), "invalid type") // good but not foolproof +} + +// lintElses examines else blocks. It complains about any else block whose if block ends in a return. +func (f *file) lintElses() { + // We don't want to flag if { } else if { } else { } constructions. + // They will appear as an IfStmt whose Else field is also an IfStmt. + // Record such a node so we ignore it when we visit it. 
+ ignore := make(map[*ast.IfStmt]bool) + + f.walk(func(node ast.Node) bool { + ifStmt, ok := node.(*ast.IfStmt) + if !ok || ifStmt.Else == nil { + return true + } + if ignore[ifStmt] { + return true + } + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + ignore[elseif] = true + return true + } + if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { + // only care about elses without conditions + return true + } + if len(ifStmt.Body.List) == 0 { + return true + } + shortDecl := false // does the if statement have a ":=" initialization statement? + if ifStmt.Init != nil { + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + shortDecl = true + } + } + lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] + if _, ok := lastStmt.(*ast.ReturnStmt); ok { + extra := "" + if shortDecl { + extra = " (move short variable declaration to its own line if necessary)" + } + f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) + } + return true + }) +} + +// lintRanges examines range clauses. It complains about redundant constructions. 
+func (f *file) lintRanges() { + f.walk(func(node ast.Node) bool { + rs, ok := node.(*ast.RangeStmt) + if !ok { + return true + } + + if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { + p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") + + newRS := *rs // shallow copy + newRS.Value = nil + newRS.Key = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + + return true + } + + if isIdent(rs.Value, "_") { + p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) + + newRS := *rs // shallow copy + newRS.Value = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + } + + return true + }) +} + +// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. +func (f *file) lintErrorf() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok || len(ce.Args) != 1 { + return true + } + isErrorsNew := isPkgDot(ce.Fun, "errors", "New") + var isTestingError bool + se, ok := ce.Fun.(*ast.SelectorExpr) + if ok && se.Sel.Name == "Error" { + if typ := f.pkg.typeOf(se.X); typ != nil { + isTestingError = typ.String() == "*testing.T" + } + } + if !isErrorsNew && !isTestingError { + return true + } + arg := ce.Args[0] + ce, ok = arg.(*ast.CallExpr) + if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { + return true + } + errorfPrefix := "fmt" + if isTestingError { + errorfPrefix = f.render(se.X) + } + p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) + + m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) + if m != nil { + p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] + } + + return true + }) +} + +// lintErrors examines global error vars. 
It complains if they aren't named in the standard way. +func (f *file) lintErrors() { + for _, decl := range f.f.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.VAR { + continue + } + for _, spec := range gd.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue + } + ce, ok := spec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + continue + } + + id := spec.Names[0] + prefix := "err" + if id.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(id.Name, prefix) { + f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) + } + } + } +} + +func lintErrorString(s string) (isClean bool, conf float64) { + const basicConfidence = 0.8 + const capConfidence = basicConfidence - 0.2 + first, firstN := utf8.DecodeRuneInString(s) + last, _ := utf8.DecodeLastRuneInString(s) + if last == '.' || last == ':' || last == '!' || last == '\n' { + return false, basicConfidence + } + if unicode.IsUpper(first) { + // People use proper nouns and exported Go identifiers in error strings, + // so decrease the confidence of warnings for capitalization. + if len(s) <= firstN { + return false, capConfidence + } + // Flag strings starting with something that doesn't look like an initialism. + if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { + return false, capConfidence + } + } + return true, 0 +} + +// lintErrorStrings examines error strings. +// It complains if they are capitalized or end in punctuation or a newline. 
+func (f *file) lintErrorStrings() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok { + return true + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + return true + } + if len(ce.Args) < 1 { + return true + } + str, ok := ce.Args[0].(*ast.BasicLit) + if !ok || str.Kind != token.STRING { + return true + } + s, _ := strconv.Unquote(str.Value) // can assume well-formed Go + if s == "" { + return true + } + clean, conf := lintErrorString(s) + if clean { + return true + } + + f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), + "error strings should not be capitalized or end with punctuation or a newline") + return true + }) +} + +// lintReceiverNames examines receiver names. It complains about inconsistent +// names used for the same type and names such as "this". +func (f *file) lintReceiverNames() { + typeReceiver := map[string]string{} + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + names := fn.Recv.List[0].Names + if len(names) < 1 { + return true + } + name := names[0].Name + const ref = styleGuideBase + "#receiver-names" + if name == "_" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) + return true + } + if name == "this" || name == "self" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) + return true + } + recv := receiverType(fn) + if prev, ok := typeReceiver[recv]; ok && prev != name { + f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) + return true + } + typeReceiver[recv] = name + return true + }) +} + +// lintIncDec examines statements that increment or decrement a variable. 
+// It complains if they don't use x++ or x--. +func (f *file) lintIncDec() { + f.walk(func(n ast.Node) bool { + as, ok := n.(*ast.AssignStmt) + if !ok { + return true + } + if len(as.Lhs) != 1 { + return true + } + if !isOne(as.Rhs[0]) { + return true + } + var suffix string + switch as.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + default: + return true + } + f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) + return true + }) +} + +// lintErrorReturn examines function declarations that return an error. +// It complains if the error isn't the last parameter. +func (f *file) lintErrorReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Type.Results == nil { + return true + } + ret := fn.Type.Results.List + if len(ret) <= 1 { + return true + } + // An error return parameter should be the last parameter. + // Flag any error parameters found before the last. + for _, r := range ret[:len(ret)-1] { + if isIdent(r.Type, "error") { + f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") + break // only flag one + } + } + return true + }) +} + +// lintUnexportedReturn examines exported function declarations. +// It complains if any return an unexported type. +func (f *file) lintUnexportedReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + if fn.Type.Results == nil { + return false + } + if !fn.Name.IsExported() { + return false + } + thing := "func" + if fn.Recv != nil && len(fn.Recv.List) > 0 { + thing = "method" + if !ast.IsExported(receiverType(fn)) { + // Don't report exported methods of unexported types, + // such as private implementations of sort.Interface. 
+ return false + } + } + for _, ret := range fn.Type.Results.List { + typ := f.pkg.typeOf(ret.Type) + if exportedType(typ) { + continue + } + f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), + "exported %s %s returns unexported type %s, which can be annoying to use", + thing, fn.Name.Name, typ) + break // only flag one + } + return false + }) +} + +// exportedType reports whether typ is an exported type. +// It is imprecise, and will err on the side of returning true, +// such as for composite types. +func exportedType(typ types.Type) bool { + switch T := typ.(type) { + case *types.Named: + // Builtin types have no package. + return T.Obj().Pkg() == nil || T.Obj().Exported() + case *types.Map: + return exportedType(T.Key()) && exportedType(T.Elem()) + case interface { + Elem() types.Type + }: // array, slice, pointer, chan + return exportedType(T.Elem()) + } + // Be conservative about other types, such as struct, interface, etc. + return true +} + +// timeSuffixes is a list of name suffixes that imply a time unit. +// This is not an exhaustive list. +var timeSuffixes = []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", +} + +func (f *file) lintTimeNames() { + f.walk(func(node ast.Node) bool { + v, ok := node.(*ast.ValueSpec) + if !ok { + return true + } + for _, name := range v.Names { + origTyp := f.pkg.typeOf(name) + // Look for time.Duration or *time.Duration; + // the latter is common when using flag.Duration. 
+ typ := origTyp + if pt, ok := typ.(*types.Pointer); ok { + typ = pt.Elem() + } + if !f.pkg.isNamedType(typ, "time", "Duration") { + continue + } + suffix := "" + for _, suf := range timeSuffixes { + if strings.HasSuffix(name.Name, suf) { + suffix = suf + break + } + } + if suffix == "" { + continue + } + f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) + } + return true + }) +} + +// lintContextKeyTypes checks for call expressions to context.WithValue with +// basic types used for the key argument. +// See: https://golang.org/issue/17293 +func (f *file) lintContextKeyTypes() { + f.walk(func(node ast.Node) bool { + switch node := node.(type) { + case *ast.CallExpr: + f.checkContextKeyType(node) + } + + return true + }) +} + +// checkContextKeyType reports an error if the call expression calls +// context.WithValue with a key argument of basic type. +func (f *file) checkContextKeyType(x *ast.CallExpr) { + sel, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkg, ok := sel.X.(*ast.Ident) + if !ok || pkg.Name != "context" { + return + } + if sel.Sel.Name != "WithValue" { + return + } + + // key is second argument to context.WithValue + if len(x.Args) != 3 { + return + } + key := f.pkg.typesInfo.Types[x.Args[1]] + + if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { + f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) + } +} + +// lintContextArgs examines function declarations that contain an +// argument with a type of context.Context +// It complains if that argument isn't the first parameter. +func (f *file) lintContextArgs() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || len(fn.Type.Params.List) <= 1 { + return true + } + // A context.Context should be the first parameter of a function. + // Flag any that show up after the first. 
+ for _, arg := range fn.Type.Params.List[1:] { + if isPkgDot(arg.Type, "context", "Context") { + f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") + break // only flag one + } + } + return true + }) +} + +// containsComments returns whether the interval [start, end) contains any +// comments without "// MATCH " prefix. +func (f *file) containsComments(start, end token.Pos) bool { + for _, cgroup := range f.f.Comments { + comments := cgroup.List + if comments[0].Slash >= end { + // All comments starting with this group are after end pos. + return false + } + if comments[len(comments)-1].Slash < start { + // Comments group ends before start pos. + continue + } + for _, c := range comments { + if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { + return true + } + } + } + return false +} + +func (f *file) lintIfError() { + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.BlockStmt: + for i := 0; i < len(v.List)-1; i++ { + // if var := whatever; var != nil { return var } + s, ok := v.List[i].(*ast.IfStmt) + if !ok || s.Body == nil || len(s.Body.List) != 1 || s.Else != nil { + continue + } + assign, ok := s.Init.(*ast.AssignStmt) + if !ok || len(assign.Lhs) != 1 || !(assign.Tok == token.DEFINE || assign.Tok == token.ASSIGN) { + continue + } + id, ok := assign.Lhs[0].(*ast.Ident) + if !ok { + continue + } + expr, ok := s.Cond.(*ast.BinaryExpr) + if !ok || expr.Op != token.NEQ { + continue + } + if lhs, ok := expr.X.(*ast.Ident); !ok || lhs.Name != id.Name { + continue + } + if rhs, ok := expr.Y.(*ast.Ident); !ok || rhs.Name != "nil" { + continue + } + r, ok := s.Body.List[0].(*ast.ReturnStmt) + if !ok || len(r.Results) != 1 { + continue + } + if r, ok := r.Results[0].(*ast.Ident); !ok || r.Name != id.Name { + continue + } + + // return nil + r, ok = v.List[i+1].(*ast.ReturnStmt) + if !ok || len(r.Results) != 1 { + continue 
+ } + if r, ok := r.Results[0].(*ast.Ident); !ok || r.Name != "nil" { + continue + } + + // check if there are any comments explaining the construct, don't emit an error if there are some. + if f.containsComments(s.Pos(), r.Pos()) { + continue + } + + f.errorf(v.List[i], 0.9, "redundant if ...; err != nil check, just return error instead.") + } + } + return true + }) +} + +// receiverType returns the named type of the method receiver, sans "*", +// or "invalid-type" if fn.Recv is ill formed. +func receiverType(fn *ast.FuncDecl) string { + switch e := fn.Recv.List[0].Type.(type) { + case *ast.Ident: + return e.Name + case *ast.StarExpr: + if id, ok := e.X.(*ast.Ident); ok { + return id.Name + } + } + // The parser accepts much more than just the legal forms. + return "invalid-type" +} + +func (f *file) walk(fn func(ast.Node) bool) { + ast.Walk(walker(fn), f.f) +} + +func (f *file) render(x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, f.fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func (f *file) debugRender(x interface{}) string { + var buf bytes.Buffer + if err := ast.Fprint(&buf, f.fset, x, nil); err != nil { + panic(err) + } + return buf.String() +} + +// walker adapts a function to satisfy the ast.Visitor interface. +// The function return whether the walk should proceed into the node's children. +type walker func(ast.Node) bool + +func (w walker) Visit(node ast.Node) ast.Visitor { + if w(node) { + return w + } + return nil +} + +func isIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. 
+func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } + +func isPkgDot(expr ast.Expr, pkg, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) +} + +func isOne(expr ast.Expr) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == "1" +} + +func isCgoExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) + for _, c := range f.Doc.List { + if cgoExport.MatchString(c.Text) { + return true + } + } + return false +} + +var basicTypeKinds = map[types.BasicKind]string{ + types.UntypedBool: "bool", + types.UntypedInt: "int", + types.UntypedRune: "rune", + types.UntypedFloat: "float64", + types.UntypedComplex: "complex128", + types.UntypedString: "string", +} + +// isUntypedConst reports whether expr is an untyped constant, +// and indicates what its default type is. +// scope may be nil. +func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) { + // Re-evaluate expr outside of its context to see if it's untyped. + // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) + exprStr := f.render(expr) + tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr) + if err != nil { + return "", false + } + if b, ok := tv.Type.(*types.Basic); ok { + if dt, ok := basicTypeKinds[b.Kind()]; ok { + return dt, true + } + } + + return "", false +} + +// firstLineOf renders the given node and returns its first line. +// It will also match the indentation of another node. 
+func (f *file) firstLineOf(node, match ast.Node) string { + line := f.render(node) + if i := strings.Index(line, "\n"); i >= 0 { + line = line[:i] + } + return f.indentOf(match) + line +} + +func (f *file) indentOf(node ast.Node) string { + line := srcLine(f.src, f.fset.Position(node.Pos())) + for i, r := range line { + switch r { + case ' ', '\t': + default: + return line[:i] + } + } + return line // unusual or empty line +} + +func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { + line := srcLine(f.src, f.fset.Position(node.Pos())) + line = strings.TrimSuffix(line, "\n") + rx := regexp.MustCompile(pattern) + return rx.FindStringSubmatch(line) +} + +// srcLine returns the complete line at p, including the terminating newline. +func srcLine(src []byte, p token.Position) string { + // Run to end of line in both directions if not at line start/end. + lo, hi := p.Offset, p.Offset+1 + for lo > 0 && src[lo-1] != '\n' { + lo-- + } + for hi < len(src) && src[hi-1] != '\n' { + hi++ + } + return string(src[lo:hi]) +} diff --git a/tools/vendor/golang.org/x/tools/go/callgraph/rta/rta.go b/tools/vendor/golang.org/x/tools/go/callgraph/rta/rta.go new file mode 100644 index 00000000..1a2dadf5 --- /dev/null +++ b/tools/vendor/golang.org/x/tools/go/callgraph/rta/rta.go @@ -0,0 +1,459 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package provides Rapid Type Analysis (RTA) for Go, a fast +// algorithm for call graph construction and discovery of reachable code +// (and hence dead code) and runtime types. The algorithm was first +// described in: +// +// David F. Bacon and Peter F. Sweeney. 1996. +// Fast static analysis of C++ virtual function calls. 
(OOPSLA '96) +// http://doi.acm.org/10.1145/236337.236371 +// +// The algorithm uses dynamic programming to tabulate the cross-product +// of the set of known "address taken" functions with the set of known +// dynamic calls of the same type. As each new address-taken function +// is discovered, call graph edges are added from each known callsite, +// and as each new call site is discovered, call graph edges are added +// from it to each known address-taken function. +// +// A similar approach is used for dynamic calls via interfaces: it +// tabulates the cross-product of the set of known "runtime types", +// i.e. types that may appear in an interface value, or be derived from +// one via reflection, with the set of known "invoke"-mode dynamic +// calls. As each new "runtime type" is discovered, call edges are +// added from the known call sites, and as each new call site is +// discovered, call graph edges are added to each compatible +// method. +// +// In addition, we must consider all exported methods of any runtime type +// as reachable, since they may be called via reflection. +// +// Each time a newly added call edge causes a new function to become +// reachable, the code of that function is analyzed for more call sites, +// address-taken functions, and runtime types. The process continues +// until a fixed point is achieved. +// +// The resulting call graph is less precise than one produced by pointer +// analysis, but the algorithm is much faster. For example, running the +// cmd/callgraph tool on its own source takes ~2.1s for RTA and ~5.4s +// for points-to analysis. +// +package rta // import "golang.org/x/tools/go/callgraph/rta" + +// TODO(adonovan): test it by connecting it to the interpreter and +// replacing all "unreachable" functions by a special intrinsic, and +// ensure that that intrinsic is never called. 
+ +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/callgraph" + "golang.org/x/tools/go/ssa" + "golang.org/x/tools/go/types/typeutil" +) + +// A Result holds the results of Rapid Type Analysis, which includes the +// set of reachable functions/methods, runtime types, and the call graph. +// +type Result struct { + // CallGraph is the discovered callgraph. + // It does not include edges for calls made via reflection. + CallGraph *callgraph.Graph + + // Reachable contains the set of reachable functions and methods. + // This includes exported methods of runtime types, since + // they may be accessed via reflection. + // The value indicates whether the function is address-taken. + // + // (We wrap the bool in a struct to avoid inadvertent use of + // "if Reachable[f] {" to test for set membership.) + Reachable map[*ssa.Function]struct{ AddrTaken bool } + + // RuntimeTypes contains the set of types that are needed at + // runtime, for interfaces or reflection. + // + // The value indicates whether the type is inaccessible to reflection. + // Consider: + // type A struct{B} + // fmt.Println(new(A)) + // Types *A, A and B are accessible to reflection, but the unnamed + // type struct{B} is not. + RuntimeTypes typeutil.Map +} + +// Working state of the RTA algorithm. +type rta struct { + result *Result + + prog *ssa.Program + + worklist []*ssa.Function // list of functions to visit + + // addrTakenFuncsBySig contains all address-taken *Functions, grouped by signature. + // Keys are *types.Signature, values are map[*ssa.Function]bool sets. + addrTakenFuncsBySig typeutil.Map + + // dynCallSites contains all dynamic "call"-mode call sites, grouped by signature. + // Keys are *types.Signature, values are unordered []ssa.CallInstruction. + dynCallSites typeutil.Map + + // invokeSites contains all "invoke"-mode call sites, grouped by interface. + // Keys are *types.Interface (never *types.Named), + // Values are unordered []ssa.CallInstruction sets. 
+ invokeSites typeutil.Map + + // The following two maps together define the subset of the + // m:n "implements" relation needed by the algorithm. + + // concreteTypes maps each concrete type to the set of interfaces that it implements. + // Keys are types.Type, values are unordered []*types.Interface. + // Only concrete types used as MakeInterface operands are included. + concreteTypes typeutil.Map + + // interfaceTypes maps each interface type to + // the set of concrete types that implement it. + // Keys are *types.Interface, values are unordered []types.Type. + // Only interfaces used in "invoke"-mode CallInstructions are included. + interfaceTypes typeutil.Map +} + +// addReachable marks a function as potentially callable at run-time, +// and ensures that it gets processed. +func (r *rta) addReachable(f *ssa.Function, addrTaken bool) { + reachable := r.result.Reachable + n := len(reachable) + v := reachable[f] + if addrTaken { + v.AddrTaken = true + } + reachable[f] = v + if len(reachable) > n { + // First time seeing f. Add it to the worklist. + r.worklist = append(r.worklist, f) + } +} + +// addEdge adds the specified call graph edge, and marks it reachable. +// addrTaken indicates whether to mark the callee as "address-taken". +func (r *rta) addEdge(site ssa.CallInstruction, callee *ssa.Function, addrTaken bool) { + r.addReachable(callee, addrTaken) + + if g := r.result.CallGraph; g != nil { + if site.Parent() == nil { + panic(site) + } + from := g.CreateNode(site.Parent()) + to := g.CreateNode(callee) + callgraph.AddEdge(from, site, to) + } +} + +// ---------- addrTakenFuncs × dynCallSites ---------- + +// visitAddrTakenFunc is called each time we encounter an address-taken function f. +func (r *rta) visitAddrTakenFunc(f *ssa.Function) { + // Create two-level map (Signature -> Function -> bool). 
+ S := f.Signature + funcs, _ := r.addrTakenFuncsBySig.At(S).(map[*ssa.Function]bool) + if funcs == nil { + funcs = make(map[*ssa.Function]bool) + r.addrTakenFuncsBySig.Set(S, funcs) + } + if !funcs[f] { + // First time seeing f. + funcs[f] = true + + // If we've seen any dyncalls of this type, mark it reachable, + // and add call graph edges. + sites, _ := r.dynCallSites.At(S).([]ssa.CallInstruction) + for _, site := range sites { + r.addEdge(site, f, true) + } + } +} + +// visitDynCall is called each time we encounter a dynamic "call"-mode call. +func (r *rta) visitDynCall(site ssa.CallInstruction) { + S := site.Common().Signature() + + // Record the call site. + sites, _ := r.dynCallSites.At(S).([]ssa.CallInstruction) + r.dynCallSites.Set(S, append(sites, site)) + + // For each function of signature S that we know is address-taken, + // mark it reachable. We'll add the callgraph edges later. + funcs, _ := r.addrTakenFuncsBySig.At(S).(map[*ssa.Function]bool) + for g := range funcs { + r.addEdge(site, g, true) + } +} + +// ---------- concrete types × invoke sites ---------- + +// addInvokeEdge is called for each new pair (site, C) in the matrix. +func (r *rta) addInvokeEdge(site ssa.CallInstruction, C types.Type) { + // Ascertain the concrete method of C to be called. + imethod := site.Common().Method + cmethod := r.prog.MethodValue(r.prog.MethodSets.MethodSet(C).Lookup(imethod.Pkg(), imethod.Name())) + r.addEdge(site, cmethod, true) +} + +// visitInvoke is called each time the algorithm encounters an "invoke"-mode call. +func (r *rta) visitInvoke(site ssa.CallInstruction) { + I := site.Common().Value.Type().Underlying().(*types.Interface) + + // Record the invoke site. + sites, _ := r.invokeSites.At(I).([]ssa.CallInstruction) + r.invokeSites.Set(I, append(sites, site)) + + // Add callgraph edge for each existing + // address-taken concrete type implementing I. 
+ for _, C := range r.implementations(I) { + r.addInvokeEdge(site, C) + } +} + +// ---------- main algorithm ---------- + +// visitFunc processes function f. +func (r *rta) visitFunc(f *ssa.Function) { + var space [32]*ssa.Value // preallocate space for common case + + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + rands := instr.Operands(space[:0]) + + switch instr := instr.(type) { + case ssa.CallInstruction: + call := instr.Common() + if call.IsInvoke() { + r.visitInvoke(instr) + } else if g := call.StaticCallee(); g != nil { + r.addEdge(instr, g, false) + } else if _, ok := call.Value.(*ssa.Builtin); !ok { + r.visitDynCall(instr) + } + + // Ignore the call-position operand when + // looking for address-taken Functions. + // Hack: assume this is rands[0]. + rands = rands[1:] + + case *ssa.MakeInterface: + r.addRuntimeType(instr.X.Type(), false) + } + + // Process all address-taken functions. + for _, op := range rands { + if g, ok := (*op).(*ssa.Function); ok { + r.visitAddrTakenFunc(g) + } + } + } + } +} + +// Analyze performs Rapid Type Analysis, starting at the specified root +// functions. It returns nil if no roots were specified. +// +// If buildCallGraph is true, Result.CallGraph will contain a call +// graph; otherwise, only the other fields (reachable functions) are +// populated. +// +func Analyze(roots []*ssa.Function, buildCallGraph bool) *Result { + if len(roots) == 0 { + return nil + } + + r := &rta{ + result: &Result{Reachable: make(map[*ssa.Function]struct{ AddrTaken bool })}, + prog: roots[0].Prog, + } + + if buildCallGraph { + // TODO(adonovan): change callgraph API to eliminate the + // notion of a distinguished root node. Some callgraphs + // have many roots, or none. 
+ r.result.CallGraph = callgraph.New(roots[0]) + } + + hasher := typeutil.MakeHasher() + r.result.RuntimeTypes.SetHasher(hasher) + r.addrTakenFuncsBySig.SetHasher(hasher) + r.dynCallSites.SetHasher(hasher) + r.invokeSites.SetHasher(hasher) + r.concreteTypes.SetHasher(hasher) + r.interfaceTypes.SetHasher(hasher) + + // Visit functions, processing their instructions, and adding + // new functions to the worklist, until a fixed point is + // reached. + var shadow []*ssa.Function // for efficiency, we double-buffer the worklist + r.worklist = append(r.worklist, roots...) + for len(r.worklist) > 0 { + shadow, r.worklist = r.worklist, shadow[:0] + for _, f := range shadow { + r.visitFunc(f) + } + } + return r.result +} + +// interfaces(C) returns all currently known interfaces implemented by C. +func (r *rta) interfaces(C types.Type) []*types.Interface { + // Ascertain set of interfaces C implements + // and update 'implements' relation. + var ifaces []*types.Interface + r.interfaceTypes.Iterate(func(I types.Type, concs interface{}) { + if I := I.(*types.Interface); types.Implements(C, I) { + concs, _ := concs.([]types.Type) + r.interfaceTypes.Set(I, append(concs, C)) + ifaces = append(ifaces, I) + } + }) + r.concreteTypes.Set(C, ifaces) + return ifaces +} + +// implementations(I) returns all currently known concrete types that implement I. +func (r *rta) implementations(I *types.Interface) []types.Type { + var concs []types.Type + if v := r.interfaceTypes.At(I); v != nil { + concs = v.([]types.Type) + } else { + // First time seeing this interface. + // Update the 'implements' relation. 
+ r.concreteTypes.Iterate(func(C types.Type, ifaces interface{}) { + if types.Implements(C, I) { + ifaces, _ := ifaces.([]*types.Interface) + r.concreteTypes.Set(C, append(ifaces, I)) + concs = append(concs, C) + } + }) + r.interfaceTypes.Set(I, concs) + } + return concs +} + +// addRuntimeType is called for each concrete type that can be the +// dynamic type of some interface or reflect.Value. +// Adapted from needMethods in go/ssa/builder.go +// +func (r *rta) addRuntimeType(T types.Type, skip bool) { + if prev, ok := r.result.RuntimeTypes.At(T).(bool); ok { + if skip && !prev { + r.result.RuntimeTypes.Set(T, skip) + } + return + } + r.result.RuntimeTypes.Set(T, skip) + + mset := r.prog.MethodSets.MethodSet(T) + + if _, ok := T.Underlying().(*types.Interface); !ok { + // T is a new concrete type. + for i, n := 0, mset.Len(); i < n; i++ { + sel := mset.At(i) + m := sel.Obj() + + if m.Exported() { + // Exported methods are always potentially callable via reflection. + r.addReachable(r.prog.MethodValue(sel), true) + } + } + + // Add callgraph edge for each existing dynamic + // "invoke"-mode call via that interface. + for _, I := range r.interfaces(T) { + sites, _ := r.invokeSites.At(I).([]ssa.CallInstruction) + for _, site := range sites { + r.addInvokeEdge(site, T) + } + } + } + + // Precondition: T is not a method signature (*Signature with Recv()!=nil). + // Recursive case: skip => don't call makeMethods(T). + // Each package maintains its own set of types it has visited. + + var n *types.Named + switch T := T.(type) { + case *types.Named: + n = T + case *types.Pointer: + n, _ = T.Elem().(*types.Named) + } + if n != nil { + owner := n.Obj().Pkg() + if owner == nil { + return // built-in error type + } + } + + // Recursion over signatures of each exported method. 
+ for i := 0; i < mset.Len(); i++ { + if mset.At(i).Obj().Exported() { + sig := mset.At(i).Type().(*types.Signature) + r.addRuntimeType(sig.Params(), true) // skip the Tuple itself + r.addRuntimeType(sig.Results(), true) // skip the Tuple itself + } + } + + switch t := T.(type) { + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + r.addRuntimeType(t.Elem(), false) + + case *types.Slice: + r.addRuntimeType(t.Elem(), false) + + case *types.Chan: + r.addRuntimeType(t.Elem(), false) + + case *types.Map: + r.addRuntimeType(t.Key(), false) + r.addRuntimeType(t.Elem(), false) + + case *types.Signature: + if t.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) + } + r.addRuntimeType(t.Params(), true) // skip the Tuple itself + r.addRuntimeType(t.Results(), true) // skip the Tuple itself + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + r.addRuntimeType(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + r.addRuntimeType(t.Underlying(), true) + + case *types.Array: + r.addRuntimeType(t.Elem(), false) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + r.addRuntimeType(t.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, t.Len(); i < n; i++ { + r.addRuntimeType(t.At(i).Type(), false) + } + + default: + panic(T) + } +} diff --git a/tools/vendor/mvdan.cc/interfacer/README.md b/tools/vendor/mvdan.cc/interfacer/README.md index bafa40b0..c55eca76 100644 --- a/tools/vendor/mvdan.cc/interfacer/README.md +++ b/tools/vendor/mvdan.cc/interfacer/README.md @@ -21,7 +21,7 @@ func ProcessInput(f *os.File) error { ``` ```sh -$ interfacer $(go list ./... 
| grep -v /vendor/) +$ interfacer ./... foo.go:10:19: f can be io.Reader ``` diff --git a/tools/vendor/mvdan.cc/interfacer/main.go b/tools/vendor/mvdan.cc/interfacer/main.go index bc86977a..966ad72b 100644 --- a/tools/vendor/mvdan.cc/interfacer/main.go +++ b/tools/vendor/mvdan.cc/interfacer/main.go @@ -6,12 +6,18 @@ package main // import "mvdan.cc/interfacer" import ( "flag" "fmt" + "go/build" "os" + "golang.org/x/tools/go/buildutil" + "mvdan.cc/interfacer/check" ) -var _ = flag.Bool("v", false, "print the names of packages as they are checked") +func init() { + flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", + buildutil.TagsFlagDoc) +} func main() { flag.Parse() diff --git a/tools/vendor/mvdan.cc/unparam/check/check.go b/tools/vendor/mvdan.cc/unparam/check/check.go index 33cc3e15..7531861b 100644 --- a/tools/vendor/mvdan.cc/unparam/check/check.go +++ b/tools/vendor/mvdan.cc/unparam/check/check.go @@ -23,6 +23,7 @@ import ( "golang.org/x/tools/go/callgraph" "golang.org/x/tools/go/callgraph/cha" + "golang.org/x/tools/go/callgraph/rta" "golang.org/x/tools/go/loader" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/ssa/ssautil" @@ -31,7 +32,7 @@ import ( "mvdan.cc/lint" ) -func UnusedParams(tests, exported, debug bool, args ...string) ([]string, error) { +func UnusedParams(tests bool, algo string, exported, debug bool, args ...string) ([]string, error) { wd, err := os.Getwd() if err != nil { return nil, err @@ -39,6 +40,7 @@ func UnusedParams(tests, exported, debug bool, args ...string) ([]string, error) c := &Checker{ wd: wd, tests: tests, + algo: algo, exported: exported, } if debug { @@ -54,6 +56,7 @@ type Checker struct { wd string tests bool + algo string exported bool debugLog io.Writer @@ -128,6 +131,13 @@ func generatedDoc(text string) bool { strings.Contains(text, "DO NOT EDIT") } +func eqlConsts(v1, v2 constant.Value) bool { + if v1 == nil || v2 == nil { + return v1 == v2 + } + return constant.Compare(v1, token.EQL, v2) +} + var stdSizes 
= types.SizesFor("gc", "amd64") func (c *Checker) Check() ([]lint.Issue, error) { @@ -150,15 +160,34 @@ func (c *Checker) Check() ([]lint.Issue, error) { }) } } - cg := cha.CallGraph(c.prog) + var cg *callgraph.Graph + switch c.algo { + case "cha": + cg = cha.CallGraph(c.prog) + case "rta": + mains, err := mainPackages(c.prog, wantPkg) + if err != nil { + return nil, err + } + var roots []*ssa.Function + for _, main := range mains { + roots = append(roots, main.Func("init"), main.Func("main")) + } + result := rta.Analyze(roots, true) + cg = result.CallGraph + default: + return nil, fmt.Errorf("unknown call graph construction algorithm: %q", c.algo) + } + cg.DeleteSyntheticNodes() var issues []lint.Issue -funcLoop: for fn := range ssautil.AllFunctions(c.prog) { - if fn.Pkg == nil { // builtin? + switch { + case fn.Pkg == nil: // builtin? continue - } - if len(fn.Blocks) == 0 { // stub + case fn.Name() == "init": + continue + case len(fn.Blocks) == 0: // stub continue } info := wantPkg[fn.Pkg.Pkg] @@ -182,31 +211,13 @@ funcLoop: c.debug(" skip - dummy implementation\n") continue } - for _, edge := range cg.Nodes[fn].In { - call := edge.Site.Value() - if receivesExtractedArgs(fn.Signature, call) { - // called via function(results()) - c.debug(" skip - type is required via call\n") - continue funcLoop - } - caller := edge.Caller.Func - switch { - case len(caller.FreeVars) == 1 && strings.HasSuffix(caller.Name(), "$bound"): - // passing method via someFunc(type.method) - fallthrough - case len(caller.FreeVars) == 0 && strings.HasSuffix(caller.Name(), "$thunk"): - // passing method via someFunc(recv.method) - c.debug(" skip - type is required via call\n") - continue funcLoop - } - switch edge.Site.Common().Value.(type) { - case *ssa.Function: - default: - // called via a parameter or field, type - // is set in stone. 
- c.debug(" skip - type is required via call\n") - continue funcLoop - } + var calls []*callgraph.Edge + if node := cg.Nodes[fn]; node != nil { + calls = node.In + } + if requiredViaCall(fn, calls) { + c.debug(" skip - type is required via call\n") + continue } if c.multipleImpls(info, fn) { c.debug(" skip - multiple implementations via build tags\n") @@ -214,7 +225,7 @@ funcLoop: } results := fn.Signature.Results() - seenConsts := make([]constant.Value, results.Len()) + seenConsts := make([]*constant.Value, results.Len()) seenParams := make([]*ssa.Parameter, results.Len()) numRets := 0 allRetsExtracting := true @@ -231,9 +242,9 @@ funcLoop: seenParams[i] = nil switch { case numRets == 0: - seenConsts[i] = x.Value + seenConsts[i] = &x.Value case seenConsts[i] == nil: - case !constant.Compare(seenConsts[i], token.EQL, x.Value): + case !eqlConsts(*seenConsts[i], x.Value): seenConsts[i] = nil } case *ssa.Parameter: @@ -258,7 +269,20 @@ funcLoop: numRets++ } for i, val := range seenConsts { - if val == nil || numRets < 2 { + if val == nil { + // no consistent returned constant + continue + } + if *val != nil && numRets == 1 { + // just one non-nil return (too many + // false positives) + continue + } + valStr := "nil" // always returned untyped nil + if *val != nil { + valStr = (*val).String() + } + if calledInReturn(calls) { continue } res := results.At(i) @@ -266,11 +290,10 @@ funcLoop: issues = append(issues, Issue{ pos: res.Pos(), fname: fn.RelString(fn.Package().Pkg), - msg: fmt.Sprintf("result %s is always %s", name, val.String()), + msg: fmt.Sprintf("result %s is always %s", name, valStr), }) } - callers := cg.Nodes[fn].In resLoop: for i := 0; i < results.Len(); i++ { if allRetsExtracting { @@ -284,7 +307,7 @@ funcLoop: continue } count := 0 - for _, edge := range callers { + for _, edge := range calls { val := edge.Site.Value() if val == nil { // e.g. 
go statement count++ @@ -330,7 +353,7 @@ funcLoop: continue } reason := "is unused" - if valStr := c.receivesSameValues(cg.Nodes[fn].In, par, i); valStr != "" { + if valStr := c.receivesSameValues(calls, par, i); valStr != "" { reason = fmt.Sprintf("always receives %s", valStr) } else if anyRealUse(par, i) { c.debug(" skip - used somewhere in the func body\n") @@ -355,6 +378,53 @@ funcLoop: return issues, nil } +func mainPackages(prog *ssa.Program, wantPkg map[*types.Package]*loader.PackageInfo) ([]*ssa.Package, error) { + mains := make([]*ssa.Package, 0, len(wantPkg)) + for tpkg := range wantPkg { + pkg := prog.Package(tpkg) + if tpkg.Name() == "main" && pkg.Func("main") != nil { + mains = append(mains, pkg) + } + } + if len(mains) == 0 { + return nil, fmt.Errorf("no main packages") + } + return mains, nil +} + +func calledInReturn(calls []*callgraph.Edge) bool { + for _, edge := range calls { + val := edge.Site.Value() + if val == nil { // e.g. go statement + continue + } + refs := *val.Referrers() + if len(refs) == 0 { // no use of return values + continue + } + allReturnExtracts := true + for _, instr := range refs { + switch x := instr.(type) { + case *ssa.Return: + return true + case *ssa.Extract: + refs := *x.Referrers() + if len(refs) != 1 { + allReturnExtracts = false + break + } + if _, ok := refs[0].(*ssa.Return); !ok { + allReturnExtracts = false + } + } + } + if allReturnExtracts { + return true + } + } + return false +} + func nodeStr(node ast.Node) string { var buf bytes.Buffer fset := token.NewFileSet() @@ -395,7 +465,7 @@ func (c *Checker) receivesSameValues(in []*callgraph.Edge, par *ssa.Parameter, p seen = cnst.Value // first constant seenOrig = origArg count = 1 - } else if !constant.Compare(seen, token.EQL, cnst.Value) { + } else if !eqlConsts(seen, cnst.Value) { return "" // different constants } else { count++ @@ -471,8 +541,9 @@ func dummyImpl(blk *ssa.BasicBlock) bool { for _, val := range instr.Operands(ops[:0]) { switch x := (*val).(type) 
{ case nil, *ssa.Const, *ssa.ChangeType, *ssa.Alloc, - *ssa.MakeInterface, *ssa.Function, - *ssa.Global, *ssa.IndexAddr, *ssa.Slice, + *ssa.MakeInterface, *ssa.MakeMap, + *ssa.Function, *ssa.Global, + *ssa.IndexAddr, *ssa.Slice, *ssa.UnOp, *ssa.Parameter: case *ssa.Call: if rxHarmlessCall.MatchString(x.Call.Value.String()) { @@ -552,9 +623,6 @@ func (c *Checker) multipleImpls(info *loader.PackageInfo, fn *ssa.Function) bool return false } path := c.prog.Fset.Position(fn.Pos()).Filename - if path == "" { // generated func, like init - return false - } count := c.declCounts(filepath.Dir(path), info.Pkg.Name()) name := fn.Name() if fn.Signature.Recv() != nil { @@ -601,3 +669,20 @@ func paramDesc(i int, v *types.Var) string { } return fmt.Sprintf("%d (%s)", i, v.Type().String()) } + +func requiredViaCall(fn *ssa.Function, calls []*callgraph.Edge) bool { + for _, edge := range calls { + call := edge.Site.Value() + if receivesExtractedArgs(fn.Signature, call) { + // called via function(results()) + return true + } + _, ok := edge.Site.Common().Value.(*ssa.Function) + if !ok { + // called via a parameter or field, type + // is set in stone. + return true + } + } + return false +} diff --git a/tools/vendor/mvdan.cc/unparam/main.go b/tools/vendor/mvdan.cc/unparam/main.go index 09102c74..bd0f02cc 100644 --- a/tools/vendor/mvdan.cc/unparam/main.go +++ b/tools/vendor/mvdan.cc/unparam/main.go @@ -6,24 +6,34 @@ package main // import "mvdan.cc/unparam" import ( "flag" "fmt" + "go/build" "os" + "golang.org/x/tools/go/buildutil" + "mvdan.cc/unparam/check" ) var ( + algo = flag.String("algo", "cha", `call graph construction algorithm (cha, rta). 
+in general, use cha for libraries, and rta for programs with main packages.`) tests = flag.Bool("tests", true, "include tests") exported = flag.Bool("exported", false, "inspect exported functions") debug = flag.Bool("debug", false, "debug prints") ) +func init() { + flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", + buildutil.TagsFlagDoc) +} + func main() { flag.Usage = func() { fmt.Fprintln(os.Stderr, "usage: unparam [flags] [package ...]") flag.PrintDefaults() } flag.Parse() - warns, err := check.UnusedParams(*tests, *exported, *debug, flag.Args()...) + warns, err := check.UnusedParams(*tests, *algo, *exported, *debug, flag.Args()...) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) diff --git a/tools/vendor/vendor.json b/tools/vendor/vendor.json index c30494bf..6e8eed99 100644 --- a/tools/vendor/vendor.json +++ b/tools/vendor/vendor.json @@ -9,10 +9,16 @@ "revisionTime": "2017-02-19T07:16:37Z" }, { - "checksumSHA1": "IL9TI69eihi/XUfY/k/uuBRI8WY=", + "checksumSHA1": "LnZqwaKHuOH0bcpDUrqrcGvER/o=", "path": "github.com/GoASTScanner/gas", - "revision": "6de76c92610b387855cdfdd53c99b149928916f7", - "revisionTime": "2017-10-04T14:01:47Z" + "revision": "1d9f816ca5d8224320a72b8aefbdb6f5d3692da6", + "revisionTime": "2018-03-05T12:20:24Z" + }, + { + "checksumSHA1": "Z03LJp4+mkPL1JTZzzizqWdtZSs=", + "path": "github.com/GoASTScanner/gas/cmd/gas", + "revision": "1d9f816ca5d8224320a72b8aefbdb6f5d3692da6", + "revisionTime": "2018-03-05T12:20:24Z" }, { "checksumSHA1": "sK1dOo48F424xLCvE+ic8tRk7i8=", @@ -21,16 +27,16 @@ "revisionTime": "2017-04-11T19:38:53Z" }, { - "checksumSHA1": "7S1Y+wwGdmJi8LnKONzA2GbV78M=", + "checksumSHA1": "ZSTQB9oOviIo0K+41PBciAFINHU=", "path": "github.com/GoASTScanner/gas/output", - "revision": "1beec25f7754273c9672a3368ea7048d4e73138e", - "revisionTime": "2017-04-11T19:38:53Z" + "revision": "1d9f816ca5d8224320a72b8aefbdb6f5d3692da6", + "revisionTime": "2018-03-05T12:20:24Z" }, { - "checksumSHA1": "IkxNgpECcB1DvI3TGPIFixVqRZU=", 
+ "checksumSHA1": "VQoUd/3JzI8CQdaLrAVN723MGZM=", "path": "github.com/GoASTScanner/gas/rules", - "revision": "1beec25f7754273c9672a3368ea7048d4e73138e", - "revisionTime": "2017-04-11T19:38:53Z" + "revision": "1d9f816ca5d8224320a72b8aefbdb6f5d3692da6", + "revisionTime": "2018-03-05T12:20:24Z" }, { "checksumSHA1": "cItvKwnl+gkO2j0Q2964efC+vTw=", @@ -39,10 +45,10 @@ "revisionTime": "2015-02-08T22:17:26Z" }, { - "checksumSHA1": "HL3Dyr4dmbtBo+V3ULfRJMWAyoA=", + "checksumSHA1": "xEwn4Ufny5rQEJ8f9hao7gkd86g=", "path": "github.com/alecthomas/gometalinter", - "revision": "212b1b91e362ea0b0e441c9b53ce31e81405c240", - "revisionTime": "2017-11-26T10:02:12Z" + "revision": "39a4757a714702004d3fdca45ff83af4dc484af9", + "revisionTime": "2018-02-23T20:08:23Z" }, { "checksumSHA1": "fCc3grA7vIxfBru7R3SqjcW+oLI=", @@ -117,10 +123,10 @@ "revisionTime": "2017-06-02T23:41:31Z" }, { - "checksumSHA1": "REm9ddVDL+Pm1OPxjVMxxpIjyw4=", + "checksumSHA1": "SsCcmchQUYbzaKLaJ0zfrd9DdrI=", "path": "github.com/golang/lint/golint", - "revision": "6aaf7c34af0f4c36a57e0c429bace4d706d8e931", - "revisionTime": "2017-10-05T22:33:36Z" + "revision": "fb4f8c1d3a179654f93ef7e91d68fc7b1de6e88f", + "revisionTime": "2018-03-01T17:26:52Z" }, { "checksumSHA1": "e/Kc2UOy1lKAy31xWlK37M1r2e8=", @@ -149,8 +155,8 @@ { "checksumSHA1": "NKvKUGq0lp/GjLS7Ffp7BAjcoTg=", "path": "github.com/kardianos/govendor", - "revision": "d644d03c4cc00cf7c88a365de8508c12ce56c392", - "revisionTime": "2017-11-27T22:34:05Z" + "revision": "c5ee5dc32350319e3423e570818eaa818601b789", + "revisionTime": "2018-02-09T21:39:04Z" }, { "checksumSHA1": "m24kWw3bFoAkKVvTjmxSLsywdHY=", @@ -225,10 +231,10 @@ "revisionTime": "2017-05-06T05:20:04Z" }, { - "checksumSHA1": "GkLbM8KFqtVcp4kzc40+VinQukY=", + "checksumSHA1": "QSxPR3g/AtzGMJSGrdHH6bBQnTc=", "path": "github.com/kisielk/errcheck", - "revision": "b1445a9dd8285a50c6d1661d16f0a9ceb08125f7", - "revisionTime": "2017-09-18T09:31:01Z" + "revision": "8050dd7cc11578becd8622667107bb21a7baf451", + 
"revisionTime": "2018-03-03T00:00:09Z" }, { "checksumSHA1": "GP25rgIPshJh0tpiBg3Z8Dexqj4=", @@ -243,10 +249,10 @@ "revisionTime": "2016-11-30T08:01:11Z" }, { - "checksumSHA1": "+ArBDwAK19OC1iDy1yEbXOC9sgQ=", + "checksumSHA1": "Us06jbfYQlapYdo8mO94mQMy22o=", "path": "github.com/mattn/goveralls", - "revision": "b71a1e4855f87991aff01c2c833a75a07059c61c", - "revisionTime": "2017-11-14T04:29:57Z" + "revision": "a419d25dbaefa70d50cfbf5fbd2fc2f047bf95d2", + "revisionTime": "2018-03-01T14:36:12Z" }, { "checksumSHA1": "dk0ehYSmMaGLWFQPND3cVgk744I=", @@ -419,14 +425,14 @@ { "checksumSHA1": "eWl/ySoMqPr+Q9p9smYNkTgXu2w=", "path": "github.com/opennota/check/cmd/structcheck", - "revision": "11e2eec79ec4f789607e3efbf405cdca2504d4cb", - "revisionTime": "2017-04-02T03:17:31Z" + "revision": "86da7ade2cccfc1c5d6beeb55e5c65eba54f5f3c", + "revisionTime": "2018-01-21T06:50:09Z" }, { - "checksumSHA1": "Ja04Qe8nP2zw9SYO+TzbebFt5M4=", + "checksumSHA1": "2NeV5byYMgK2g1GLWiqQWwt/OzE=", "path": "github.com/opennota/check/cmd/varcheck", - "revision": "11e2eec79ec4f789607e3efbf405cdca2504d4cb", - "revisionTime": "2017-04-02T03:17:31Z" + "revision": "86da7ade2cccfc1c5d6beeb55e5c65eba54f5f3c", + "revisionTime": "2018-01-21T06:50:09Z" }, { "checksumSHA1": "F1IYMLBLAZaTOWnmXsgaxTGvrWI=", @@ -453,10 +459,28 @@ "revisionTime": "2017-01-28T01:21:29Z" }, { - "checksumSHA1": "2oomtTH6LB5cZZe1xOs4XJV0GTU=", + "checksumSHA1": "PMpzEhKo6usb71Qsby+a8uZMgBw=", "path": "github.com/stripe/safesql", - "revision": "452e37ed794488bd0d99676532f346e03cc6cd2c", - "revisionTime": "2016-03-04T06:18:37Z" + "revision": "cddf355596fe2dbae05b4b5f845b4a6e2fb4e818", + "revisionTime": "2017-12-21T19:52:08Z" + }, + { + "checksumSHA1": "9YtB2Xi9YK/scfhUOjgxmjoaqUw=", + "path": "github.com/tmthrgd/go-bindata", + "revision": "40f4993ede74f673cfe96bed75ef8513a389a00a", + "revisionTime": "2017-11-30T10:15:03Z" + }, + { + "checksumSHA1": "JpZW4NtMSnXZ7T7rug7JEYgeHKc=", + "path": "github.com/tmthrgd/go-bindata/go-bindata", + 
"revision": "40f4993ede74f673cfe96bed75ef8513a389a00a", + "revisionTime": "2017-11-30T10:15:03Z" + }, + { + "checksumSHA1": "/XExakIFq9PUOjkjlMpe7T/Ps+8=", + "path": "github.com/tmthrgd/go-bindata/internal/identifier", + "revision": "40f4993ede74f673cfe96bed75ef8513a389a00a", + "revisionTime": "2017-11-30T10:15:03Z" }, { "checksumSHA1": "fZaFaXc4iKu9PXl8xrmK3RrZpIY=", @@ -482,6 +506,12 @@ "revision": "8b13b3fbf7312913fcfdbfa78997b9bd1dbb11af", "revisionTime": "2016-07-02T15:04:58Z" }, + { + "checksumSHA1": "S32hhkopTwtHKbri0u4mwxV0UqQ=", + "path": "golang.org/x/lint", + "revision": "fb4f8c1d3a179654f93ef7e91d68fc7b1de6e88f", + "revisionTime": "2018-03-01T17:26:52Z" + }, { "checksumSHA1": "PugQbLLjnbBSj+NOXRYBVRnLuuQ=", "path": "golang.org/x/sys/unix", @@ -503,14 +533,14 @@ { "checksumSHA1": "V4M/6A62nVBzPFxPbN+EAatCrVs=", "path": "golang.org/x/tools/cmd/goimports", - "revision": "36c7af3342056179a831c19ec142f2763b310f7b", - "revisionTime": "2017-11-29T19:51:57Z" + "revision": "9f6d4ad827bbe70b5f5c8db2d3d279ea0a2767ad", + "revisionTime": "2018-02-17T07:00:07Z" }, { "checksumSHA1": "V6/A1ZOZ2GUOZcRWcXegtci2FoU=", "path": "golang.org/x/tools/cmd/gotype", - "revision": "36c7af3342056179a831c19ec142f2763b310f7b", - "revisionTime": "2017-11-29T19:51:57Z" + "revision": "9f6d4ad827bbe70b5f5c8db2d3d279ea0a2767ad", + "revisionTime": "2018-02-17T07:00:07Z" }, { "checksumSHA1": "nD89PLkMqA5CakR8SoDuj3iQz1M=", @@ -548,6 +578,12 @@ "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", "revisionTime": "2017-03-22T18:59:57Z" }, + { + "checksumSHA1": "d/01nwqyc48GkZ3eqEOMszzTwBE=", + "path": "golang.org/x/tools/go/callgraph/rta", + "revision": "73e16cff9e0d4a802937444bebb562458548241d", + "revisionTime": "2018-02-27T16:02:18Z" + }, { "checksumSHA1": "rSUfKH182TkCgMhJVsr84a19cbo=", "path": "golang.org/x/tools/go/gcexportdata", @@ -629,26 +665,26 @@ { "checksumSHA1": "YL/UCzWYvDXeFInLOLC1buYve6w=", "path": "honnef.co/go/tools/cmd/gosimple", - "revision": 
"376b3b58b9e4def403181ee2fd3d4cc7de8375ae", - "revisionTime": "2017-11-25T07:40:24Z" + "revision": "8ed405e85c65fb38745a8eafe01ee9590523f172", + "revisionTime": "2018-01-10T22:45:03Z" }, { "checksumSHA1": "84jyAI0Uv1PQ3fN3Ufi0T7/IpOw=", "path": "honnef.co/go/tools/cmd/megacheck", - "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", - "revisionTime": "2017-11-25T07:40:24Z" + "revision": "8ed405e85c65fb38745a8eafe01ee9590523f172", + "revisionTime": "2018-01-10T22:45:03Z" }, { "checksumSHA1": "dP4Ft0yiZSTZOzzNho1Gg5b7o2w=", "path": "honnef.co/go/tools/cmd/staticcheck", - "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", - "revisionTime": "2017-11-25T07:40:24Z" + "revision": "8ed405e85c65fb38745a8eafe01ee9590523f172", + "revisionTime": "2018-01-10T22:45:03Z" }, { "checksumSHA1": "Qipy1/3Z8n4UnoWF9X0sQ/VC5JI=", "path": "honnef.co/go/tools/cmd/unused", - "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", - "revisionTime": "2017-11-25T07:40:24Z" + "revision": "8ed405e85c65fb38745a8eafe01ee9590523f172", + "revisionTime": "2018-01-10T22:45:03Z" }, { "checksumSHA1": "smQXvyCgi0lsTRk7edZNx/z44rc=", @@ -733,10 +769,10 @@ "revision": "" }, { - "checksumSHA1": "uKQMTzjTI15hy1sTGgsZ/b8PImA=", + "checksumSHA1": "FlLpgONxRMWkHp8H9c461RKJMhQ=", "path": "mvdan.cc/interfacer", - "revision": "d7e7372184a059b8fd99d96a593e3811bf989d75", - "revisionTime": "2017-09-08T18:13:45Z" + "revision": "99221a8084d79b2e7419d4a6ddd9d8c7761eae6c", + "revisionTime": "2018-03-01T11:25:15Z" }, { "checksumSHA1": "0+bmt/m62xZSbyATqBbp1MTy6ZI=", @@ -751,17 +787,17 @@ "revisionTime": "2017-09-08T18:12:59Z" }, { - "checksumSHA1": "TCj3HvSF+NLYG4SPv7Mv4GQ09xE=", + "checksumSHA1": "BX0SRkBmSo6WoyfZtcw4ympOsI8=", "path": "mvdan.cc/unparam", - "revision": "644240604b3c6d719b0f981ef28cd1168962efb9", - "revisionTime": "2017-11-25T20:11:03Z" + "revision": "0c3aec22d8e6d9b51a978b31539c51fd52071488", + "revisionTime": "2018-03-01T11:27:09Z" }, { - "checksumSHA1": "Cdz2FebEWti1vA3eW36lypbYn9w=", + 
"checksumSHA1": "aN6Bomg+fwd0GSfKYVgmPf0pd+I=", "path": "mvdan.cc/unparam/check", - "revision": "644240604b3c6d719b0f981ef28cd1168962efb9", - "revisionTime": "2017-11-25T20:11:03Z" + "revision": "0c3aec22d8e6d9b51a978b31539c51fd52071488", + "revisionTime": "2018-03-01T11:27:09Z" } ], - "rootPath": "github.com/wrouesnel/self-contained-go-project/tools" + "rootPath": "github.com/wrouesnel/postgres_exporter/tools" } diff --git a/vendor/github.com/dsnet/compress/LICENSE.md b/vendor/github.com/dsnet/compress/LICENSE.md new file mode 100644 index 00000000..945b396c --- /dev/null +++ b/vendor/github.com/dsnet/compress/LICENSE.md @@ -0,0 +1,24 @@ +Copyright © 2015, Joe Tsai and The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. +* Neither the copyright holder nor the names of its contributors may be used to +endorse or promote products derived from this software without specific prior +written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dsnet/compress/README.md b/vendor/github.com/dsnet/compress/README.md new file mode 100644 index 00000000..f2569441 --- /dev/null +++ b/vendor/github.com/dsnet/compress/README.md @@ -0,0 +1,75 @@ +# Collection of compression libraries for Go # + +[![GoDoc](https://godoc.org/github.com/dsnet/compress/cmp?status.svg)](https://godoc.org/github.com/dsnet/compress) +[![Build Status](https://travis-ci.org/dsnet/compress.svg?branch=master)](https://travis-ci.org/dsnet/compress) +[![Report Card](https://goreportcard.com/badge/github.com/dsnet/compress)](https://goreportcard.com/report/github.com/dsnet/compress) + +## Introduction ## + +**NOTE: This library is in active development. As such, there are no guarantees about the stability of the API. The author reserves the right to arbitrarily break the API for any reason.** + +This repository hosts a collection of compression related libraries. The goal of this project is to provide pure Go implementations for popular compression algorithms beyond what the Go standard library provides. The goals for these packages are as follows: +* Maintainable: That the code remains well documented, well tested, readable, easy to maintain, and easy to verify that it conforms to the specification for the format being implemented. +* Performant: To be able to compress and decompress within at least 80% of the rates that the C implementations are able to achieve. 
+* Flexible: That the code provides low-level and fine granularity control over the compression streams similar to what the C APIs would provide. + +Of these three, the first objective is often at odds with the other two objectives and provides interesting challenges. Higher performance can often be achieved by muddling abstraction layers or using non-intuitive low-level primitives. Also, more features and functionality, while useful in some situations, often complicates the API. Thus, this package will attempt to satisfy all the goals, but will defer to favoring maintainability when the performance or flexibility benefits are not significant enough. + + +## Library Status ## + +For the packages available, only some features are currently implemented: + +| Package | Reader | Writer | +| ------- | :----: | :----: | +| brotli | :white_check_mark: | | +| bzip2 | :white_check_mark: | :white_check_mark: | +| flate | :white_check_mark: | | +| xflate | :white_check_mark: | :white_check_mark: | + +This library is in active development. As such, there are no guarantees about the stability of the API. The author reserves the right to arbitrarily break the API for any reason. When the library becomes more mature, it is planned to eventually conform to some strict versioning scheme like [Semantic Versioning](http://semver.org/). + +However, in the meanwhile, this library does provide some basic API guarantees. For the types defined below, the method signatures are guaranteed to not change. Note that the author still reserves the right to change the fields within each ```Reader``` and ```Writer``` structs. +```go +type ReaderConfig struct { ... } +type Reader struct { ... } + func NewReader(io.Reader, *ReaderConfig) (*Reader, error) { ... } + func (*Reader) Read([]byte) (int, error) { ... } + func (*Reader) Close() error { ... } + +type WriterConfig struct { ... } +type Writer struct { ... } + func NewWriter(io.Writer, *WriterConfig) (*Writer, error) { ... 
} + func (*Writer) Write([]byte) (int, error) { ... } + func (*Writer) Close() error { ... } +``` + +To see what work still remains, see the [Task List](https://github.com/dsnet/compress/wiki/Task-List). + +## Performance ## + +See [Performance Metrics](https://github.com/dsnet/compress/wiki/Performance-Metrics). + + +## Frequently Asked Questions ## + +See [Frequently Asked Questions](https://github.com/dsnet/compress/wiki/Frequently-Asked-Questions). + + +## Installation ## + +Run the command: + +```go get -u github.com/dsnet/compress``` + +This library requires `Go1.7` or higher in order to build. + + +## Packages ## + +| Package | Description | +| :------ | :---------- | +| [brotli](http://godoc.org/github.com/dsnet/compress/brotli) | Package brotli implements the Brotli format, described in RFC 7932. | +| [bzip2](http://godoc.org/github.com/dsnet/compress/bzip2) | Package bzip2 implements the BZip2 compressed data format. | +| [flate](http://godoc.org/github.com/dsnet/compress/flate) | Package flate implements the DEFLATE format, described in RFC 1951. | +| [xflate](http://godoc.org/github.com/dsnet/compress/xflate) | Package xflate implements the XFLATE format, an random-access extension to DEFLATE. | diff --git a/vendor/github.com/dsnet/compress/api.go b/vendor/github.com/dsnet/compress/api.go new file mode 100644 index 00000000..f80a9232 --- /dev/null +++ b/vendor/github.com/dsnet/compress/api.go @@ -0,0 +1,74 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package compress is a collection of compression libraries. +package compress + +import ( + "bufio" + "io" + + "github.com/dsnet/compress/internal/errors" +) + +// The Error interface identifies all compression related errors. +type Error interface { + error + CompressError() + + // IsDeprecated reports the use of a deprecated and unsupported feature. 
+ IsDeprecated() bool + + // IsCorrupted reports whether the input stream was corrupted. + IsCorrupted() bool +} + +var _ Error = errors.Error{} + +// ByteReader is an interface accepted by all decompression Readers. +// It guarantees that the decompressor never reads more data than is necessary +// from the underlying io.Reader. +type ByteReader interface { + io.Reader + io.ByteReader +} + +var _ ByteReader = (*bufio.Reader)(nil) + +// BufferedReader is an interface accepted by all decompression Readers. +// It guarantees that the decompressor never reads more data than is necessary +// from the underlying io.Reader. Since BufferedReader allows a decompressor +// to peek at bytes further along in the stream without advancing the read +// pointer, decompression can experience a significant performance gain when +// provided a reader that satisfies this interface. Thus, a decompressor will +// prefer this interface over ByteReader for performance reasons. +// +// The bufio.Reader satisfies this interface. +type BufferedReader interface { + io.Reader + + // Buffered returns the number of bytes currently buffered. + // + // This value becomes invalid following the next Read/Discard operation. + Buffered() int + + // Peek returns the next n bytes without advancing the reader. + // + // If Peek returns fewer than n bytes, it also returns an error explaining + // why the peek is short. Peek must support peeking of at least 8 bytes. + // If 0 <= n <= Buffered(), Peek is guaranteed to succeed without reading + // from the underlying io.Reader. + // + // This result becomes invalid following the next Read/Discard operation. + Peek(n int) ([]byte, error) + + // Discard skips the next n bytes, returning the number of bytes discarded. + // + // If Discard skips fewer than n bytes, it also returns an error. + // If 0 <= n <= Buffered(), Discard is guaranteed to succeed without reading + // from the underlying io.Reader. 
+ Discard(n int) (int, error) +} + +var _ BufferedReader = (*bufio.Reader)(nil) diff --git a/vendor/github.com/dsnet/compress/bzip2/bwt.go b/vendor/github.com/dsnet/compress/bzip2/bwt.go new file mode 100644 index 00000000..44a2541f --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/bwt.go @@ -0,0 +1,110 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import "github.com/dsnet/compress/bzip2/internal/sais" + +// The Burrows-Wheeler Transform implementation used here is based on the +// Suffix Array by Induced Sorting (SA-IS) methodology by Nong, Zhang, and Chan. +// This implementation uses the sais algorithm originally written by Yuta Mori. +// +// The SA-IS algorithm runs in O(n) and outputs a Suffix Array. There is a +// mathematical relationship between Suffix Arrays and the Burrows-Wheeler +// Transform, such that a SA can be converted to a BWT in O(n) time. +// +// References: +// http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf +// https://github.com/cscott/compressjs/blob/master/lib/BWT.js +// https://www.quora.com/How-can-I-optimize-burrows-wheeler-transform-and-inverse-transform-to-work-in-O-n-time-O-n-space +type burrowsWheelerTransform struct { + buf []byte + sa []int + perm []uint32 +} + +func (bwt *burrowsWheelerTransform) Encode(buf []byte) (ptr int) { + if len(buf) == 0 { + return -1 + } + + // TODO(dsnet): Find a way to avoid the duplicate input string method. + // We only need to do this because suffix arrays (by definition) only + // operate non-wrapped suffixes of a string. On the other hand, + // the BWT specifically used in bzip2 operate on a strings that wrap-around + // when being sorted. + + // Step 1: Concatenate the input string to itself so that we can use the + // suffix array algorithm for bzip2's variant of BWT. + n := len(buf) + bwt.buf = append(append(bwt.buf[:0], buf...), buf...) 
+ if cap(bwt.sa) < 2*n { + bwt.sa = make([]int, 2*n) + } + t := bwt.buf[:2*n] + sa := bwt.sa[:2*n] + + // Step 2: Compute the suffix array (SA). The input string, t, will not be + // modified, while the results will be written to the output, sa. + sais.ComputeSA(t, sa) + + // Step 3: Convert the SA to a BWT. Since ComputeSA does not mutate the + // input, we have two copies of the input; in buf and buf2. Thus, we write + // the transformation to buf, while using buf2. + var j int + buf2 := t[n:] + for _, i := range sa { + if i < n { + if i == 0 { + ptr = j + i = n + } + buf[j] = buf2[i-1] + j++ + } + } + return ptr +} + +func (bwt *burrowsWheelerTransform) Decode(buf []byte, ptr int) { + if len(buf) == 0 { + return + } + + // Step 1: Compute cumm, where cumm[ch] reports the total number of + // characters that precede the character ch in the alphabet. + var cumm [256]int + for _, v := range buf { + cumm[v]++ + } + var sum int + for i, v := range cumm { + cumm[i] = sum + sum += v + } + + // Step 2: Compute perm, where perm[ptr] contains a pointer to the next + // byte in buf and the next pointer in perm itself. + if cap(bwt.perm) < len(buf) { + bwt.perm = make([]uint32, len(buf)) + } + perm := bwt.perm[:len(buf)] + for i, b := range buf { + perm[cumm[b]] = uint32(i) + cumm[b]++ + } + + // Step 3: Follow each pointer in perm to the next byte, starting with the + // origin pointer. + if cap(bwt.buf) < len(buf) { + bwt.buf = make([]byte, len(buf)) + } + buf2 := bwt.buf[:len(buf)] + i := perm[ptr] + for j := range buf2 { + buf2[j] = buf[i] + i = perm[i] + } + copy(buf, buf2) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/common.go b/vendor/github.com/dsnet/compress/bzip2/common.go new file mode 100644 index 00000000..c6339815 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/common.go @@ -0,0 +1,110 @@ +// Copyright 2015, Joe Tsai. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package bzip2 implements the BZip2 compressed data format. +// +// Canonical C implementation: +// http://bzip.org +// +// Unofficial format specification: +// https://github.com/dsnet/compress/blob/master/doc/bzip2-format.pdf +package bzip2 + +import ( + "fmt" + "hash/crc32" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" +) + +// There does not exist a formal specification of the BZip2 format. As such, +// much of this work is derived by either reverse engineering the original C +// source code or using secondary sources. +// +// Significant amounts of fuzz testing is done to ensure that outputs from +// this package is properly decoded by the C library. Furthermore, we test that +// both this package and the C library agree about what inputs are invalid. +// +// Compression stack: +// Run-length encoding 1 (RLE1) +// Burrows-Wheeler transform (BWT) +// Move-to-front transform (MTF) +// Run-length encoding 2 (RLE2) +// Prefix encoding (PE) +// +// References: +// http://bzip.org/ +// https://en.wikipedia.org/wiki/Bzip2 +// https://code.google.com/p/jbzip2/ + +const ( + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = 6 +) + +const ( + hdrMagic = 0x425a // Hex of "BZ" + blkMagic = 0x314159265359 // BCD of PI + endMagic = 0x177245385090 // BCD of sqrt(PI) + + blockSize = 100000 +) + +func errorf(c int, f string, a ...interface{}) error { + return errors.Error{Code: c, Pkg: "bzip2", Msg: fmt.Sprintf(f, a...)} +} + +func panicf(c int, f string, a ...interface{}) { + errors.Panic(errorf(c, f, a...)) +} + +// errWrap converts a lower-level errors.Error to be one from this package. +// The replaceCode passed in will be used to replace the code for any errors +// with the errors.Invalid code. +// +// For the Reader, set this to errors.Corrupted. +// For the Writer, set this to errors.Internal. 
+func errWrap(err error, replaceCode int) error { + if cerr, ok := err.(errors.Error); ok { + if errors.IsInvalid(cerr) { + cerr.Code = replaceCode + } + err = errorf(cerr.Code, "%s", cerr.Msg) + } + return err +} + +var errClosed = errorf(errors.Closed, "") + +// crc computes the CRC-32 used by BZip2. +// +// The CRC-32 computation in bzip2 treats bytes as having bits in big-endian +// order. That is, the MSB is read before the LSB. Thus, we can use the +// standard library version of CRC-32 IEEE with some minor adjustments. +// +// The byte array is used as an intermediate buffer to swap the bits of every +// byte of the input. +type crc struct { + val uint32 + buf [256]byte +} + +// update computes the CRC-32 of appending buf to c. +func (c *crc) update(buf []byte) { + cval := internal.ReverseUint32(c.val) + for len(buf) > 0 { + n := len(buf) + if n > len(c.buf) { + n = len(c.buf) + } + for i, b := range buf[:n] { + c.buf[i] = internal.ReverseLUT[b] + } + cval = crc32.Update(cval, crc32.IEEETable, c.buf[:n]) + buf = buf[n:] + } + c.val = internal.ReverseUint32(cval) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go b/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go new file mode 100644 index 00000000..ddd32f50 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go @@ -0,0 +1,13 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !gofuzz + +// This file exists to suppress fuzzing details from release builds. + +package bzip2 + +type fuzzReader struct{} + +func (*fuzzReader) updateChecksum(int64, uint32) {} diff --git a/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go b/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go new file mode 100644 index 00000000..54122351 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go @@ -0,0 +1,77 @@ +// Copyright 2016, Joe Tsai. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build gofuzz + +// This file exists to export internal implementation details for fuzz testing. + +package bzip2 + +func ForwardBWT(buf []byte) (ptr int) { + var bwt burrowsWheelerTransform + return bwt.Encode(buf) +} + +func ReverseBWT(buf []byte, ptr int) { + var bwt burrowsWheelerTransform + bwt.Decode(buf, ptr) +} + +type fuzzReader struct { + Checksums Checksums +} + +// updateChecksum updates Checksums. +// +// If a valid pos is provided, it appends the (pos, val) pair to the slice. +// Otherwise, it will update the last record with the new value. +func (fr *fuzzReader) updateChecksum(pos int64, val uint32) { + if pos >= 0 { + fr.Checksums = append(fr.Checksums, Checksum{pos, val}) + } else { + fr.Checksums[len(fr.Checksums)-1].Value = val + } +} + +type Checksum struct { + Offset int64 // Bit offset of the checksum + Value uint32 // Checksum value +} + +type Checksums []Checksum + +// Apply overwrites all checksum fields in d with the ones in cs. +func (cs Checksums) Apply(d []byte) []byte { + d = append([]byte(nil), d...) + for _, c := range cs { + setU32(d, c.Offset, c.Value) + } + return d +} + +func setU32(d []byte, pos int64, val uint32) { + for i := uint(0); i < 32; i++ { + bpos := uint64(pos) + uint64(i) + d[bpos/8] &= ^byte(1 << (7 - bpos%8)) + d[bpos/8] |= byte(val>>(31-i)) << (7 - bpos%8) + } +} + +// Verify checks that all checksum fields in d matches those in cs. 
+func (cs Checksums) Verify(d []byte) bool { + for _, c := range cs { + if getU32(d, c.Offset) != c.Value { + return false + } + } + return true +} + +func getU32(d []byte, pos int64) (val uint32) { + for i := uint(0); i < 32; i++ { + bpos := uint64(pos) + uint64(i) + val |= (uint32(d[bpos/8] >> (7 - bpos%8))) << (31 - i) + } + return val +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go new file mode 100644 index 00000000..cd4eee82 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go @@ -0,0 +1,28 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package sais implements a linear time suffix array algorithm. +package sais + +//go:generate go run sais_gen.go byte sais_byte.go +//go:generate go run sais_gen.go int sais_int.go + +// This package ports the C sais implementation by Yuta Mori. The ports are +// located in sais_byte.go and sais_int.go, which are identical to each other +// except for the types. Since Go does not support generics, we use generators to +// create the two files. +// +// References: +// https://sites.google.com/site/yuta256/sais +// https://www.researchgate.net/publication/221313676_Linear_Time_Suffix_Array_Construction_Using_D-Critical_Substrings +// https://www.researchgate.net/publication/224176324_Two_Efficient_Algorithms_for_Linear_Time_Suffix_Array_Construction + +// ComputeSA computes the suffix array of t and places the result in sa. +// Both t and sa must be the same length. 
+func ComputeSA(t []byte, sa []int) { + if len(sa) != len(t) { + panic("mismatching sizes") + } + computeSA_byte(t, sa, 0, len(t), 256) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go new file mode 100644 index 00000000..01b8529b --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go @@ -0,0 +1,661 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Code generated by sais_gen.go. DO NOT EDIT. + +// ==================================================== +// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// ==================================================== + +package sais + +func getCounts_byte(T []byte, C []int, n, k int) { + var i int + for i = 0; i < k; i++ { + C[i] = 0 + } + for i = 0; i < n; i++ { + C[T[i]]++ + } +} + +func getBuckets_byte(C, B []int, k int, end bool) { + var i, sum int + if end { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum + } + } else { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum - C[i] + } + } +} + +func sortLMS1_byte(T []byte, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + + // Compute SAs. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + b-- + if int(T[j]) > c1 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS1_byte(T []byte, SA []int, n, m int) int { + var i, j, p, q, plen, qlen, name int + var c0, c1 int + var diff bool + + // Compact all the sorted substrings into the first m items of SA. + // 2*m must be not larger than n (provable). + for i = 0; SA[i] < 0; i++ { + SA[i] = ^SA[i] + } + if i < m { + for j, i = i, i+1; ; i++ { + if p = SA[i]; p < 0 { + SA[j] = ^p + j++ + SA[i] = 0 + if j == m { + break + } + } + } + } + + // Store the length of all substrings. 
+ i = n - 1 + j = n - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + SA[m+((i+1)>>1)] = j - i + j = i + 1 + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + // Find the lexicographic names of all substrings. + name = 0 + qlen = 0 + for i, q = 0, n; i < m; i++ { + p = SA[i] + plen = SA[m+(p>>1)] + diff = true + if (plen == qlen) && ((q + plen) < n) { + for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { + } + if j == plen { + diff = false + } + } + if diff { + name++ + q = p + qlen = plen + } + SA[m+(p>>1)] = name + } + return name +} + +func sortLMS2_byte(T []byte, SA, C, B, D []int, n, k int) { + var b, i, j, t, d int + var c0, c1 int + + // Compute SAl. + getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + t = 1 + } else { + t = 0 + } + j += n + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i, d = 0, 0; i < n; i++ { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) < c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + for i = n - 1; 0 <= i; i-- { + if SA[i] > 0 { + if SA[i] < n { + SA[i] += n + for j = i - 1; SA[j] < n; j-- { + } + SA[j] -= n + i = j + } + } + } + + // Compute SAs. 
+ getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i, d = n-1, d+1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) > c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + b-- + if t&1 > 0 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS2_byte(SA []int, n, m int) int { + var i, j, d, name int + + // Compact all the sorted LMS substrings into the first m items of SA. + name = 0 + for i = 0; SA[i] < 0; i++ { + j = ^SA[i] + if n <= j { + name += 1 + } + SA[i] = j + } + if i < m { + for d, i = i, i+1; ; i++ { + if j = SA[i]; j < 0 { + j = ^j + if n <= j { + name += 1 + } + SA[d] = j + d++ + SA[i] = 0 + if d == m { + break + } + } + } + } + if name < m { + // Store the lexicographic names. + for i, d = m-1, name+1; 0 <= i; i-- { + if j = SA[i]; n <= j { + j -= n + d-- + } + SA[m+(j>>1)] = d + } + } else { + // Unset flags. + for i = 0; i < m; i++ { + if j = SA[i]; n <= j { + j -= n + SA[i] = j + } + } + } + return name +} + +func induceSA_byte(T []byte, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + j = SA[i] + SA[i] = ^j + if j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + } + } + + // Compute SAs. 
+ if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + b-- + if (j == 0) || (int(T[j-1]) > c1) { + SA[b] = ^j + } else { + SA[b] = j + } + } else { + SA[i] = ^j + } + } +} + +func computeSA_byte(T []byte, SA []int, fs, n, k int) { + const ( + minBucketSize = 512 + sortLMS2Limit = 0x3fffffff + ) + + var C, B, D, RA []int + var bo int // Offset of B relative to SA + var b, i, j, m, p, q, name, newfs int + var c0, c1 int + var flags uint + + if k <= minBucketSize { + C = make([]int, k) + if k <= fs { + bo = n + fs - k + B = SA[bo:] + flags = 1 + } else { + B = make([]int, k) + flags = 3 + } + } else if k <= fs { + C = SA[n+fs-k:] + if k <= fs-k { + bo = n + fs - 2*k + B = SA[bo:] + flags = 0 + } else if k <= 4*minBucketSize { + B = make([]int, k) + flags = 2 + } else { + B = C + flags = 8 + } + } else { + C = make([]int, k) + B = C + flags = 4 | 8 + } + if n <= sortLMS2Limit && 2 <= (n/k) { + if flags&1 > 0 { + if 2*k <= fs-k { + flags |= 32 + } else { + flags |= 16 + } + } else if flags == 0 && 2*k <= (fs-2*k) { + flags |= 32 + } + } + + // Stage 1: Reduce the problem by at least 1/2. + // Sort all the LMS-substrings. 
+ getCounts_byte(T, C, n, k) + getBuckets_byte(C, B, k, true) // Find ends of buckets + for i = 0; i < n; i++ { + SA[i] = 0 + } + b = -1 + i = n - 1 + j = n + m = 0 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + if b >= 0 { + SA[b] = j + } + B[c1]-- + b = B[c1] + j = i + m++ + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + if m > 1 { + if flags&(16|32) > 0 { + if flags&16 > 0 { + D = make([]int, 2*k) + } else { + D = SA[bo-2*k:] + } + B[T[j+1]]++ + for i, j = 0, 0; i < k; i++ { + j += C[i] + if B[i] != j { + SA[B[i]] += n + } + D[i] = 0 + D[i+k] = 0 + } + sortLMS2_byte(T, SA, C, B, D, n, k) + name = postProcLMS2_byte(SA, n, m) + } else { + sortLMS1_byte(T, SA, C, B, n, k) + name = postProcLMS1_byte(T, SA, n, m) + } + } else if m == 1 { + SA[b] = j + 1 + name = 1 + } else { + name = 0 + } + + // Stage 2: Solve the reduced problem. + // Recurse if names are not yet unique. 
+ if name < m { + newfs = n + fs - 2*m + if flags&(1|4|8) == 0 { + if k+name <= newfs { + newfs -= k + } else { + flags |= 8 + } + } + RA = SA[m+newfs:] + for i, j = m+(n>>1)-1, m-1; m <= i; i-- { + if SA[i] != 0 { + RA[j] = SA[i] - 1 + j-- + } + } + computeSA_int(RA, SA, newfs, m, name) + + i = n - 1 + j = m - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + RA[j] = i + 1 + j-- + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + for i = 0; i < m; i++ { + SA[i] = RA[SA[i]] + } + if flags&4 > 0 { + B = make([]int, k) + C = B + } + if flags&2 > 0 { + B = make([]int, k) + } + } + + // Stage 3: Induce the result for the original problem. + if flags&8 > 0 { + getCounts_byte(T, C, n, k) + } + // Put all left-most S characters into their buckets. + if m > 1 { + getBuckets_byte(C, B, k, true) // Find ends of buckets + i = m - 1 + j = n + p = SA[m-1] + c1 = int(T[p]) + for { + c0 = c1 + q = B[c0] + for q < j { + j-- + SA[j] = 0 + } + for { + j-- + SA[j] = p + if i--; i < 0 { + break + } + p = SA[i] + if c1 = int(T[p]); c1 != c0 { + break + } + } + if i < 0 { + break + } + } + for j > 0 { + j-- + SA[j] = 0 + } + } + induceSA_byte(T, SA, C, B, n, k) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go new file mode 100644 index 00000000..280682f0 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go @@ -0,0 +1,661 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Code generated by sais_gen.go. DO NOT EDIT. 
+ +// ==================================================== +// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. +// ==================================================== + +package sais + +func getCounts_int(T []int, C []int, n, k int) { + var i int + for i = 0; i < k; i++ { + C[i] = 0 + } + for i = 0; i < n; i++ { + C[T[i]]++ + } +} + +func getBuckets_int(C, B []int, k int, end bool) { + var i, sum int + if end { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum + } + } else { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum - C[i] + } + } +} + +func sortLMS1_int(T []int, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. 
+ if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + + // Compute SAs. + if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + b-- + if int(T[j]) > c1 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS1_int(T []int, SA []int, n, m int) int { + var i, j, p, q, plen, qlen, name int + var c0, c1 int + var diff bool + + // Compact all the sorted substrings into the first m items of SA. + // 2*m must be not larger than n (provable). + for i = 0; SA[i] < 0; i++ { + SA[i] = ^SA[i] + } + if i < m { + for j, i = i, i+1; ; i++ { + if p = SA[i]; p < 0 { + SA[j] = ^p + j++ + SA[i] = 0 + if j == m { + break + } + } + } + } + + // Store the length of all substrings. + i = n - 1 + j = n - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + SA[m+((i+1)>>1)] = j - i + j = i + 1 + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + // Find the lexicographic names of all substrings. 
+ name = 0 + qlen = 0 + for i, q = 0, n; i < m; i++ { + p = SA[i] + plen = SA[m+(p>>1)] + diff = true + if (plen == qlen) && ((q + plen) < n) { + for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { + } + if j == plen { + diff = false + } + } + if diff { + name++ + q = p + qlen = plen + } + SA[m+(p>>1)] = name + } + return name +} + +func sortLMS2_int(T []int, SA, C, B, D []int, n, k int) { + var b, i, j, t, d int + var c0, c1 int + + // Compute SAl. + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + t = 1 + } else { + t = 0 + } + j += n + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i, d = 0, 0; i < n; i++ { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) < c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + for i = n - 1; 0 <= i; i-- { + if SA[i] > 0 { + if SA[i] < n { + SA[i] += n + for j = i - 1; SA[j] < n; j-- { + } + SA[j] -= n + i = j + } + } + } + + // Compute SAs. + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i, d = n-1, d+1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) > c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + b-- + if t&1 > 0 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS2_int(SA []int, n, m int) int { + var i, j, d, name int + + // Compact all the sorted LMS substrings into the first m items of SA. 
+ name = 0 + for i = 0; SA[i] < 0; i++ { + j = ^SA[i] + if n <= j { + name += 1 + } + SA[i] = j + } + if i < m { + for d, i = i, i+1; ; i++ { + if j = SA[i]; j < 0 { + j = ^j + if n <= j { + name += 1 + } + SA[d] = j + d++ + SA[i] = 0 + if d == m { + break + } + } + } + } + if name < m { + // Store the lexicographic names. + for i, d = m-1, name+1; 0 <= i; i-- { + if j = SA[i]; n <= j { + j -= n + d-- + } + SA[m+(j>>1)] = d + } + } else { + // Unset flags. + for i = 0; i < m; i++ { + if j = SA[i]; n <= j { + j -= n + SA[i] = j + } + } + } + return name +} + +func induceSA_int(T []int, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + j = SA[i] + SA[i] = ^j + if j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + } + } + + // Compute SAs. 
+ if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + b-- + if (j == 0) || (int(T[j-1]) > c1) { + SA[b] = ^j + } else { + SA[b] = j + } + } else { + SA[i] = ^j + } + } +} + +func computeSA_int(T []int, SA []int, fs, n, k int) { + const ( + minBucketSize = 512 + sortLMS2Limit = 0x3fffffff + ) + + var C, B, D, RA []int + var bo int // Offset of B relative to SA + var b, i, j, m, p, q, name, newfs int + var c0, c1 int + var flags uint + + if k <= minBucketSize { + C = make([]int, k) + if k <= fs { + bo = n + fs - k + B = SA[bo:] + flags = 1 + } else { + B = make([]int, k) + flags = 3 + } + } else if k <= fs { + C = SA[n+fs-k:] + if k <= fs-k { + bo = n + fs - 2*k + B = SA[bo:] + flags = 0 + } else if k <= 4*minBucketSize { + B = make([]int, k) + flags = 2 + } else { + B = C + flags = 8 + } + } else { + C = make([]int, k) + B = C + flags = 4 | 8 + } + if n <= sortLMS2Limit && 2 <= (n/k) { + if flags&1 > 0 { + if 2*k <= fs-k { + flags |= 32 + } else { + flags |= 16 + } + } else if flags == 0 && 2*k <= (fs-2*k) { + flags |= 32 + } + } + + // Stage 1: Reduce the problem by at least 1/2. + // Sort all the LMS-substrings. 
+ getCounts_int(T, C, n, k) + getBuckets_int(C, B, k, true) // Find ends of buckets + for i = 0; i < n; i++ { + SA[i] = 0 + } + b = -1 + i = n - 1 + j = n + m = 0 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + if b >= 0 { + SA[b] = j + } + B[c1]-- + b = B[c1] + j = i + m++ + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + if m > 1 { + if flags&(16|32) > 0 { + if flags&16 > 0 { + D = make([]int, 2*k) + } else { + D = SA[bo-2*k:] + } + B[T[j+1]]++ + for i, j = 0, 0; i < k; i++ { + j += C[i] + if B[i] != j { + SA[B[i]] += n + } + D[i] = 0 + D[i+k] = 0 + } + sortLMS2_int(T, SA, C, B, D, n, k) + name = postProcLMS2_int(SA, n, m) + } else { + sortLMS1_int(T, SA, C, B, n, k) + name = postProcLMS1_int(T, SA, n, m) + } + } else if m == 1 { + SA[b] = j + 1 + name = 1 + } else { + name = 0 + } + + // Stage 2: Solve the reduced problem. + // Recurse if names are not yet unique. 
+ if name < m { + newfs = n + fs - 2*m + if flags&(1|4|8) == 0 { + if k+name <= newfs { + newfs -= k + } else { + flags |= 8 + } + } + RA = SA[m+newfs:] + for i, j = m+(n>>1)-1, m-1; m <= i; i-- { + if SA[i] != 0 { + RA[j] = SA[i] - 1 + j-- + } + } + computeSA_int(RA, SA, newfs, m, name) + + i = n - 1 + j = m - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + RA[j] = i + 1 + j-- + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + for i = 0; i < m; i++ { + SA[i] = RA[SA[i]] + } + if flags&4 > 0 { + B = make([]int, k) + C = B + } + if flags&2 > 0 { + B = make([]int, k) + } + } + + // Stage 3: Induce the result for the original problem. + if flags&8 > 0 { + getCounts_int(T, C, n, k) + } + // Put all left-most S characters into their buckets. + if m > 1 { + getBuckets_int(C, B, k, true) // Find ends of buckets + i = m - 1 + j = n + p = SA[m-1] + c1 = int(T[p]) + for { + c0 = c1 + q = B[c0] + for q < j { + j-- + SA[j] = 0 + } + for { + j-- + SA[j] = p + if i--; i < 0 { + break + } + p = SA[i] + if c1 = int(T[p]); c1 != c0 { + break + } + } + if i < 0 { + break + } + } + for j > 0 { + j-- + SA[j] = 0 + } + } + induceSA_int(T, SA, C, B, n, k) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go b/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go new file mode 100644 index 00000000..5c71b343 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go @@ -0,0 +1,131 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import "github.com/dsnet/compress/internal/errors" + +// moveToFront implements both the MTF and RLE stages of bzip2 at the same time. 
+// Any runs of zeros in the encoded output will be replaced by a sequence of +// RUNA and RUNB symbols are encode the length of the run. +// +// The RLE encoding used can actually be encoded to and decoded from using +// normal two's complement arithmetic. The methodology for doing so is below. +// +// Assuming the following: +// num: The value being encoded by RLE encoding. +// run: A sequence of RUNA and RUNB symbols represented as a binary integer, +// where RUNA is the 0 bit, RUNB is the 1 bit, and least-significant RUN +// symbols are at the least-significant bit positions. +// cnt: The number of RUNA and RUNB symbols. +// +// Then the RLE encoding used by bzip2 has this mathematical property: +// num+1 == (1< len(mtf.dictBuf) { + panicf(errors.Internal, "alphabet too large") + } + copy(mtf.dictBuf[:], dict) + mtf.dictLen = len(dict) + mtf.blkSize = blkSize +} + +func (mtf *moveToFront) Encode(vals []byte) (syms []uint16) { + dict := mtf.dictBuf[:mtf.dictLen] + syms = mtf.syms[:0] + + if len(vals) > mtf.blkSize { + panicf(errors.Internal, "exceeded block size") + } + + var lastNum uint32 + for _, val := range vals { + // Normal move-to-front transform. + var idx uint8 // Reverse lookup idx in dict + for di, dv := range dict { + if dv == val { + idx = uint8(di) + break + } + } + copy(dict[1:], dict[:idx]) + dict[0] = val + + // Run-length encoding augmentation. + if idx == 0 { + lastNum++ + continue + } + if lastNum > 0 { + for rc := lastNum + 1; rc != 1; rc >>= 1 { + syms = append(syms, uint16(rc&1)) + } + lastNum = 0 + } + syms = append(syms, uint16(idx)+1) + } + if lastNum > 0 { + for rc := lastNum + 1; rc != 1; rc >>= 1 { + syms = append(syms, uint16(rc&1)) + } + } + mtf.syms = syms + return syms +} + +func (mtf *moveToFront) Decode(syms []uint16) (vals []byte) { + dict := mtf.dictBuf[:mtf.dictLen] + vals = mtf.vals[:0] + + var lastCnt uint + var lastRun uint32 + for _, sym := range syms { + // Run-length encoding augmentation. 
+ if sym < 2 { + lastRun |= uint32(sym) << lastCnt + lastCnt++ + continue + } + if lastCnt > 0 { + cnt := int((1< mtf.blkSize || lastCnt > 24 { + panicf(errors.Corrupted, "run-length decoding exceeded block size") + } + for i := cnt; i > 0; i-- { + vals = append(vals, dict[0]) + } + lastCnt, lastRun = 0, 0 + } + + // Normal move-to-front transform. + val := dict[sym-1] // Forward lookup val in dict + copy(dict[1:], dict[:sym-1]) + dict[0] = val + + if len(vals) >= mtf.blkSize { + panicf(errors.Corrupted, "run-length decoding exceeded block size") + } + vals = append(vals, val) + } + if lastCnt > 0 { + cnt := int((1< mtf.blkSize || lastCnt > 24 { + panicf(errors.Corrupted, "run-length decoding exceeded block size") + } + for i := cnt; i > 0; i-- { + vals = append(vals, dict[0]) + } + } + mtf.vals = vals + return vals +} diff --git a/vendor/github.com/dsnet/compress/bzip2/prefix.go b/vendor/github.com/dsnet/compress/bzip2/prefix.go new file mode 100644 index 00000000..4847d809 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/prefix.go @@ -0,0 +1,374 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import ( + "io" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" + "github.com/dsnet/compress/internal/prefix" +) + +const ( + minNumTrees = 2 + maxNumTrees = 6 + + maxPrefixBits = 20 // Maximum bit-width of a prefix code + maxNumSyms = 256 + 2 // Maximum number of symbols in the alphabet + numBlockSyms = 50 // Number of bytes in a block +) + +// encSel and decSel are used to handle the prefix encoding for tree selectors. 
+// The prefix encoding is as follows: +// +// Code TreeIdx +// 0 <=> 0 +// 10 <=> 1 +// 110 <=> 2 +// 1110 <=> 3 +// 11110 <=> 4 +// 111110 <=> 5 +// 111111 <=> 6 Invalid tree index, so should fail +// +var encSel, decSel = func() (e prefix.Encoder, d prefix.Decoder) { + var selCodes [maxNumTrees + 1]prefix.PrefixCode + for i := range selCodes { + selCodes[i] = prefix.PrefixCode{Sym: uint32(i), Len: uint32(i + 1)} + } + selCodes[maxNumTrees] = prefix.PrefixCode{Sym: maxNumTrees, Len: maxNumTrees} + prefix.GeneratePrefixes(selCodes[:]) + e.Init(selCodes[:]) + d.Init(selCodes[:]) + return +}() + +type prefixReader struct{ prefix.Reader } + +func (pr *prefixReader) Init(r io.Reader) { + pr.Reader.Init(r, true) +} + +func (pr *prefixReader) ReadBitsBE64(nb uint) uint64 { + if nb <= 32 { + v := uint32(pr.ReadBits(nb)) + return uint64(internal.ReverseUint32N(v, nb)) + } + v0 := internal.ReverseUint32(uint32(pr.ReadBits(32))) + v1 := internal.ReverseUint32(uint32(pr.ReadBits(nb - 32))) + v := uint64(v0)<<32 | uint64(v1) + return v >> (64 - nb) +} + +func (pr *prefixReader) ReadPrefixCodes(codes []prefix.PrefixCodes, trees []prefix.Decoder) { + for i, pc := range codes { + clen := int(pr.ReadBitsBE64(5)) + sum := 1 << maxPrefixBits + for sym := range pc { + for { + if clen < 1 || clen > maxPrefixBits { + panicf(errors.Corrupted, "invalid prefix bit-length: %d", clen) + } + + b, ok := pr.TryReadBits(1) + if !ok { + b = pr.ReadBits(1) + } + if b == 0 { + break + } + + b, ok = pr.TryReadBits(1) + if !ok { + b = pr.ReadBits(1) + } + clen -= int(b*2) - 1 // +1 or -1 + } + pc[sym] = prefix.PrefixCode{Sym: uint32(sym), Len: uint32(clen)} + sum -= (1 << maxPrefixBits) >> uint(clen) + } + + if sum == 0 { + // Fast path, but only handles complete trees. + if err := prefix.GeneratePrefixes(pc); err != nil { + errors.Panic(err) // Using complete trees; should never fail + } + } else { + // Slow path, but handles anything. 
+ pc = handleDegenerateCodes(pc) // Never fails, but may fail later + codes[i] = pc + } + trees[i].Init(pc) + } +} + +type prefixWriter struct{ prefix.Writer } + +func (pw *prefixWriter) Init(w io.Writer) { + pw.Writer.Init(w, true) +} + +func (pw *prefixWriter) WriteBitsBE64(v uint64, nb uint) { + if nb <= 32 { + v := internal.ReverseUint32N(uint32(v), nb) + pw.WriteBits(uint(v), nb) + return + } + v <<= (64 - nb) + v0 := internal.ReverseUint32(uint32(v >> 32)) + v1 := internal.ReverseUint32(uint32(v)) + pw.WriteBits(uint(v0), 32) + pw.WriteBits(uint(v1), nb-32) + return +} + +func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []prefix.Encoder) { + for i, pc := range codes { + if err := prefix.GeneratePrefixes(pc); err != nil { + errors.Panic(err) // Using complete trees; should never fail + } + trees[i].Init(pc) + + clen := int(pc[0].Len) + pw.WriteBitsBE64(uint64(clen), 5) + for _, c := range pc { + for int(c.Len) < clen { + pw.WriteBits(3, 2) // 11 + clen-- + } + for int(c.Len) > clen { + pw.WriteBits(1, 2) // 10 + clen++ + } + pw.WriteBits(0, 1) + } + } +} + +// handleDegenerateCodes converts a degenerate tree into a canonical tree. 
+// +// For example, when the input is an under-subscribed tree: +// input: []PrefixCode{ +// {Sym: 0, Len: 3}, +// {Sym: 1, Len: 4}, +// {Sym: 2, Len: 3}, +// } +// output: []PrefixCode{ +// {Sym: 0, Len: 3, Val: 0}, // 000 +// {Sym: 1, Len: 4, Val: 2}, // 0010 +// {Sym: 2, Len: 3, Val: 4}, // 100 +// {Sym: 258, Len: 4, Val: 10}, // 1010 +// {Sym: 259, Len: 3, Val: 6}, // 110 +// {Sym: 260, Len: 1, Val: 1}, // 1 +// } +// +// For example, when the input is an over-subscribed tree: +// input: []PrefixCode{ +// {Sym: 0, Len: 1}, +// {Sym: 1, Len: 3}, +// {Sym: 2, Len: 4}, +// {Sym: 3, Len: 3}, +// {Sym: 4, Len: 2}, +// } +// output: []PrefixCode{ +// {Sym: 0, Len: 1, Val: 0}, // 0 +// {Sym: 1, Len: 3, Val: 3}, // 011 +// {Sym: 3, Len: 3, Val: 7}, // 111 +// {Sym: 4, Len: 2, Val: 1}, // 01 +// } +func handleDegenerateCodes(codes prefix.PrefixCodes) prefix.PrefixCodes { + // Since there is no formal definition for the BZip2 format, there is no + // specification that says that the code lengths must form a complete + // prefix tree (IE: it is neither over-subscribed nor under-subscribed). + // Thus, the original C implementation becomes the reference for how prefix + // decoding is done in these edge cases. Unfortunately, the C version does + // not error when an invalid tree is used, but rather allows decoding to + // continue and only errors if some bit pattern happens to cause an error. + // Thus, it is possible for an invalid tree to end up decoding an input + // "properly" so long as invalid bit patterns are not present. In order to + // replicate this non-specified behavior, we use a ported version of the + // C code to generate the codes as a valid canonical tree by substituting + // invalid nodes with invalid symbols. + // + // ==================================================== + // This program, "bzip2", the associated library "libbzip2", and all + // documentation, are copyright (C) 1996-2010 Julian R Seward. All + // rights reserved. 
+ // + // Redistribution and use in source and binary forms, with or without + // modification, are permitted provided that the following conditions + // are met: + // + // 1. Redistributions of source code must retain the above copyright + // notice, this list of conditions and the following disclaimer. + // + // 2. The origin of this software must not be misrepresented; you must + // not claim that you wrote the original software. If you use this + // software in a product, an acknowledgment in the product + // documentation would be appreciated but is not required. + // + // 3. Altered source versions must be plainly marked as such, and must + // not be misrepresented as being the original software. + // + // 4. The name of the author may not be used to endorse or promote + // products derived from this software without specific prior written + // permission. + // + // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + // OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + // ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + // GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ // + // Julian Seward, jseward@bzip.org + // bzip2/libbzip2 version 1.0.6 of 6 September 2010 + // ==================================================== + var ( + limits [maxPrefixBits + 2]int32 + bases [maxPrefixBits + 2]int32 + perms [maxNumSyms]int32 + + minLen = uint32(maxPrefixBits) + maxLen = uint32(0) + ) + + const ( + statusOkay = iota + statusInvalid + statusNeedBits + statusMaxBits + ) + + // createTables is the BZ2_hbCreateDecodeTables function from the C code. + createTables := func(codes []prefix.PrefixCode) { + for _, c := range codes { + if c.Len > maxLen { + maxLen = c.Len + } + if c.Len < minLen { + minLen = c.Len + } + } + + var pp int + for i := minLen; i <= maxLen; i++ { + for j, c := range codes { + if c.Len == i { + perms[pp] = int32(j) + pp++ + } + } + } + + var vec int32 + for _, c := range codes { + bases[c.Len+1]++ + } + for i := 1; i < len(bases); i++ { + bases[i] += bases[i-1] + } + for i := minLen; i <= maxLen; i++ { + vec += bases[i+1] - bases[i] + limits[i] = vec - 1 + vec <<= 1 + } + for i := minLen + 1; i <= maxLen; i++ { + bases[i] = ((limits[i-1] + 1) << 1) - bases[i] + } + } + + // getSymbol is the GET_MTF_VAL macro from the C code. + getSymbol := func(c prefix.PrefixCode) (uint32, int) { + v := internal.ReverseUint32(c.Val) + n := c.Len + + zn := minLen + if zn > n { + return 0, statusNeedBits + } + zvec := int32(v >> (32 - zn)) + v <<= zn + for { + if zn > maxLen { + return 0, statusMaxBits + } + if zvec <= limits[zn] { + break + } + zn++ + if zn > n { + return 0, statusNeedBits + } + zvec = (zvec << 1) | int32(v>>31) + v <<= 1 + } + if zvec-bases[zn] < 0 || zvec-bases[zn] >= maxNumSyms { + return 0, statusInvalid + } + return uint32(perms[zvec-bases[zn]]), statusOkay + } + + // Step 1: Create the prefix trees using the C algorithm. + createTables(codes) + + // Step 2: Starting with the shortest bit pattern, explore the whole tree. 
+ // If tree is under-subscribed, the worst-case runtime is O(1< 0 { + codes = append(codes, c) + } + } + return codes +} diff --git a/vendor/github.com/dsnet/compress/bzip2/reader.go b/vendor/github.com/dsnet/compress/bzip2/reader.go new file mode 100644 index 00000000..86d3f718 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/reader.go @@ -0,0 +1,274 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import ( + "io" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" + "github.com/dsnet/compress/internal/prefix" +) + +type Reader struct { + InputOffset int64 // Total number of bytes read from underlying io.Reader + OutputOffset int64 // Total number of bytes emitted from Read + + rd prefixReader + err error + level int // The current compression level + rdHdrFtr int // Number of times we read the stream header and footer + blkCRC uint32 // CRC-32 IEEE of each block (as stored) + endCRC uint32 // Checksum of all blocks using bzip2's custom method + + crc crc + mtf moveToFront + bwt burrowsWheelerTransform + rle runLengthEncoding + + // These fields are allocated with Reader and re-used later. 
+ treeSels []uint8 + codes2D [maxNumTrees][maxNumSyms]prefix.PrefixCode + codes1D [maxNumTrees]prefix.PrefixCodes + trees1D [maxNumTrees]prefix.Decoder + syms []uint16 + + fuzzReader // Exported functionality when fuzz testing +} + +type ReaderConfig struct { + _ struct{} // Blank field to prevent unkeyed struct literals +} + +func NewReader(r io.Reader, conf *ReaderConfig) (*Reader, error) { + zr := new(Reader) + zr.Reset(r) + return zr, nil +} + +func (zr *Reader) Reset(r io.Reader) error { + *zr = Reader{ + rd: zr.rd, + + mtf: zr.mtf, + bwt: zr.bwt, + rle: zr.rle, + + treeSels: zr.treeSels, + trees1D: zr.trees1D, + syms: zr.syms, + } + zr.rd.Init(r) + return nil +} + +func (zr *Reader) Read(buf []byte) (int, error) { + for { + cnt, err := zr.rle.Read(buf) + if err != rleDone && zr.err == nil { + zr.err = err + } + if cnt > 0 { + zr.crc.update(buf[:cnt]) + zr.OutputOffset += int64(cnt) + return cnt, nil + } + if zr.err != nil || len(buf) == 0 { + return 0, zr.err + } + + // Read the next chunk. + zr.rd.Offset = zr.InputOffset + func() { + defer errors.Recover(&zr.err) + if zr.rdHdrFtr%2 == 0 { + // Check if we are already at EOF. + if err := zr.rd.PullBits(1); err != nil { + if err == io.ErrUnexpectedEOF && zr.rdHdrFtr > 0 { + err = io.EOF // EOF is okay if we read at least one stream + } + errors.Panic(err) + } + + // Read stream header. + if zr.rd.ReadBitsBE64(16) != hdrMagic { + panicf(errors.Corrupted, "invalid stream magic") + } + if ver := zr.rd.ReadBitsBE64(8); ver != 'h' { + if ver == '0' { + panicf(errors.Deprecated, "bzip1 format is not supported") + } + panicf(errors.Corrupted, "invalid version: %q", ver) + } + lvl := int(zr.rd.ReadBitsBE64(8)) - '0' + if lvl < BestSpeed || lvl > BestCompression { + panicf(errors.Corrupted, "invalid block size: %d", lvl*blockSize) + } + zr.level = lvl + zr.rdHdrFtr++ + } else { + // Check and update the CRC. 
+ if internal.GoFuzz { + zr.updateChecksum(-1, zr.crc.val) // Update with value + zr.blkCRC = zr.crc.val // Suppress CRC failures + } + if zr.blkCRC != zr.crc.val { + panicf(errors.Corrupted, "mismatching block checksum") + } + zr.endCRC = (zr.endCRC<<1 | zr.endCRC>>31) ^ zr.blkCRC + } + buf := zr.decodeBlock() + zr.rle.Init(buf) + }() + if zr.InputOffset, err = zr.rd.Flush(); zr.err == nil { + zr.err = err + } + if zr.err != nil { + zr.err = errWrap(zr.err, errors.Corrupted) + return 0, zr.err + } + } +} + +func (zr *Reader) Close() error { + if zr.err == io.EOF || zr.err == errClosed { + zr.rle.Init(nil) // Make sure future reads fail + zr.err = errClosed + return nil + } + return zr.err // Return the persistent error +} + +func (zr *Reader) decodeBlock() []byte { + if magic := zr.rd.ReadBitsBE64(48); magic != blkMagic { + if magic == endMagic { + endCRC := uint32(zr.rd.ReadBitsBE64(32)) + if internal.GoFuzz { + zr.updateChecksum(zr.rd.BitsRead()-32, zr.endCRC) + endCRC = zr.endCRC // Suppress CRC failures + } + if zr.endCRC != endCRC { + panicf(errors.Corrupted, "mismatching stream checksum") + } + zr.endCRC = 0 + zr.rd.ReadPads() + zr.rdHdrFtr++ + return nil + } + panicf(errors.Corrupted, "invalid block or footer magic") + } + + zr.crc.val = 0 + zr.blkCRC = uint32(zr.rd.ReadBitsBE64(32)) + if internal.GoFuzz { + zr.updateChecksum(zr.rd.BitsRead()-32, 0) // Record offset only + } + if zr.rd.ReadBitsBE64(1) != 0 { + panicf(errors.Deprecated, "block randomization is not supported") + } + + // Read BWT related fields. + ptr := int(zr.rd.ReadBitsBE64(24)) // BWT origin pointer + + // Read MTF related fields. + var dictArr [256]uint8 + dict := dictArr[:0] + bmapHi := uint16(zr.rd.ReadBits(16)) + for i := 0; i < 256; i, bmapHi = i+16, bmapHi>>1 { + if bmapHi&1 > 0 { + bmapLo := uint16(zr.rd.ReadBits(16)) + for j := 0; j < 16; j, bmapLo = j+1, bmapLo>>1 { + if bmapLo&1 > 0 { + dict = append(dict, uint8(i+j)) + } + } + } + } + + // Step 1: Prefix encoding. 
+ syms := zr.decodePrefix(len(dict)) + + // Step 2: Move-to-front transform and run-length encoding. + zr.mtf.Init(dict, zr.level*blockSize) + buf := zr.mtf.Decode(syms) + + // Step 3: Burrows-Wheeler transformation. + if ptr >= len(buf) { + panicf(errors.Corrupted, "origin pointer (0x%06x) exceeds block size: %d", ptr, len(buf)) + } + zr.bwt.Decode(buf, ptr) + + return buf +} + +func (zr *Reader) decodePrefix(numSyms int) (syms []uint16) { + numSyms += 2 // Remove 0 symbol, add RUNA, RUNB, and EOF symbols + if numSyms < 3 { + panicf(errors.Corrupted, "not enough prefix symbols: %d", numSyms) + } + + // Read information about the trees and tree selectors. + var mtf internal.MoveToFront + numTrees := int(zr.rd.ReadBitsBE64(3)) + if numTrees < minNumTrees || numTrees > maxNumTrees { + panicf(errors.Corrupted, "invalid number of prefix trees: %d", numTrees) + } + numSels := int(zr.rd.ReadBitsBE64(15)) + if cap(zr.treeSels) < numSels { + zr.treeSels = make([]uint8, numSels) + } + treeSels := zr.treeSels[:numSels] + for i := range treeSels { + sym, ok := zr.rd.TryReadSymbol(&decSel) + if !ok { + sym = zr.rd.ReadSymbol(&decSel) + } + if int(sym) >= numTrees { + panicf(errors.Corrupted, "invalid prefix tree selector: %d", sym) + } + treeSels[i] = uint8(sym) + } + mtf.Decode(treeSels) + zr.treeSels = treeSels + + // Initialize prefix codes. + for i := range zr.codes2D[:numTrees] { + zr.codes1D[i] = zr.codes2D[i][:numSyms] + } + zr.rd.ReadPrefixCodes(zr.codes1D[:numTrees], zr.trees1D[:numTrees]) + + // Read prefix encoded symbols of compressed data. 
+ var tree *prefix.Decoder + var blkLen, selIdx int + syms = zr.syms[:0] + for { + if blkLen == 0 { + blkLen = numBlockSyms + if selIdx >= len(treeSels) { + panicf(errors.Corrupted, "not enough prefix tree selectors") + } + tree = &zr.trees1D[treeSels[selIdx]] + selIdx++ + } + blkLen-- + sym, ok := zr.rd.TryReadSymbol(tree) + if !ok { + sym = zr.rd.ReadSymbol(tree) + } + + if int(sym) == numSyms-1 { + break // EOF marker + } + if int(sym) >= numSyms { + panicf(errors.Corrupted, "invalid prefix symbol: %d", sym) + } + if len(syms) >= zr.level*blockSize { + panicf(errors.Corrupted, "number of prefix symbols exceeds block size") + } + syms = append(syms, uint16(sym)) + } + zr.syms = syms + return syms +} diff --git a/vendor/github.com/dsnet/compress/bzip2/rle1.go b/vendor/github.com/dsnet/compress/bzip2/rle1.go new file mode 100644 index 00000000..1d789f65 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/rle1.go @@ -0,0 +1,101 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import "github.com/dsnet/compress/internal/errors" + +// rleDone is a special "error" to indicate that the RLE stage is done. +var rleDone = errorf(errors.Unknown, "RLE1 stage is completed") + +// runLengthEncoding implements the first RLE stage of bzip2. Every sequence +// of 4..255 duplicated bytes is replaced by only the first 4 bytes, and a +// single byte representing the repeat length. Similar to the C bzip2 +// implementation, the encoder will always terminate repeat sequences with a +// count (even if it is the end of the buffer), and it will also never produce +// run lengths of 256..259. The decoder can handle the latter case. 
+// +// For example, if the input was: +// input: "AAAAAAABBBBCCCD" +// +// Then the output will be: +// output: "AAAA\x03BBBB\x00CCCD" +type runLengthEncoding struct { + buf []byte + idx int + lastVal byte + lastCnt int +} + +func (rle *runLengthEncoding) Init(buf []byte) { + *rle = runLengthEncoding{buf: buf} +} + +func (rle *runLengthEncoding) Write(buf []byte) (int, error) { + for i, b := range buf { + if rle.lastVal != b { + rle.lastCnt = 0 + } + rle.lastCnt++ + switch { + case rle.lastCnt < 4: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + rle.buf[rle.idx] = b + rle.idx++ + case rle.lastCnt == 4: + if rle.idx+1 >= len(rle.buf) { + return i, rleDone + } + rle.buf[rle.idx] = b + rle.idx++ + rle.buf[rle.idx] = 0 + rle.idx++ + case rle.lastCnt < 256: + rle.buf[rle.idx-1]++ + default: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + rle.lastCnt = 1 + rle.buf[rle.idx] = b + rle.idx++ + } + rle.lastVal = b + } + return len(buf), nil +} + +func (rle *runLengthEncoding) Read(buf []byte) (int, error) { + for i := range buf { + switch { + case rle.lastCnt == -4: + if rle.idx >= len(rle.buf) { + return i, errorf(errors.Corrupted, "missing terminating run-length repeater") + } + rle.lastCnt = int(rle.buf[rle.idx]) + rle.idx++ + if rle.lastCnt > 0 { + break // Break the switch + } + fallthrough // Count was zero, continue the work + case rle.lastCnt <= 0: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + b := rle.buf[rle.idx] + rle.idx++ + if b != rle.lastVal { + rle.lastCnt = 0 + rle.lastVal = b + } + } + buf[i] = rle.lastVal + rle.lastCnt-- + } + return len(buf), nil +} + +func (rle *runLengthEncoding) Bytes() []byte { return rle.buf[:rle.idx] } diff --git a/vendor/github.com/dsnet/compress/bzip2/writer.go b/vendor/github.com/dsnet/compress/bzip2/writer.go new file mode 100644 index 00000000..5c1a4c66 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/writer.go @@ -0,0 +1,307 @@ +// Copyright 2015, Joe Tsai. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import ( + "io" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" + "github.com/dsnet/compress/internal/prefix" +) + +type Writer struct { + InputOffset int64 // Total number of bytes issued to Write + OutputOffset int64 // Total number of bytes written to underlying io.Writer + + wr prefixWriter + err error + level int // The current compression level + wrHdr bool // Have we written the stream header? + blkCRC uint32 // CRC-32 IEEE of each block + endCRC uint32 // Checksum of all blocks using bzip2's custom method + + crc crc + rle runLengthEncoding + bwt burrowsWheelerTransform + mtf moveToFront + + // These fields are allocated with Writer and re-used later. + buf []byte + treeSels []uint8 + treeSelsMTF []uint8 + codes2D [maxNumTrees][maxNumSyms]prefix.PrefixCode + codes1D [maxNumTrees]prefix.PrefixCodes + trees1D [maxNumTrees]prefix.Encoder +} + +type WriterConfig struct { + Level int + + _ struct{} // Blank field to prevent unkeyed struct literals +} + +func NewWriter(w io.Writer, conf *WriterConfig) (*Writer, error) { + var lvl int + if conf != nil { + lvl = conf.Level + } + if lvl == 0 { + lvl = DefaultCompression + } + if lvl < BestSpeed || lvl > BestCompression { + return nil, errorf(errors.Invalid, "compression level: %d", lvl) + } + zw := new(Writer) + zw.level = lvl + zw.Reset(w) + return zw, nil +} + +func (zw *Writer) Reset(w io.Writer) error { + *zw = Writer{ + wr: zw.wr, + level: zw.level, + + rle: zw.rle, + bwt: zw.bwt, + mtf: zw.mtf, + + buf: zw.buf, + treeSels: zw.treeSels, + treeSelsMTF: zw.treeSelsMTF, + trees1D: zw.trees1D, + } + zw.wr.Init(w) + if len(zw.buf) != zw.level*blockSize { + zw.buf = make([]byte, zw.level*blockSize) + } + zw.rle.Init(zw.buf) + return nil +} + +func (zw *Writer) Write(buf []byte) (int, error) { + if zw.err != nil { + return 0, zw.err + } + + cnt := 
len(buf) + for { + wrCnt, err := zw.rle.Write(buf) + if err != rleDone && zw.err == nil { + zw.err = err + } + zw.crc.update(buf[:wrCnt]) + buf = buf[wrCnt:] + if len(buf) == 0 { + zw.InputOffset += int64(cnt) + return cnt, nil + } + if zw.err = zw.flush(); zw.err != nil { + return 0, zw.err + } + } +} + +func (zw *Writer) flush() error { + vals := zw.rle.Bytes() + if len(vals) == 0 { + return nil + } + zw.wr.Offset = zw.OutputOffset + func() { + defer errors.Recover(&zw.err) + if !zw.wrHdr { + // Write stream header. + zw.wr.WriteBitsBE64(hdrMagic, 16) + zw.wr.WriteBitsBE64('h', 8) + zw.wr.WriteBitsBE64(uint64('0'+zw.level), 8) + zw.wrHdr = true + } + zw.encodeBlock(vals) + }() + var err error + if zw.OutputOffset, err = zw.wr.Flush(); zw.err == nil { + zw.err = err + } + if zw.err != nil { + zw.err = errWrap(zw.err, errors.Internal) + return zw.err + } + zw.endCRC = (zw.endCRC<<1 | zw.endCRC>>31) ^ zw.blkCRC + zw.blkCRC = 0 + zw.rle.Init(zw.buf) + return nil +} + +func (zw *Writer) Close() error { + if zw.err == errClosed { + return nil + } + + // Flush RLE buffer if there is left-over data. + if zw.err = zw.flush(); zw.err != nil { + return zw.err + } + + // Write stream footer. + zw.wr.Offset = zw.OutputOffset + func() { + defer errors.Recover(&zw.err) + if !zw.wrHdr { + // Write stream header. 
+ zw.wr.WriteBitsBE64(hdrMagic, 16) + zw.wr.WriteBitsBE64('h', 8) + zw.wr.WriteBitsBE64(uint64('0'+zw.level), 8) + zw.wrHdr = true + } + zw.wr.WriteBitsBE64(endMagic, 48) + zw.wr.WriteBitsBE64(uint64(zw.endCRC), 32) + zw.wr.WritePads(0) + }() + var err error + if zw.OutputOffset, err = zw.wr.Flush(); zw.err == nil { + zw.err = err + } + if zw.err != nil { + zw.err = errWrap(zw.err, errors.Internal) + return zw.err + } + + zw.err = errClosed + return nil +} + +func (zw *Writer) encodeBlock(buf []byte) { + zw.blkCRC = zw.crc.val + zw.wr.WriteBitsBE64(blkMagic, 48) + zw.wr.WriteBitsBE64(uint64(zw.blkCRC), 32) + zw.wr.WriteBitsBE64(0, 1) + zw.crc.val = 0 + + // Step 1: Burrows-Wheeler transformation. + ptr := zw.bwt.Encode(buf) + zw.wr.WriteBitsBE64(uint64(ptr), 24) + + // Step 2: Move-to-front transform and run-length encoding. + var dictMap [256]bool + for _, c := range buf { + dictMap[c] = true + } + + var dictArr [256]uint8 + var bmapLo [16]uint16 + dict := dictArr[:0] + bmapHi := uint16(0) + for i, b := range dictMap { + if b { + c := uint8(i) + dict = append(dict, c) + bmapHi |= 1 << (c >> 4) + bmapLo[c>>4] |= 1 << (c & 0xf) + } + } + + zw.wr.WriteBits(uint(bmapHi), 16) + for _, m := range bmapLo { + if m > 0 { + zw.wr.WriteBits(uint(m), 16) + } + } + + zw.mtf.Init(dict, len(buf)) + syms := zw.mtf.Encode(buf) + + // Step 3: Prefix encoding. + zw.encodePrefix(syms, len(dict)) +} + +func (zw *Writer) encodePrefix(syms []uint16, numSyms int) { + numSyms += 2 // Remove 0 symbol, add RUNA, RUNB, and EOB symbols + if numSyms < 3 { + panicf(errors.Internal, "unable to encode EOB marker") + } + syms = append(syms, uint16(numSyms-1)) // EOB marker + + // Compute number of prefix trees needed. + numTrees := maxNumTrees + for i, lim := range []int{200, 600, 1200, 2400} { + if len(syms) < lim { + numTrees = minNumTrees + i + break + } + } + + // Compute number of block selectors. 
+ numSels := (len(syms) + numBlockSyms - 1) / numBlockSyms + if cap(zw.treeSels) < numSels { + zw.treeSels = make([]uint8, numSels) + } + treeSels := zw.treeSels[:numSels] + for i := range treeSels { + treeSels[i] = uint8(i % numTrees) + } + + // Initialize prefix codes. + for i := range zw.codes2D[:numTrees] { + pc := zw.codes2D[i][:numSyms] + for j := range pc { + pc[j] = prefix.PrefixCode{Sym: uint32(j)} + } + zw.codes1D[i] = pc + } + + // First cut at assigning prefix trees to each group. + var codes prefix.PrefixCodes + var blkLen, selIdx int + for _, sym := range syms { + if blkLen == 0 { + blkLen = numBlockSyms + codes = zw.codes2D[treeSels[selIdx]][:numSyms] + selIdx++ + } + blkLen-- + codes[sym].Cnt++ + } + + // TODO(dsnet): Use K-means to cluster groups to each prefix tree. + + // Generate lengths and prefixes based on symbol frequencies. + for i := range zw.trees1D[:numTrees] { + pc := prefix.PrefixCodes(zw.codes2D[i][:numSyms]) + pc.SortByCount() + if err := prefix.GenerateLengths(pc, maxPrefixBits); err != nil { + errors.Panic(err) + } + pc.SortBySymbol() + } + + // Write out information about the trees and tree selectors. + var mtf internal.MoveToFront + zw.wr.WriteBitsBE64(uint64(numTrees), 3) + zw.wr.WriteBitsBE64(uint64(numSels), 15) + zw.treeSelsMTF = append(zw.treeSelsMTF[:0], treeSels...) + mtf.Encode(zw.treeSelsMTF) + for _, sym := range zw.treeSelsMTF { + zw.wr.WriteSymbol(uint(sym), &encSel) + } + zw.wr.WritePrefixCodes(zw.codes1D[:numTrees], zw.trees1D[:numTrees]) + + // Write out prefix encoded symbols of compressed data. 
+ var tree *prefix.Encoder + blkLen, selIdx = 0, 0 + for _, sym := range syms { + if blkLen == 0 { + blkLen = numBlockSyms + tree = &zw.trees1D[treeSels[selIdx]] + selIdx++ + } + blkLen-- + ok := zw.wr.TryWriteSymbol(uint(sym), tree) + if !ok { + zw.wr.WriteSymbol(uint(sym), tree) + } + } +} diff --git a/vendor/github.com/dsnet/compress/internal/common.go b/vendor/github.com/dsnet/compress/internal/common.go new file mode 100644 index 00000000..da4e7034 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/common.go @@ -0,0 +1,107 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package internal is a collection of common compression algorithms. +// +// For performance reasons, these packages lack strong error checking and +// require that the caller to ensure that strict invariants are kept. +package internal + +var ( + // IdentityLUT returns the input key itself. + IdentityLUT = func() (lut [256]byte) { + for i := range lut { + lut[i] = uint8(i) + } + return lut + }() + + // ReverseLUT returns the input key with its bits reversed. + ReverseLUT = func() (lut [256]byte) { + for i := range lut { + b := uint8(i) + b = (b&0xaa)>>1 | (b&0x55)<<1 + b = (b&0xcc)>>2 | (b&0x33)<<2 + b = (b&0xf0)>>4 | (b&0x0f)<<4 + lut[i] = b + } + return lut + }() +) + +// ReverseUint32 reverses all bits of v. +func ReverseUint32(v uint32) (x uint32) { + x |= uint32(ReverseLUT[byte(v>>0)]) << 24 + x |= uint32(ReverseLUT[byte(v>>8)]) << 16 + x |= uint32(ReverseLUT[byte(v>>16)]) << 8 + x |= uint32(ReverseLUT[byte(v>>24)]) << 0 + return x +} + +// ReverseUint32N reverses the lower n bits of v. +func ReverseUint32N(v uint32, n uint) (x uint32) { + return ReverseUint32(v << (32 - n)) +} + +// ReverseUint64 reverses all bits of v. 
+func ReverseUint64(v uint64) (x uint64) { + x |= uint64(ReverseLUT[byte(v>>0)]) << 56 + x |= uint64(ReverseLUT[byte(v>>8)]) << 48 + x |= uint64(ReverseLUT[byte(v>>16)]) << 40 + x |= uint64(ReverseLUT[byte(v>>24)]) << 32 + x |= uint64(ReverseLUT[byte(v>>32)]) << 24 + x |= uint64(ReverseLUT[byte(v>>40)]) << 16 + x |= uint64(ReverseLUT[byte(v>>48)]) << 8 + x |= uint64(ReverseLUT[byte(v>>56)]) << 0 + return x +} + +// ReverseUint64N reverses the lower n bits of v. +func ReverseUint64N(v uint64, n uint) (x uint64) { + return ReverseUint64(v << (64 - n)) +} + +// MoveToFront is a data structure that allows for more efficient move-to-front +// transformations. This specific implementation assumes that the alphabet is +// densely packed within 0..255. +type MoveToFront struct { + dict [256]uint8 // Mapping from indexes to values + tail int // Number of tail bytes that are already ordered +} + +func (m *MoveToFront) Encode(vals []uint8) { + copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity + + var max int + for i, val := range vals { + var idx uint8 // Reverse lookup idx in dict + for di, dv := range m.dict { + if dv == val { + idx = uint8(di) + break + } + } + vals[i] = idx + + max |= int(idx) + copy(m.dict[1:], m.dict[:idx]) + m.dict[0] = val + } + m.tail = 256 - max - 1 +} + +func (m *MoveToFront) Decode(idxs []uint8) { + copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity + + var max int + for i, idx := range idxs { + val := m.dict[idx] // Forward lookup val in dict + idxs[i] = val + + max |= int(idx) + copy(m.dict[1:], m.dict[:idx]) + m.dict[0] = val + } + m.tail = 256 - max - 1 +} diff --git a/vendor/github.com/dsnet/compress/internal/debug.go b/vendor/github.com/dsnet/compress/internal/debug.go new file mode 100644 index 00000000..01df1f89 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/debug.go @@ -0,0 +1,12 @@ +// Copyright 2015, Joe Tsai. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build debug,!gofuzz + +package internal + +const ( + Debug = true + GoFuzz = false +) diff --git a/vendor/github.com/dsnet/compress/internal/errors/errors.go b/vendor/github.com/dsnet/compress/internal/errors/errors.go new file mode 100644 index 00000000..c631afbd --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/errors/errors.go @@ -0,0 +1,120 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package errors implements functions to manipulate compression errors. +// +// In idiomatic Go, it is an anti-pattern to use panics as a form of error +// reporting in the API. Instead, the expected way to transmit errors is by +// returning an error value. Unfortunately, the checking of "err != nil" in +// tight loops commonly found in compression causes non-negligible performance +// degradation. While this may not be idiomatic, the internal packages of this +// repository rely on panics as a normal means to convey errors. In order to +// ensure that these panics do not leak across the public API, the public +// packages must recover from these panics and present an error value. +// +// The Panic and Recover functions in this package provide a safe way to +// recover from errors only generated from within this repository. +// +// Example usage: +// func Foo() (err error) { +// defer errors.Recover(&err) +// +// if rand.Intn(2) == 0 { +// // Unexpected panics will not be caught by Recover. +// io.Closer(nil).Close() +// } else { +// // Errors thrown by Panic will be caught by Recover. +// errors.Panic(errors.New("whoopsie")) +// } +// } +// +package errors + +import "strings" + +const ( + // Unknown indicates that there is no classification for this error. 
+ Unknown = iota + + // Internal indicates that this error is due to an internal bug. + // Users should file a issue report if this type of error is encountered. + Internal + + // Invalid indicates that this error is due to the user misusing the API + // and is indicative of a bug on the user's part. + Invalid + + // Deprecated indicates the use of a deprecated and unsupported feature. + Deprecated + + // Corrupted indicates that the input stream is corrupted. + Corrupted + + // Closed indicates that the handlers are closed. + Closed +) + +var codeMap = map[int]string{ + Unknown: "unknown error", + Internal: "internal error", + Invalid: "invalid argument", + Deprecated: "deprecated format", + Corrupted: "corrupted input", + Closed: "closed handler", +} + +type Error struct { + Code int // The error type + Pkg string // Name of the package where the error originated + Msg string // Descriptive message about the error (optional) +} + +func (e Error) Error() string { + var ss []string + for _, s := range []string{e.Pkg, codeMap[e.Code], e.Msg} { + if s != "" { + ss = append(ss, s) + } + } + return strings.Join(ss, ": ") +} + +func (e Error) CompressError() {} +func (e Error) IsInternal() bool { return e.Code == Internal } +func (e Error) IsInvalid() bool { return e.Code == Invalid } +func (e Error) IsDeprecated() bool { return e.Code == Deprecated } +func (e Error) IsCorrupted() bool { return e.Code == Corrupted } +func (e Error) IsClosed() bool { return e.Code == Closed } + +func IsInternal(err error) bool { return isCode(err, Internal) } +func IsInvalid(err error) bool { return isCode(err, Invalid) } +func IsDeprecated(err error) bool { return isCode(err, Deprecated) } +func IsCorrupted(err error) bool { return isCode(err, Corrupted) } +func IsClosed(err error) bool { return isCode(err, Closed) } + +func isCode(err error, code int) bool { + if cerr, ok := err.(Error); ok && cerr.Code == code { + return true + } + return false +} + +// errWrap is used by Panic and 
Recover to ensure that only errors raised by +// Panic are recovered by Recover. +type errWrap struct{ e *error } + +func Recover(err *error) { + switch ex := recover().(type) { + case nil: + // Do nothing. + case errWrap: + *err = *ex.e + default: + panic(ex) + } +} + +func Panic(err error) { + panic(errWrap{&err}) +} diff --git a/vendor/github.com/dsnet/compress/internal/gofuzz.go b/vendor/github.com/dsnet/compress/internal/gofuzz.go new file mode 100644 index 00000000..5035c9d6 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/gofuzz.go @@ -0,0 +1,12 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build gofuzz + +package internal + +const ( + Debug = true + GoFuzz = true +) diff --git a/vendor/github.com/dsnet/compress/internal/prefix/debug.go b/vendor/github.com/dsnet/compress/internal/prefix/debug.go new file mode 100644 index 00000000..04fce70b --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/debug.go @@ -0,0 +1,159 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+
+// +build debug
+
+package prefix
+
+import (
+	"fmt"
+	"math"
+	"strings"
+)
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func lenBase2(n uint) int {
+	return int(math.Ceil(math.Log2(float64(n + 1))))
+}
+func padBase2(v, n uint, m int) string {
+	s := fmt.Sprintf("%b", 1<<n|v)[1:]
+	if pad := m - len(s); pad > 0 {
+		return strings.Repeat(" ", pad) + s
+	}
+	return s
+}
+
+func lenBase10(n int) int {
+	return int(math.Ceil(math.Log10(float64(n + 1))))
+}
+func padBase10(n, m int) string {
+	s := fmt.Sprintf("%d", n)
+	if pad := m - len(s); pad > 0 {
+		return strings.Repeat(" ", pad) + s
+	}
+	return s
+}
+
+func (rc RangeCodes) String() string {
+	var maxLen, maxBase int
+	for _, c := range rc {
+		maxLen = max(maxLen, int(c.Len))
+		maxBase = max(maxBase, int(c.Base))
+	}
+
+	var ss []string
+	ss = append(ss, "{")
+	for i, c := range rc {
+		base := padBase10(int(c.Base), lenBase10(maxBase))
+		if c.Len > 0 {
+			base += fmt.Sprintf("-%d", c.End()-1)
+		}
+		ss = append(ss, fmt.Sprintf("\t%s: {len: %s, range: %s},",
+			padBase10(int(i), lenBase10(len(rc)-1)),
+			padBase10(int(c.Len), lenBase10(maxLen)),
+			base,
+		))
+	}
+	ss = append(ss, "}")
+	return strings.Join(ss, "\n")
+}
+
+func (pc PrefixCodes) String() string {
+	var maxSym, maxLen, maxCnt int
+	for _, c := range pc {
+		maxSym = max(maxSym, int(c.Sym))
+		maxLen = max(maxLen, int(c.Len))
+		maxCnt = max(maxCnt, int(c.Cnt))
+	}
+
+	var ss []string
+	ss = append(ss, "{")
+	for _, c := range pc {
+		var cntStr string
+		if maxCnt > 0 {
+			cnt := int(32*float32(c.Cnt)/float32(maxCnt) + 0.5)
+			cntStr = fmt.Sprintf("%s |%s",
+				padBase10(int(c.Cnt), lenBase10(maxCnt)),
+				strings.Repeat("#", cnt),
+			)
+		}
+		ss = append(ss, fmt.Sprintf("\t%s: %s, %s",
+			padBase10(int(c.Sym), lenBase10(maxSym)),
+			padBase2(uint(c.Val), uint(c.Len), maxLen),
+			cntStr,
+		))
+	}
+	ss = append(ss, "}")
+	return strings.Join(ss, "\n")
+}
+
+func (pd Decoder) String() string {
+	var ss []string
+	ss = append(ss, "{")
+	if len(pd.chunks) > 0 {
+		ss = append(ss,
"\tchunks: {") + for i, c := range pd.chunks { + label := "sym" + if uint(c&countMask) > uint(pd.chunkBits) { + label = "idx" + } + ss = append(ss, fmt.Sprintf("\t\t%s: {%s: %s, len: %s}", + padBase2(uint(i), uint(pd.chunkBits), int(pd.chunkBits)), + label, padBase10(int(c>>countBits), 3), + padBase10(int(c&countMask), 2), + )) + } + ss = append(ss, "\t},") + + for j, links := range pd.links { + ss = append(ss, fmt.Sprintf("\tlinks[%d]: {", j)) + linkBits := lenBase2(uint(pd.linkMask)) + for i, c := range links { + ss = append(ss, fmt.Sprintf("\t\t%s: {sym: %s, len: %s},", + padBase2(uint(i), uint(linkBits), int(linkBits)), + padBase10(int(c>>countBits), 3), + padBase10(int(c&countMask), 2), + )) + } + ss = append(ss, "\t},") + } + } + ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pd.chunkMask)) + ss = append(ss, fmt.Sprintf("\tlinkMask: %b,", pd.linkMask)) + ss = append(ss, fmt.Sprintf("\tchunkBits: %d,", pd.chunkBits)) + ss = append(ss, fmt.Sprintf("\tMinBits: %d,", pd.MinBits)) + ss = append(ss, fmt.Sprintf("\tNumSyms: %d,", pd.NumSyms)) + ss = append(ss, "}") + return strings.Join(ss, "\n") +} + +func (pe Encoder) String() string { + var maxLen int + for _, c := range pe.chunks { + maxLen = max(maxLen, int(c&countMask)) + } + + var ss []string + ss = append(ss, "{") + if len(pe.chunks) > 0 { + ss = append(ss, "\tchunks: {") + for i, c := range pe.chunks { + ss = append(ss, fmt.Sprintf("\t\t%s: %s,", + padBase10(i, 3), + padBase2(uint(c>>countBits), uint(c&countMask), maxLen), + )) + } + ss = append(ss, "\t},") + } + ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pe.chunkMask)) + ss = append(ss, fmt.Sprintf("\tNumSyms: %d,", pe.NumSyms)) + ss = append(ss, "}") + return strings.Join(ss, "\n") +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/decoder.go b/vendor/github.com/dsnet/compress/internal/prefix/decoder.go new file mode 100644 index 00000000..a9bc2dcb --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/decoder.go @@ -0,0 
+1,136 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "sort" + + "github.com/dsnet/compress/internal" +) + +// The algorithm used to decode variable length codes is based on the lookup +// method in zlib. If the code is less-than-or-equal to maxChunkBits, +// then the symbol can be decoded using a single lookup into the chunks table. +// Otherwise, the links table will be used for a second level lookup. +// +// The chunks slice is keyed by the contents of the bit buffer ANDed with +// the chunkMask to avoid a out-of-bounds lookup. The value of chunks is a tuple +// that is decoded as follow: +// +// var length = chunks[bitBuffer&chunkMask] & countMask +// var symbol = chunks[bitBuffer&chunkMask] >> countBits +// +// If the decoded length is larger than chunkBits, then an overflow link table +// must be used for further decoding. In this case, the symbol is actually the +// index into the links tables. The second-level links table returned is +// processed in the same way as the chunks table. +// +// if length > chunkBits { +// var index = symbol // Previous symbol is index into links tables +// length = links[index][bitBuffer>>chunkBits & linkMask] & countMask +// symbol = links[index][bitBuffer>>chunkBits & linkMask] >> countBits +// } +// +// See the following: +// http://www.gzip.org/algorithm.txt + +type Decoder struct { + chunks []uint32 // First-level lookup map + links [][]uint32 // Second-level lookup map + chunkMask uint32 // Mask the length of the chunks table + linkMask uint32 // Mask the length of the link table + chunkBits uint32 // Bit-length of the chunks table + + MinBits uint32 // The minimum number of bits to safely make progress + NumSyms uint32 // Number of symbols +} + +// Init initializes Decoder according to the codes provided. 
+func (pd *Decoder) Init(codes PrefixCodes) { + // Handle special case trees. + if len(codes) <= 1 { + switch { + case len(codes) == 0: // Empty tree (should error if used later) + *pd = Decoder{chunks: pd.chunks[:0], links: pd.links[:0], NumSyms: 0} + case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero) + pd.chunks = append(pd.chunks[:0], codes[0].Sym< c.Len { + minBits = c.Len + } + if maxBits < c.Len { + maxBits = c.Len + } + } + + // Allocate chunks table as needed. + const maxChunkBits = 9 // This can be tuned for better performance + pd.NumSyms = uint32(len(codes)) + pd.MinBits = minBits + pd.chunkBits = maxBits + if pd.chunkBits > maxChunkBits { + pd.chunkBits = maxChunkBits + } + numChunks := 1 << pd.chunkBits + pd.chunks = allocUint32s(pd.chunks, numChunks) + pd.chunkMask = uint32(numChunks - 1) + + // Allocate links tables as needed. + pd.links = pd.links[:0] + pd.linkMask = 0 + if pd.chunkBits < maxBits { + numLinks := 1 << (maxBits - pd.chunkBits) + pd.linkMask = uint32(numLinks - 1) + + var linkIdx uint32 + for i := range pd.chunks { + pd.chunks[i] = 0 // Logic below relies on zero value as uninitialized + } + for _, c := range codes { + if c.Len > pd.chunkBits && pd.chunks[c.Val&pd.chunkMask] == 0 { + pd.chunks[c.Val&pd.chunkMask] = (linkIdx << countBits) | (pd.chunkBits + 1) + linkIdx++ + } + } + + pd.links = extendSliceUint32s(pd.links, int(linkIdx)) + linksFlat := allocUint32s(pd.links[0], numLinks*int(linkIdx)) + for i, j := 0, 0; i < len(pd.links); i, j = i+1, j+numLinks { + pd.links[i] = linksFlat[j : j+numLinks] + } + } + + // Fill out chunks and links tables with values. 
+ for _, c := range codes { + chunk := c.Sym<> countBits + links := pd.links[linkIdx] + skip := 1 << uint(c.Len-pd.chunkBits) + for j := int(c.Val >> pd.chunkBits); j < len(links); j += skip { + links[j] = chunk + } + } + } +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/encoder.go b/vendor/github.com/dsnet/compress/internal/prefix/encoder.go new file mode 100644 index 00000000..4424a011 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/encoder.go @@ -0,0 +1,66 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "sort" + + "github.com/dsnet/compress/internal" +) + +type Encoder struct { + chunks []uint32 // First-level lookup map + chunkMask uint32 // Mask the length of the chunks table + + NumSyms uint32 // Number of symbols +} + +// Init initializes Encoder according to the codes provided. +func (pe *Encoder) Init(codes PrefixCodes) { + // Handle special case trees. + if len(codes) <= 1 { + switch { + case len(codes) == 0: // Empty tree (should error if used later) + *pe = Encoder{chunks: pe.chunks[:0], NumSyms: 0} + case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero) + pe.chunks = append(pe.chunks[:0], codes[0].Val< 0; n >>= 1 { + numChunks <<= 1 + } + pe.NumSyms = uint32(len(codes)) + +retry: + // Allocate and reset chunks. + pe.chunks = allocUint32s(pe.chunks, numChunks) + pe.chunkMask = uint32(numChunks - 1) + for i := range pe.chunks { + pe.chunks[i] = 0 // Logic below relies on zero value as uninitialized + } + + // Insert each symbol, checking that there are no conflicts. + for _, c := range codes { + if pe.chunks[c.Sym&pe.chunkMask] > 0 { + // Collision found our "hash" table, so grow and try again. 
+ numChunks <<= 1 + goto retry + } + pe.chunks[c.Sym&pe.chunkMask] = c.Val<> uint(c.Len) + } + return sum == 0 || len(pc) == 0 +} + +// checkPrefixes reports whether all codes have non-overlapping prefixes. +func (pc PrefixCodes) checkPrefixes() bool { + for i, c1 := range pc { + for j, c2 := range pc { + mask := uint32(1)< 0 { + c.Val = internal.ReverseUint32N(c.Val, uint(c.Len)) + if vals[c.Len].Cnt > 0 && vals[c.Len].Val+1 != c.Val { + return false + } + vals[c.Len].Val = c.Val + vals[c.Len].Cnt++ + } + } + + // Rule 2. + var last PrefixCode + for _, v := range vals { + if v.Cnt > 0 { + curVal := v.Val - v.Cnt + 1 + if last.Cnt != 0 && last.Val >= curVal { + return false + } + last = v + } + } + return true +} + +// GenerateLengths assigns non-zero bit-lengths to all codes. Codes with high +// frequency counts will be assigned shorter codes to reduce bit entropy. +// This function is used primarily by compressors. +// +// The input codes must have the Cnt field populated, be sorted by count. +// Even if a code has a count of 0, a non-zero bit-length will be assigned. +// +// The result will have the Len field populated. The algorithm used guarantees +// that Len <= maxBits and that it is a complete prefix tree. The resulting +// codes will remain sorted by count. +func GenerateLengths(codes PrefixCodes, maxBits uint) error { + if len(codes) <= 1 { + if len(codes) == 1 { + codes[0].Len = 0 + } + return nil + } + + // Verify that the codes are in ascending order by count. + cntLast := codes[0].Cnt + for _, c := range codes[1:] { + if c.Cnt < cntLast { + return errorf(errors.Invalid, "non-monotonically increasing symbol counts") + } + cntLast = c.Cnt + } + + // Construct a Huffman tree used to generate the bit-lengths. + // + // The Huffman tree is a binary tree where each symbol lies as a leaf node + // on this tree. The length of the prefix code to assign is the depth of + // that leaf from the root. 
The Huffman algorithm, which runs in O(n), + // is used to generate the tree. It assumes that codes are sorted in + // increasing order of frequency. + // + // The algorithm is as follows: + // 1. Start with two queues, F and Q, where F contains all of the starting + // symbols sorted such that symbols with lowest counts come first. + // 2. While len(F)+len(Q) > 1: + // 2a. Dequeue the node from F or Q that has the lowest weight as N0. + // 2b. Dequeue the node from F or Q that has the lowest weight as N1. + // 2c. Create a new node N that has N0 and N1 as its children. + // 2d. Enqueue N into the back of Q. + // 3. The tree's root node is Q[0]. + type node struct { + cnt uint32 + + // n0 or c0 represent the left child of this node. + // Since Go does not have unions, only one of these will be set. + // Similarly, n1 or c1 represent the right child of this node. + // + // If n0 or n1 is set, then it represents a "pointer" to another + // node in the Huffman tree. Since Go's pointer analysis cannot reason + // that these node pointers do not escape (golang.org/issue/13493), + // we use an index to a node in the nodes slice as a pseudo-pointer. + // + // If c0 or c1 is set, then it represents a leaf "node" in the + // Huffman tree. The leaves are the PrefixCode values themselves. + n0, n1 int // Index to child nodes + c0, c1 *PrefixCode + } + var nodeIdx int + var nodeArr [1024]node // Large enough to handle most cases on the stack + nodes := nodeArr[:] + if len(nodes) < len(codes) { + nodes = make([]node, len(codes)) // Number of internal nodes < number of leaves + } + freqs, queue := codes, nodes[:0] + for len(freqs)+len(queue) > 1 { + // These are the two smallest nodes at the front of freqs and queue. 
+ var n node + if len(queue) == 0 || (len(freqs) > 0 && freqs[0].Cnt <= queue[0].cnt) { + n.c0, freqs = &freqs[0], freqs[1:] + n.cnt += n.c0.Cnt + } else { + n.cnt += queue[0].cnt + n.n0 = nodeIdx // nodeIdx is same as &queue[0] - &nodes[0] + nodeIdx++ + queue = queue[1:] + } + if len(queue) == 0 || (len(freqs) > 0 && freqs[0].Cnt <= queue[0].cnt) { + n.c1, freqs = &freqs[0], freqs[1:] + n.cnt += n.c1.Cnt + } else { + n.cnt += queue[0].cnt + n.n1 = nodeIdx // nodeIdx is same as &queue[0] - &nodes[0] + nodeIdx++ + queue = queue[1:] + } + queue = append(queue, n) + } + rootIdx := nodeIdx + + // Search the whole binary tree, noting when we hit each leaf node. + // We do not care about the exact Huffman tree structure, but rather we only + // care about depth of each of the leaf nodes. That is, the depth determines + // how long each symbol is in bits. + // + // Since the number of leaves is n, there is at most n internal nodes. + // Thus, this algorithm runs in O(n). + var fixBits bool + var explore func(int, uint) + explore = func(rootIdx int, level uint) { + root := &nodes[rootIdx] + + // Explore left branch. + if root.c0 == nil { + explore(root.n0, level+1) + } else { + fixBits = fixBits || (level > maxBits) + root.c0.Len = uint32(level) + } + + // Explore right branch. + if root.c1 == nil { + explore(root.n1, level+1) + } else { + fixBits = fixBits || (level > maxBits) + root.c1.Len = uint32(level) + } + } + explore(rootIdx, 1) + + // Fix the bit-lengths if we violate the maxBits requirement. + if fixBits { + // Create histogram for number of symbols with each bit-length. + var symBitsArr [valueBits + 1]uint32 + symBits := symBitsArr[:] // symBits[nb] indicates number of symbols using nb bits + for _, c := range codes { + for int(c.Len) >= len(symBits) { + symBits = append(symBits, 0) + } + symBits[c.Len]++ + } + + // Fudge the tree such that the largest bit-length is <= maxBits. + // This is accomplish by effectively doing a tree rotation. 
That is, we + // increase the bit-length of some higher frequency code, so that the + // bit-lengths of lower frequency codes can be decreased. + // + // Visually, this looks like the following transform: + // + // Level Before After + // __ ___ + // / \ / \ + // n-1 X / \ /\ /\ + // n X /\ X X X X + // n+1 X X + // + var treeRotate func(uint) + treeRotate = func(nb uint) { + if symBits[nb-1] == 0 { + treeRotate(nb - 1) + } + symBits[nb-1] -= 1 // Push this node to the level below + symBits[nb] += 3 // This level gets one node from above, two from below + symBits[nb+1] -= 2 // Push two nodes to the level above + } + for i := uint(len(symBits)) - 1; i > maxBits; i-- { + for symBits[i] > 0 { + treeRotate(i - 1) + } + } + + // Assign bit-lengths to each code. Since codes is sorted in increasing + // order of frequency, that means that the most frequently used symbols + // should have the shortest bit-lengths. Thus, we copy symbols to codes + // from the back of codes first. + cs := codes + for nb, cnt := range symBits { + if cnt > 0 { + pos := len(cs) - int(cnt) + cs2 := cs[pos:] + for i := range cs2 { + cs2[i].Len = uint32(nb) + } + cs = cs[:pos] + } + } + if len(cs) != 0 { + panic("not all codes were used up") + } + } + + if internal.Debug && !codes.checkLengths() { + panic("incomplete prefix tree detected") + } + return nil +} + +// GeneratePrefixes assigns a prefix value to all codes according to the +// bit-lengths. This function is used by both compressors and decompressors. +// +// The input codes must have the Sym and Len fields populated and be +// sorted by symbol. The bit-lengths of each code must be properly allocated, +// such that it forms a complete tree. +// +// The result will have the Val field populated and will produce a canonical +// prefix tree. The resulting codes will remain sorted by symbol. 
+func GeneratePrefixes(codes PrefixCodes) error { + if len(codes) <= 1 { + if len(codes) == 1 { + if codes[0].Len != 0 { + return errorf(errors.Invalid, "degenerate prefix tree with one node") + } + codes[0].Val = 0 + } + return nil + } + + // Compute basic statistics on the symbols. + var bitCnts [valueBits + 1]uint + c0 := codes[0] + bitCnts[c0.Len]++ + minBits, maxBits, symLast := c0.Len, c0.Len, c0.Sym + for _, c := range codes[1:] { + if c.Sym <= symLast { + return errorf(errors.Invalid, "non-unique or non-monotonically increasing symbols") + } + if minBits > c.Len { + minBits = c.Len + } + if maxBits < c.Len { + maxBits = c.Len + } + bitCnts[c.Len]++ // Histogram of bit counts + symLast = c.Sym // Keep track of last symbol + } + if minBits == 0 { + return errorf(errors.Invalid, "invalid prefix bit-length") + } + + // Compute the next code for a symbol of a given bit length. + var nextCodes [valueBits + 1]uint + var code uint + for i := minBits; i <= maxBits; i++ { + code <<= 1 + nextCodes[i] = code + code += bitCnts[i] + } + if code != 1<= n { + return s[:n] + } + return make([]uint32, n, n*3/2) +} + +func extendSliceUint32s(s [][]uint32, n int) [][]uint32 { + if cap(s) >= n { + return s[:n] + } + ss := make([][]uint32, n, n*3/2) + copy(ss, s[:cap(s)]) + return ss +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/range.go b/vendor/github.com/dsnet/compress/internal/prefix/range.go new file mode 100644 index 00000000..b7eddad5 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/range.go @@ -0,0 +1,93 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package prefix + +type RangeCode struct { + Base uint32 // Starting base offset of the range + Len uint32 // Bit-length of a subsequent integer to add to base offset +} +type RangeCodes []RangeCode + +type RangeEncoder struct { + rcs RangeCodes + lut [1024]uint32 + minBase uint +} + +// End reports the non-inclusive ending range. +func (rc RangeCode) End() uint32 { return rc.Base + (1 << rc.Len) } + +// MakeRangeCodes creates a RangeCodes, where each region is assumed to be +// contiguously stacked, without any gaps, with bit-lengths taken from bits. +func MakeRangeCodes(minBase uint, bits []uint) (rc RangeCodes) { + for _, nb := range bits { + rc = append(rc, RangeCode{Base: uint32(minBase), Len: uint32(nb)}) + minBase += 1 << nb + } + return rc +} + +// Base reports the inclusive starting range for all ranges. +func (rcs RangeCodes) Base() uint32 { return rcs[0].Base } + +// End reports the non-inclusive ending range for all ranges. +func (rcs RangeCodes) End() uint32 { return rcs[len(rcs)-1].End() } + +// checkValid reports whether the RangeCodes is valid. In order to be valid, +// the following must hold true: +// rcs[i-1].Base <= rcs[i].Base +// rcs[i-1].End <= rcs[i].End +// rcs[i-1].End >= rcs[i].Base +// +// Practically speaking, each range must be increasing and must not have any +// gaps in between. It is okay for ranges to overlap. 
+func (rcs RangeCodes) checkValid() bool { + if len(rcs) == 0 { + return false + } + pre := rcs[0] + for _, cur := range rcs[1:] { + preBase, preEnd := pre.Base, pre.End() + curBase, curEnd := cur.Base, cur.End() + if preBase > curBase || preEnd > curEnd || preEnd < curBase { + return false + } + pre = cur + } + return true +} + +func (re *RangeEncoder) Init(rcs RangeCodes) { + if !rcs.checkValid() { + panic("invalid range codes") + } + *re = RangeEncoder{rcs: rcs, minBase: uint(rcs.Base())} + for sym, rc := range rcs { + base := int(rc.Base) - int(re.minBase) + end := int(rc.End()) - int(re.minBase) + if base >= len(re.lut) { + break + } + if end > len(re.lut) { + end = len(re.lut) + } + for i := base; i < end; i++ { + re.lut[i] = uint32(sym) + } + } +} + +func (re *RangeEncoder) Encode(offset uint) (sym uint) { + if idx := int(offset - re.minBase); idx < len(re.lut) { + return uint(re.lut[idx]) + } + sym = uint(re.lut[len(re.lut)-1]) +retry: + if int(sym) >= len(re.rcs) || re.rcs[sym].Base > uint32(offset) { + return sym - 1 + } + sym++ + goto retry // Avoid for-loop so that this function can be inlined +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/reader.go b/vendor/github.com/dsnet/compress/internal/prefix/reader.go new file mode 100644 index 00000000..e6252c95 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/reader.go @@ -0,0 +1,335 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "bufio" + "bytes" + "encoding/binary" + "io" + "strings" + + "github.com/dsnet/compress" + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" +) + +// Reader implements a prefix decoder. If the input io.Reader satisfies the +// compress.ByteReader or compress.BufferedReader interface, then it also +// guarantees that it will never read more bytes than is necessary. 
+// +// For high performance, provide an io.Reader that satisfies the +// compress.BufferedReader interface. If the input does not satisfy either +// compress.ByteReader or compress.BufferedReader, then it will be internally +// wrapped with a bufio.Reader. +type Reader struct { + Offset int64 // Number of bytes read from the underlying io.Reader + + rd io.Reader + byteRd compress.ByteReader // Set if rd is a ByteReader + bufRd compress.BufferedReader // Set if rd is a BufferedReader + + bufBits uint64 // Buffer to hold some bits + numBits uint // Number of valid bits in bufBits + bigEndian bool // Do we treat input bytes as big endian? + + // These fields are only used if rd is a compress.BufferedReader. + bufPeek []byte // Buffer for the Peek data + discardBits int // Number of bits to discard from reader + fedBits uint // Number of bits fed in last call to PullBits + + // These fields are used to reduce allocations. + bb *buffer + br *bytesReader + sr *stringReader + bu *bufio.Reader +} + +// Init initializes the bit Reader to read from r. If bigEndian is true, then +// bits will be read starting from the most-significant bits of a byte +// (as done in bzip2), otherwise it will read starting from the +// least-significant bits of a byte (such as for deflate and brotli). 
+func (pr *Reader) Init(r io.Reader, bigEndian bool) {
+	*pr = Reader{
+		rd:        r,
+		bigEndian: bigEndian,
+
+		bb: pr.bb,
+		br: pr.br,
+		sr: pr.sr,
+		bu: pr.bu,
+	}
+	switch rr := r.(type) {
+	case *bytes.Buffer:
+		if pr.bb == nil {
+			pr.bb = new(buffer)
+		}
+		*pr.bb = buffer{Buffer: rr}
+		pr.bufRd = pr.bb
+	case *bytes.Reader:
+		if pr.br == nil {
+			pr.br = new(bytesReader)
+		}
+		*pr.br = bytesReader{Reader: rr}
+		pr.bufRd = pr.br
+	case *strings.Reader:
+		if pr.sr == nil {
+			pr.sr = new(stringReader)
+		}
+		*pr.sr = stringReader{Reader: rr}
+		pr.bufRd = pr.sr
+	case compress.BufferedReader:
+		pr.bufRd = rr
+	case compress.ByteReader:
+		pr.byteRd = rr
+	default:
+		if pr.bu == nil {
+			pr.bu = bufio.NewReader(nil)
+		}
+		pr.bu.Reset(r)
+		pr.rd, pr.bufRd = pr.bu, pr.bu
+	}
+}
+
+// BitsRead reports the total number of bits emitted from any Read method.
+func (pr *Reader) BitsRead() int64 {
+	offset := 8*pr.Offset - int64(pr.numBits)
+	if pr.bufRd != nil {
+		discardBits := pr.discardBits + int(pr.fedBits-pr.numBits)
+		offset = 8*pr.Offset + int64(discardBits)
+	}
+	return offset
+}
+
+// IsBufferedReader reports whether the underlying io.Reader is also a
+// compress.BufferedReader.
+func (pr *Reader) IsBufferedReader() bool {
+	return pr.bufRd != nil
+}
+
+// ReadPads reads 0-7 bits from the bit buffer to achieve byte-alignment.
+func (pr *Reader) ReadPads() uint {
+	nb := pr.numBits % 8
+	val := uint(pr.bufBits & uint64(1<<nb-1))
+	pr.bufBits >>= nb
+	pr.numBits -= nb
+	return val
+}
+
+// Read reads bytes into buf.
+// The bit-ordering mode does not affect this method.
+func (pr *Reader) Read(buf []byte) (cnt int, err error) {
+	if pr.numBits > 0 {
+		if pr.numBits%8 != 0 {
+			return 0, errorf(errors.Invalid, "non-aligned bit buffer")
+		}
+		for cnt = 0; len(buf) > cnt && pr.numBits > 0; cnt++ {
+			if pr.bigEndian {
+				buf[cnt] = internal.ReverseLUT[byte(pr.bufBits)]
+			} else {
+				buf[cnt] = byte(pr.bufBits)
+			}
+			pr.bufBits >>= 8
+			pr.numBits -= 8
+		}
+		return cnt, nil
+	}
+	if _, err := pr.Flush(); err != nil {
+		return 0, err
+	}
+	cnt, err = pr.rd.Read(buf)
+	pr.Offset += int64(cnt)
+	return cnt, err
+}
+
+// ReadOffset reads an offset value using the provided RangeCodes indexed by
+// the symbol read.
+func (pr *Reader) ReadOffset(pd *Decoder, rcs RangeCodes) uint {
+	rc := rcs[pr.ReadSymbol(pd)]
+	return uint(rc.Base) + pr.ReadBits(uint(rc.Len))
+}
+
+// TryReadBits attempts to read nb bits using the contents of the bit buffer
+// alone. It returns the value and whether it succeeded.
+//
+// This method is designed to be inlined for performance reasons.
+func (pr *Reader) TryReadBits(nb uint) (uint, bool) {
+	if pr.numBits < nb {
+		return 0, false
+	}
+	val := uint(pr.bufBits & uint64(1<<nb-1))
+	pr.bufBits >>= nb
+	pr.numBits -= nb
+	return val, true
+}
+
+// ReadBits reads nb bits in from the underlying reader.
+func (pr *Reader) ReadBits(nb uint) uint {
+	if err := pr.PullBits(nb); err != nil {
+		errors.Panic(err)
+	}
+	val := uint(pr.bufBits & uint64(1<<nb-1))
+	pr.bufBits >>= nb
+	pr.numBits -= nb
+	return val
+}
+
+// TryReadSymbol attempts to decode the next symbol using the contents of the
+// bit buffer alone. It returns the decoded symbol and whether it succeeded.
+//
+// This method is designed to be inlined for performance reasons.
+func (pr *Reader) TryReadSymbol(pd *Decoder) (uint, bool) { + if pr.numBits < uint(pd.MinBits) || len(pd.chunks) == 0 { + return 0, false + } + chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask] + nb := uint(chunk & countMask) + if nb > pr.numBits || nb > uint(pd.chunkBits) { + return 0, false + } + pr.bufBits >>= nb + pr.numBits -= nb + return uint(chunk >> countBits), true +} + +// ReadSymbol reads the next symbol using the provided prefix Decoder. +func (pr *Reader) ReadSymbol(pd *Decoder) uint { + if len(pd.chunks) == 0 { + panicf(errors.Invalid, "decode with empty prefix tree") + } + + nb := uint(pd.MinBits) + for { + if err := pr.PullBits(nb); err != nil { + errors.Panic(err) + } + chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask] + nb = uint(chunk & countMask) + if nb > uint(pd.chunkBits) { + linkIdx := chunk >> countBits + chunk = pd.links[linkIdx][uint32(pr.bufBits>>pd.chunkBits)&pd.linkMask] + nb = uint(chunk & countMask) + } + if nb <= pr.numBits { + pr.bufBits >>= nb + pr.numBits -= nb + return uint(chunk >> countBits) + } + } +} + +// Flush updates the read offset of the underlying ByteReader. +// If reader is a compress.BufferedReader, then this calls Discard to update +// the read offset. +func (pr *Reader) Flush() (int64, error) { + if pr.bufRd == nil { + return pr.Offset, nil + } + + // Update the number of total bits to discard. + pr.discardBits += int(pr.fedBits - pr.numBits) + pr.fedBits = pr.numBits + + // Discard some bytes to update read offset. + var err error + nd := (pr.discardBits + 7) / 8 // Round up to nearest byte + nd, err = pr.bufRd.Discard(nd) + pr.discardBits -= nd * 8 // -7..0 + pr.Offset += int64(nd) + + // These are invalid after Discard. + pr.bufPeek = nil + return pr.Offset, err +} + +// PullBits ensures that at least nb bits exist in the bit buffer. 
+// If the underlying reader is a compress.BufferedReader, then this will fill +// the bit buffer with as many bits as possible, relying on Peek and Discard to +// properly advance the read offset. Otherwise, it will use ReadByte to fill the +// buffer with just the right number of bits. +func (pr *Reader) PullBits(nb uint) error { + if pr.bufRd != nil { + pr.discardBits += int(pr.fedBits - pr.numBits) + for { + if len(pr.bufPeek) == 0 { + pr.fedBits = pr.numBits // Don't discard bits just added + if _, err := pr.Flush(); err != nil { + return err + } + + // Peek no more bytes than necessary. + // The computation for cntPeek computes the minimum number of + // bytes to Peek to fill nb bits. + var err error + cntPeek := int(nb+(-nb&7)) / 8 + if cntPeek < pr.bufRd.Buffered() { + cntPeek = pr.bufRd.Buffered() + } + pr.bufPeek, err = pr.bufRd.Peek(cntPeek) + pr.bufPeek = pr.bufPeek[int(pr.numBits/8):] // Skip buffered bits + if len(pr.bufPeek) == 0 { + if pr.numBits >= nb { + break + } + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + + n := int(64-pr.numBits) / 8 // Number of bytes to copy to bit buffer + if len(pr.bufPeek) >= 8 { + // Starting with Go 1.7, the compiler should use a wide integer + // load here if the architecture supports it. + u := binary.LittleEndian.Uint64(pr.bufPeek) + if pr.bigEndian { + // Swap all the bits within each byte. 
+ u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1 + u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2 + u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4 + } + + pr.bufBits |= u << pr.numBits + pr.numBits += uint(n * 8) + pr.bufPeek = pr.bufPeek[n:] + break + } else { + if n > len(pr.bufPeek) { + n = len(pr.bufPeek) + } + for _, c := range pr.bufPeek[:n] { + if pr.bigEndian { + c = internal.ReverseLUT[c] + } + pr.bufBits |= uint64(c) << pr.numBits + pr.numBits += 8 + } + pr.bufPeek = pr.bufPeek[n:] + if pr.numBits > 56 { + break + } + } + } + pr.fedBits = pr.numBits + } else { + for pr.numBits < nb { + c, err := pr.byteRd.ReadByte() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if pr.bigEndian { + c = internal.ReverseLUT[c] + } + pr.bufBits |= uint64(c) << pr.numBits + pr.numBits += 8 + pr.Offset++ + } + } + return nil +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/wrap.go b/vendor/github.com/dsnet/compress/internal/prefix/wrap.go new file mode 100644 index 00000000..49906d4a --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/wrap.go @@ -0,0 +1,146 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "bytes" + "io" + "strings" +) + +// For some of the common Readers, we wrap and extend them to satisfy the +// compress.BufferedReader interface to improve performance. 
+ +type buffer struct { + *bytes.Buffer +} + +type bytesReader struct { + *bytes.Reader + pos int64 + buf []byte + arr [512]byte +} + +type stringReader struct { + *strings.Reader + pos int64 + buf []byte + arr [512]byte +} + +func (r *buffer) Buffered() int { + return r.Len() +} + +func (r *buffer) Peek(n int) ([]byte, error) { + b := r.Bytes() + if len(b) < n { + return b, io.EOF + } + return b[:n], nil +} + +func (r *buffer) Discard(n int) (int, error) { + b := r.Next(n) + if len(b) < n { + return len(b), io.EOF + } + return n, nil +} + +func (r *bytesReader) Buffered() int { + r.update() + if r.Len() > len(r.buf) { + return len(r.buf) + } + return r.Len() +} + +func (r *bytesReader) Peek(n int) ([]byte, error) { + if n > len(r.arr) { + return nil, io.ErrShortBuffer + } + + // Return sub-slice of local buffer if possible. + r.update() + if len(r.buf) >= n { + return r.buf[:n], nil + } + + // Fill entire local buffer, and return appropriate sub-slice. + cnt, err := r.ReadAt(r.arr[:], r.pos) + r.buf = r.arr[:cnt] + if cnt < n { + return r.arr[:cnt], err + } + return r.arr[:n], nil +} + +func (r *bytesReader) Discard(n int) (int, error) { + var err error + if n > r.Len() { + n, err = r.Len(), io.EOF + } + r.Seek(int64(n), io.SeekCurrent) + return n, err +} + +// update reslices the internal buffer to be consistent with the read offset. +func (r *bytesReader) update() { + pos, _ := r.Seek(0, io.SeekCurrent) + if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) { + r.buf, r.pos = r.buf[off:], pos + } else { + r.buf, r.pos = nil, pos + } +} + +func (r *stringReader) Buffered() int { + r.update() + if r.Len() > len(r.buf) { + return len(r.buf) + } + return r.Len() +} + +func (r *stringReader) Peek(n int) ([]byte, error) { + if n > len(r.arr) { + return nil, io.ErrShortBuffer + } + + // Return sub-slice of local buffer if possible. + r.update() + if len(r.buf) >= n { + return r.buf[:n], nil + } + + // Fill entire local buffer, and return appropriate sub-slice. 
+ cnt, err := r.ReadAt(r.arr[:], r.pos) + r.buf = r.arr[:cnt] + if cnt < n { + return r.arr[:cnt], err + } + return r.arr[:n], nil +} + +func (r *stringReader) Discard(n int) (int, error) { + var err error + if n > r.Len() { + n, err = r.Len(), io.EOF + } + r.Seek(int64(n), io.SeekCurrent) + return n, err +} + +// update reslices the internal buffer to be consistent with the read offset. +func (r *stringReader) update() { + pos, _ := r.Seek(0, io.SeekCurrent) + if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) { + r.buf, r.pos = r.buf[off:], pos + } else { + r.buf, r.pos = nil, pos + } +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/writer.go b/vendor/github.com/dsnet/compress/internal/prefix/writer.go new file mode 100644 index 00000000..c9783905 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/writer.go @@ -0,0 +1,166 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "encoding/binary" + "io" + + "github.com/dsnet/compress/internal/errors" +) + +// Writer implements a prefix encoder. For performance reasons, Writer will not +// write bytes immediately to the underlying stream. +type Writer struct { + Offset int64 // Number of bytes written to the underlying io.Writer + + wr io.Writer + bufBits uint64 // Buffer to hold some bits + numBits uint // Number of valid bits in bufBits + bigEndian bool // Are bits written in big-endian order? + + buf [512]byte + cntBuf int +} + +// Init initializes the bit Writer to write to w. If bigEndian is true, then +// bits will be written starting from the most-significant bits of a byte +// (as done in bzip2), otherwise it will write starting from the +// least-significant bits of a byte (such as for deflate and brotli). 
+func (pw *Writer) Init(w io.Writer, bigEndian bool) { + *pw = Writer{wr: w, bigEndian: bigEndian} + return +} + +// BitsWritten reports the total number of bits issued to any Write method. +func (pw *Writer) BitsWritten() int64 { + return 8*pw.Offset + 8*int64(pw.cntBuf) + int64(pw.numBits) +} + +// WritePads writes 0-7 bits to the bit buffer to achieve byte-alignment. +func (pw *Writer) WritePads(v uint) { + nb := -pw.numBits & 7 + pw.bufBits |= uint64(v) << pw.numBits + pw.numBits += nb +} + +// Write writes bytes from buf. +// The bit-ordering mode does not affect this method. +func (pw *Writer) Write(buf []byte) (cnt int, err error) { + if pw.numBits > 0 || pw.cntBuf > 0 { + if pw.numBits%8 != 0 { + return 0, errorf(errors.Invalid, "non-aligned bit buffer") + } + if _, err := pw.Flush(); err != nil { + return 0, err + } + } + cnt, err = pw.wr.Write(buf) + pw.Offset += int64(cnt) + return cnt, err +} + +// WriteOffset writes ofs in a (sym, extra) fashion using the provided prefix +// Encoder and RangeEncoder. +func (pw *Writer) WriteOffset(ofs uint, pe *Encoder, re *RangeEncoder) { + sym := re.Encode(ofs) + pw.WriteSymbol(sym, pe) + rc := re.rcs[sym] + pw.WriteBits(ofs-uint(rc.Base), uint(rc.Len)) +} + +// TryWriteBits attempts to write nb bits using the contents of the bit buffer +// alone. It reports whether it succeeded. +// +// This method is designed to be inlined for performance reasons. +func (pw *Writer) TryWriteBits(v, nb uint) bool { + if 64-pw.numBits < nb { + return false + } + pw.bufBits |= uint64(v) << pw.numBits + pw.numBits += nb + return true +} + +// WriteBits writes nb bits of v to the underlying writer. +func (pw *Writer) WriteBits(v, nb uint) { + if _, err := pw.PushBits(); err != nil { + errors.Panic(err) + } + pw.bufBits |= uint64(v) << pw.numBits + pw.numBits += nb +} + +// TryWriteSymbol attempts to encode the next symbol using the contents of the +// bit buffer alone. It reports whether it succeeded. 
+// +// This method is designed to be inlined for performance reasons. +func (pw *Writer) TryWriteSymbol(sym uint, pe *Encoder) bool { + chunk := pe.chunks[uint32(sym)&pe.chunkMask] + nb := uint(chunk & countMask) + if 64-pw.numBits < nb { + return false + } + pw.bufBits |= uint64(chunk>>countBits) << pw.numBits + pw.numBits += nb + return true +} + +// WriteSymbol writes the symbol using the provided prefix Encoder. +func (pw *Writer) WriteSymbol(sym uint, pe *Encoder) { + if _, err := pw.PushBits(); err != nil { + errors.Panic(err) + } + chunk := pe.chunks[uint32(sym)&pe.chunkMask] + nb := uint(chunk & countMask) + pw.bufBits |= uint64(chunk>>countBits) << pw.numBits + pw.numBits += nb +} + +// Flush flushes all complete bytes from the bit buffer to the byte buffer, and +// then flushes all bytes in the byte buffer to the underlying writer. +// After this call, the bit Writer is will only withhold 7 bits at most. +func (pw *Writer) Flush() (int64, error) { + if pw.numBits < 8 && pw.cntBuf == 0 { + return pw.Offset, nil + } + if _, err := pw.PushBits(); err != nil { + return pw.Offset, err + } + cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf]) + pw.cntBuf -= cnt + pw.Offset += int64(cnt) + return pw.Offset, err +} + +// PushBits pushes as many bytes as possible from the bit buffer to the byte +// buffer, reporting the number of bits pushed. +func (pw *Writer) PushBits() (uint, error) { + if pw.cntBuf >= len(pw.buf)-8 { + cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf]) + pw.cntBuf -= cnt + pw.Offset += int64(cnt) + if err != nil { + return 0, err + } + } + + u := pw.bufBits + if pw.bigEndian { + // Swap all the bits within each byte. + u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1 + u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2 + u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4 + } + // Starting with Go 1.7, the compiler should use a wide integer + // store here if the architecture supports it. 
+ binary.LittleEndian.PutUint64(pw.buf[pw.cntBuf:], u) + + nb := pw.numBits / 8 // Number of bytes to copy from bit buffer + pw.cntBuf += int(nb) + pw.bufBits >>= 8 * nb + pw.numBits -= 8 * nb + return 8 * nb, nil +} diff --git a/vendor/github.com/dsnet/compress/internal/release.go b/vendor/github.com/dsnet/compress/internal/release.go new file mode 100644 index 00000000..0990be1c --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/release.go @@ -0,0 +1,21 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !debug,!gofuzz + +package internal + +// Debug indicates whether the debug build tag was set. +// +// If set, programs may choose to print with more human-readable +// debug information and also perform sanity checks that would otherwise be too +// expensive to run in a release build. +const Debug = false + +// GoFuzz indicates whether the gofuzz build tag was set. +// +// If set, programs may choose to disable certain checks (like checksums) that +// would be nearly impossible for gofuzz to properly get right. +// If GoFuzz is set, it implies that Debug is set as well. +const GoFuzz = false diff --git a/vendor/github.com/dsnet/compress/zbench.sh b/vendor/github.com/dsnet/compress/zbench.sh new file mode 100755 index 00000000..0205920d --- /dev/null +++ b/vendor/github.com/dsnet/compress/zbench.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +# zbench wraps internal/tool/bench and is useful for comparing benchmarks from +# the implementations in this repository relative to other implementations. +# +# See internal/tool/bench/main.go for more details. 
+cd $(dirname "${BASH_SOURCE[0]}")/internal/tool/bench +go run $(go list -f '{{ join .GoFiles "\n" }}') "$@" diff --git a/vendor/github.com/dsnet/compress/zfuzz.sh b/vendor/github.com/dsnet/compress/zfuzz.sh new file mode 100755 index 00000000..42958ed4 --- /dev/null +++ b/vendor/github.com/dsnet/compress/zfuzz.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +# zfuzz wraps internal/tool/fuzz and is useful for fuzz testing each of +# the implementations in this repository. +cd $(dirname "${BASH_SOURCE[0]}")/internal/tool/fuzz +./fuzz.sh "$@" diff --git a/vendor/github.com/dsnet/compress/zprof.sh b/vendor/github.com/dsnet/compress/zprof.sh new file mode 100755 index 00000000..3cd535be --- /dev/null +++ b/vendor/github.com/dsnet/compress/zprof.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +if [ $# == 0 ]; then + echo "Usage: $0 PKG_PATH TEST_ARGS..." + echo "" + echo "Runs coverage and performance benchmarks for a given package." + echo "The results are stored in the _zprof_ directory." + echo "" + echo "Example:" + echo " $0 flate -test.bench=Decode/Twain/Default" + exit 1 +fi + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PKG_PATH=$1 +PKG_NAME=$(basename $PKG_PATH) +shift + +TMPDIR=$(mktemp -d) +trap "rm -rf $TMPDIR $PKG_PATH/$PKG_NAME.test" SIGINT SIGTERM EXIT + +( + cd $DIR/$PKG_PATH + + # Print the go version. + go version + + # Perform coverage profiling. + go test github.com/dsnet/compress/$PKG_PATH -coverprofile $TMPDIR/cover.profile + if [ $? != 0 ]; then exit 1; fi + go tool cover -html $TMPDIR/cover.profile -o cover.html + + # Perform performance profiling. + if [ $# != 0 ]; then + go test -c github.com/dsnet/compress/$PKG_PATH + if [ $? 
!= 0 ]; then exit 1; fi + ./$PKG_NAME.test -test.cpuprofile $TMPDIR/cpu.profile -test.memprofile $TMPDIR/mem.profile -test.run - "$@" + PPROF="go tool pprof" + $PPROF -output=cpu.svg -web $PKG_NAME.test $TMPDIR/cpu.profile 2> /dev/null + $PPROF -output=cpu.html -weblist=. $PKG_NAME.test $TMPDIR/cpu.profile 2> /dev/null + $PPROF -output=mem_objects.svg -alloc_objects -web $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_objects.html -alloc_objects -weblist=. $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_space.svg -alloc_space -web $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_space.html -alloc_space -weblist=. $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + fi + + rm -rf $DIR/_zprof_/$PKG_NAME + mkdir -p $DIR/_zprof_/$PKG_NAME + mv *.html *.svg $DIR/_zprof_/$PKG_NAME 2> /dev/null +) diff --git a/vendor/github.com/dsnet/compress/ztest.sh b/vendor/github.com/dsnet/compress/ztest.sh new file mode 100755 index 00000000..d4c19932 --- /dev/null +++ b/vendor/github.com/dsnet/compress/ztest.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +cd $(go list -f '{{ .Dir }}' github.com/dsnet/compress) + +BOLD="\x1b[1mRunning: " +PASS="\x1b[32mPASS" +FAIL="\x1b[31mFAIL" +RESET="\x1b[0m" + +echo -e "${BOLD}fmt${RESET}" +RET_FMT=$(find . -name "*.go" | egrep -v "/(_.*_|\..*|testdata)/" | xargs gofmt -d) +if [[ ! -z "$RET_FMT" ]]; then echo "$RET_FMT"; echo; fi + +echo -e "${BOLD}test${RESET}" +RET_TEST=$(go test -race ./... | egrep -v "^(ok|[?])\s+") +if [[ ! -z "$RET_TEST" ]]; then echo "$RET_TEST"; echo; fi + +echo -e "${BOLD}staticcheck${RESET}" +RET_SCHK=$(staticcheck \ + -ignore " + github.com/dsnet/compress/internal/prefix/*.go:SA4016 + github.com/dsnet/compress/brotli/*.go:SA4016 + " ./... 2>&1) +if [[ ! 
-z "$RET_SCHK" ]]; then echo "$RET_SCHK"; echo; fi + +echo -e "${BOLD}vet${RESET}" +RET_VET=$(go vet ./... 2>&1 | + egrep -v "^flate/dict_decoder.go:(.*)WriteByte" | + egrep -v "^exit status") +if [[ ! -z "$RET_VET" ]]; then echo "$RET_VET"; echo; fi + +echo -e "${BOLD}lint${RESET}" +RET_LINT=$(golint ./... 2>&1 | + egrep -v "should have comment(.*)or be unexported" | + egrep -v "^(.*)type name will be used as(.*)by other packages" | + egrep -v "^brotli/transform.go:(.*)replace i [+]= 1 with i[+]{2}" | + egrep -v "^internal/prefix/prefix.go:(.*)replace symBits(.*) [-]= 1 with symBits(.*)[-]{2}" | + egrep -v "^xflate/common.go:(.*)NoCompression should be of the form" | + egrep -v "^exit status") +if [[ ! -z "$RET_LINT" ]]; then echo "$RET_LINT"; echo; fi + +if [[ ! -z "$RET_FMT" ]] || [ ! -z "$RET_TEST" ] || [[ ! -z "$RET_VET" ]] || [[ ! -z "$RET_SCHK" ]] || [[ ! -z "$RET_LINT" ]] || [[ ! -z "$RET_SPELL" ]]; then + echo -e "${FAIL}${RESET}"; exit 1 +else + echo -e "${PASS}${RESET}"; exit 0 +fi diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 00000000..bcfa1952 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. 
+Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 00000000..931ae316 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 00000000..cea12879 --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. 
+ +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000..72efb035 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. 
+func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 00000000..fcd192b8 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000..e6179f65 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. 
+ + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. 
+// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. 
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. 
+ CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000..8c9f2049 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. 
Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000..8d393e90 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. 
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. 
+func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. 
Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 00000000..150d91bc --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. 
+// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000..adfd979f --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. 
+// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. 
+ MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. 
The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. 
+TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! 
offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". 
+ ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 00000000..dbcae905 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. 
+ // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000..0cf5e379 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at https://github.com/google/snappy +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. 
The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". 
+ maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/magefile/mage/CONTRIBUTING.md b/vendor/github.com/magefile/mage/CONTRIBUTING.md new file mode 100644 index 00000000..e1394d20 --- /dev/null +++ b/vendor/github.com/magefile/mage/CONTRIBUTING.md @@ -0,0 +1,42 @@ +# Contributing + +Of course, contributions are more than welcome. Please read these guidelines for +making the process as painless as possible. + +## Discussion + +Development discussion should take place on the #mage channel of [gopher +slack](https://gophers.slack.com/). + +There is a separate #mage-dev channel that has the github app to post github +activity to the channel, to make it easy to follow. + +## Issues + +If there's an issue you'd like to work on, please comment on it, so we can +discuss approach, etc. and make sure no one else is currently working on that +issue. + +Please always create an issue before sending a PR unless it's an obvious typo +or other trivial change. + +## Dependency Management + +Currently mage has no dependencies(!) outside the standard library. Let's keep 
Since it's likely that mage will be vendored into a project, +adding dependencies to mage adds dependencies to every project that uses mage. + +## Versions + +Please avoid using features of go and the stdlib that prevent mage from being +buildable with older versions of Go. The CI tests currently check that mage is +buildable with go 1.7 and later. You may build with whatever version you like, +but CI has the final say. + +## Testing + +Please write tests for any new features. Tests must use the normal go testing +package. + +Tests must pass the race detector (run `go test -race ./...`). + diff --git a/vendor/github.com/magefile/mage/Gopkg.lock b/vendor/github.com/magefile/mage/Gopkg.lock new file mode 100644 index 00000000..bef2d009 --- /dev/null +++ b/vendor/github.com/magefile/mage/Gopkg.lock @@ -0,0 +1,9 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ab4fef131ee828e96ba67d31a7d690bd5f2f42040c6766b1b12fe856f87e0ff7" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/magefile/mage/Gopkg.toml b/vendor/github.com/magefile/mage/Gopkg.toml new file mode 100644 index 00000000..9425a542 --- /dev/null +++ b/vendor/github.com/magefile/mage/Gopkg.toml @@ -0,0 +1,22 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + diff --git a/vendor/github.com/magefile/mage/LICENSE b/vendor/github.com/magefile/mage/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/magefile/mage/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/magefile/mage/README.md b/vendor/github.com/magefile/mage/README.md new file mode 100644 index 00000000..63826bb7 --- /dev/null +++ b/vendor/github.com/magefile/mage/README.md @@ -0,0 +1,61 @@ +

+ +## About [![Build Status](https://travis-ci.org/magefile/mage.svg?branch=master)](https://travis-ci.org/magefile/mage) + +Mage is a make/rake-like build tool using Go. You write plain-old go functions, +and Mage automatically uses them as Makefile-like runnable targets. + +## Installation + +Mage has no dependencies outside the Go standard library, and builds with Go 1.7 +and above (possibly even lower versions, but they're not regularly tested). + +Install mage by running + +``` +go get -u -d github.com/magefile/mage +cd $GOPATH/src/github.com/magefile/mage +go run bootstrap.go +``` + +This will download the code into your GOPATH, and then run the bootstrap script +to build mage with version infomation embedded in it. A normal `go get` +(without -d) will build the binary correctly, but no version info will be +embedded. If you've done this, no worries, just go to +$GOPATH/src/github.com/magefile/mage and run `mage install` or `go run +bootstrap.go` and a new binary will be created with the correct version +information. + +The mage binary will be created in your $GOPATH/bin directory. + +You may also install a binary release from our +[releases](https://github.com/magefile/mage/releases) page. + +## Demo + +[![Mage Demo](https://img.youtube.com/vi/GOqbD0lF-iA/maxresdefault.jpg)](https://www.youtube.com/watch?v=GOqbD0lF-iA) + +## Discussion + +Join the `#mage` channel on [gophers slack](https://gophers.slack.com/messages/general/) for discussion of usage, development, etc. + +# Documentation + +see [magefile.org](https://magefile.org) for full docs + +see [godoc.org/github.com/magefile/mage/mage](https://godoc.org/github.com/magefile/mage/mage) for how to use mage as a library. + +# Why? + +Makefiles are hard to read and hard to write. Mostly because makefiles are essentially fancy bash scripts with significant white space and additional make-related syntax. 
+ +Mage lets you have multiple magefiles, name your magefiles whatever you +want, and they're easy to customize for multiple operating systems. Mage has no +dependencies (aside from go) and runs just fine on all major operating systems, whereas make generally uses bash which is not well supported on Windows. +Go is superior to bash for any non-trivial task involving branching, looping, anything that's not just straight line execution of commands. And if your project is written in Go, why introduce another +language as idiosyncratic as bash? Why not use the language your contributors +are already comfortable with? + +# TODO + +* File conversion tasks diff --git a/vendor/github.com/magefile/mage/bootstrap.go b/vendor/github.com/magefile/mage/bootstrap.go new file mode 100644 index 00000000..c37f6fc8 --- /dev/null +++ b/vendor/github.com/magefile/mage/bootstrap.go @@ -0,0 +1,19 @@ +//+build ignore + +package main + +import ( + "os" + + "github.com/magefile/mage/mage" +) + +// This is a bootstrap builder, to build mage when you don't already *have* mage. +// Run it like +// go run bootstrap.go +// and it will install mage with all the right flags created for you. + +func main() { + os.Args = []string{os.Args[0], "-v", "install"} + os.Exit(mage.Main()) +} diff --git a/vendor/github.com/magefile/mage/build/build.go b/vendor/github.com/magefile/mage/build/build.go new file mode 100644 index 00000000..64198e67 --- /dev/null +++ b/vendor/github.com/magefile/mage/build/build.go @@ -0,0 +1,1655 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package build + +import ( + "bytes" + "errors" + "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/token" + "io" + "io/ioutil" + "log" + "os" + pathpkg "path" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A Context specifies the supporting context for a build. 
+type Context struct { + GOARCH string // target architecture + GOOS string // target operating system + GOROOT string // Go root + GOPATH string // Go path + CgoEnabled bool // whether cgo can be used + UseAllFiles bool // use files regardless of +build lines, file names + Compiler string // compiler to assume when computing target paths + + // RequiredTags lists tags that the must exist in a build tag in order for + // the file to be included in the build. If RequiredTags is empty, no tags + // are required. Note that this is mostly useful in filtering the list of + // files in a single directory. Using required tags across an entire + // compile step will likely exclude much, if not all of the standard library + // files. + RequiredTags []string + + // The build and release tags specify build constraints + // that should be considered satisfied when processing +build lines. + // Clients creating a new context may customize BuildTags, which + // defaults to empty, but it is usually an error to customize ReleaseTags, + // which defaults to the list of Go releases the current release is compatible with. + // In addition to the BuildTags and ReleaseTags, build constraints + // consider the values of GOARCH and GOOS as satisfied tags. + BuildTags []string + ReleaseTags []string + + // The install suffix specifies a suffix to use in the name of the installation + // directory. By default it is empty, but custom builds that need to keep + // their outputs separate can set InstallSuffix to do so. For example, when + // using the race detector, the go command uses InstallSuffix = "race", so + // that on a Linux/386 system, packages are written to a directory named + // "linux_386_race" instead of the usual "linux_386". + InstallSuffix string + + // By default, Import uses the operating system's file system calls + // to read directories and files. To read from other sources, + // callers can set the following functions. 
They all have default + // behaviors that use the local file system, so clients need only set + // the functions whose behaviors they wish to change. + + // JoinPath joins the sequence of path fragments into a single path. + // If JoinPath is nil, Import uses filepath.Join. + JoinPath func(elem ...string) string + + // SplitPathList splits the path list into a slice of individual paths. + // If SplitPathList is nil, Import uses filepath.SplitList. + SplitPathList func(list string) []string + + // IsAbsPath reports whether path is an absolute path. + // If IsAbsPath is nil, Import uses filepath.IsAbs. + IsAbsPath func(path string) bool + + // IsDir reports whether the path names a directory. + // If IsDir is nil, Import calls os.Stat and uses the result's IsDir method. + IsDir func(path string) bool + + // HasSubdir reports whether dir is lexically a subdirectory of + // root, perhaps multiple levels below. It does not try to check + // whether dir exists. + // If so, HasSubdir sets rel to a slash-separated path that + // can be joined to root to produce a path equivalent to dir. + // If HasSubdir is nil, Import uses an implementation built on + // filepath.EvalSymlinks. + HasSubdir func(root, dir string) (rel string, ok bool) + + // ReadDir returns a slice of os.FileInfo, sorted by Name, + // describing the content of the named directory. + // If ReadDir is nil, Import uses ioutil.ReadDir. + ReadDir func(dir string) ([]os.FileInfo, error) + + // OpenFile opens a file (not a directory) for reading. + // If OpenFile is nil, Import uses os.Open. + OpenFile func(path string) (io.ReadCloser, error) +} + +// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join. +func (ctxt *Context) joinPath(elem ...string) string { + if f := ctxt.JoinPath; f != nil { + return f(elem...) + } + return filepath.Join(elem...) +} + +// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList. 
+func (ctxt *Context) splitPathList(s string) []string { + if f := ctxt.SplitPathList; f != nil { + return f(s) + } + return filepath.SplitList(s) +} + +// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs. +func (ctxt *Context) isAbsPath(path string) bool { + if f := ctxt.IsAbsPath; f != nil { + return f(path) + } + return filepath.IsAbs(path) +} + +// isDir calls ctxt.IsDir (if not nil) or else uses os.Stat. +func (ctxt *Context) isDir(path string) bool { + if f := ctxt.IsDir; f != nil { + return f(path) + } + fi, err := os.Stat(path) + return err == nil && fi.IsDir() +} + +// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses +// the local file system to answer the question. +func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) { + if f := ctxt.HasSubdir; f != nil { + return f(root, dir) + } + + // Try using paths we received. + if rel, ok = hasSubdir(root, dir); ok { + return rel, ok + } + + // Try expanding symlinks and comparing + // expanded against unexpanded and + // expanded against expanded. + rootSym, _ := filepath.EvalSymlinks(root) + dirSym, _ := filepath.EvalSymlinks(dir) + + if rel, ok = hasSubdir(rootSym, dir); ok { + return rel, ok + } + if rel, ok = hasSubdir(root, dirSym); ok { + return rel, ok + } + return hasSubdir(rootSym, dirSym) +} + +// hasSubdir reports if dir is within root by performing lexical analysis only. +func hasSubdir(root, dir string) (rel string, ok bool) { + const sep = string(filepath.Separator) + root = filepath.Clean(root) + if !strings.HasSuffix(root, sep) { + root += sep + } + dir = filepath.Clean(dir) + if !strings.HasPrefix(dir, root) { + return "", false + } + return filepath.ToSlash(dir[len(root):]), true +} + +// readDir calls ctxt.ReadDir (if not nil) or else ioutil.ReadDir. 
+func (ctxt *Context) readDir(path string) ([]os.FileInfo, error) { + if f := ctxt.ReadDir; f != nil { + return f(path) + } + return ioutil.ReadDir(path) +} + +// openFile calls ctxt.OpenFile (if not nil) or else os.Open. +func (ctxt *Context) openFile(path string) (io.ReadCloser, error) { + if fn := ctxt.OpenFile; fn != nil { + return fn(path) + } + + f, err := os.Open(path) + if err != nil { + return nil, err // nil interface + } + return f, nil +} + +// isFile determines whether path is a file by trying to open it. +// It reuses openFile instead of adding another function to the +// list in Context. +func (ctxt *Context) isFile(path string) bool { + f, err := ctxt.openFile(path) + if err != nil { + return false + } + f.Close() + return true +} + +// gopath returns the list of Go path directories. +func (ctxt *Context) gopath() []string { + var all []string + for _, p := range ctxt.splitPathList(ctxt.GOPATH) { + if p == "" || p == ctxt.GOROOT { + // Empty paths are uninteresting. + // If the path is the GOROOT, ignore it. + // People sometimes set GOPATH=$GOROOT. + // Do not get confused by this common mistake. + continue + } + if strings.HasPrefix(p, "~") { + // Path segments starting with ~ on Unix are almost always + // users who have incorrectly quoted ~ while setting GOPATH, + // preventing it from expanding to $HOME. + // The situation is made more confusing by the fact that + // bash allows quoted ~ in $PATH (most shells do not). + // Do not get confused by this, and do not try to use the path. + // It does not exist, and printing errors about it confuses + // those users even more, because they think "sure ~ exists!". + // The go command diagnoses this situation and prints a + // useful error. + // On Windows, ~ is used in short names, such as c:\progra~1 + // for c:\program files. + continue + } + all = append(all, p) + } + return all +} + +// SrcDirs returns a list of package source root directories. 
+// It draws from the current Go root and Go path but omits directories +// that do not exist. +func (ctxt *Context) SrcDirs() []string { + var all []string + if ctxt.GOROOT != "" { + dir := ctxt.joinPath(ctxt.GOROOT, "src") + if ctxt.isDir(dir) { + all = append(all, dir) + } + } + for _, p := range ctxt.gopath() { + dir := ctxt.joinPath(p, "src") + if ctxt.isDir(dir) { + all = append(all, dir) + } + } + return all +} + +// Default is the default Context for builds. +// It uses the GOARCH, GOOS, GOROOT, and GOPATH environment variables +// if set, or else the compiled code's GOARCH, GOOS, and GOROOT. +var Default Context = defaultContext() + +func defaultGOPATH() string { + env := "HOME" + if runtime.GOOS == "windows" { + env = "USERPROFILE" + } else if runtime.GOOS == "plan9" { + env = "home" + } + if home := os.Getenv(env); home != "" { + def := filepath.Join(home, "go") + if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) { + // Don't set the default GOPATH to GOROOT, + // as that will trigger warnings from the go tool. + return "" + } + return def + } + return "" +} + +func defaultContext() Context { + var c Context + + c.GOARCH = envOr("GOARCH", runtime.GOARCH) + c.GOOS = envOr("GOOS", runtime.GOOS) + c.GOROOT = pathpkg.Clean(runtime.GOROOT()) + c.GOPATH = envOr("GOPATH", defaultGOPATH()) + c.Compiler = runtime.Compiler + + // Each major Go release in the Go 1.x series should add a tag here. + // Old tags should not be removed. That is, the go1.x tag is present + // in all releases >= Go 1.x. Code that requires Go 1.x or later should + // say "+build go1.x", and code that should only be built before Go 1.x + // (perhaps it is the stub to use in that case) should say "+build !go1.x". + // NOTE: If you add to this list, also update the doc comment in doc.go. 
+ c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6", "go1.7", "go1.8", "go1.9"} + + env := os.Getenv("CGO_ENABLED") + if env == "" { + env = defaultCGO_ENABLED + } + switch env { + case "1": + c.CgoEnabled = true + case "0": + c.CgoEnabled = false + default: + // cgo must be explicitly enabled for cross compilation builds + if runtime.GOARCH == c.GOARCH && runtime.GOOS == c.GOOS { + c.CgoEnabled = cgoEnabled[c.GOOS+"/"+c.GOARCH] + break + } + c.CgoEnabled = false + } + + return c +} + +func envOr(name, def string) string { + s := os.Getenv(name) + if s == "" { + return def + } + return s +} + +// An ImportMode controls the behavior of the Import method. +type ImportMode uint + +const ( + // If FindOnly is set, Import stops after locating the directory + // that should contain the sources for a package. It does not + // read any files in the directory. + FindOnly ImportMode = 1 << iota + + // If AllowBinary is set, Import can be satisfied by a compiled + // package object without corresponding sources. + // + // Deprecated: + // The supported way to create a compiled-only package is to + // write source code containing a //go:binary-only-package comment at + // the top of the file. Such a package will be recognized + // regardless of this flag setting (because it has source code) + // and will have BinaryOnly set to true in the returned Package. + AllowBinary + + // If ImportComment is set, parse import comments on package statements. + // Import returns an error if it finds a comment it cannot understand + // or finds conflicting comments in multiple source files. + // See golang.org/s/go14customimport for more information. + ImportComment + + // By default, Import searches vendor directories + // that apply in the given source directory before searching + // the GOROOT and GOPATH roots. 
+ // If an Import finds and returns a package using a vendor + // directory, the resulting ImportPath is the complete path + // to the package, including the path elements leading up + // to and including "vendor". + // For example, if Import("y", "x/subdir", 0) finds + // "x/vendor/y", the returned package's ImportPath is "x/vendor/y", + // not plain "y". + // See golang.org/s/go15vendor for more information. + // + // Setting IgnoreVendor ignores vendor directories. + // + // In contrast to the package's ImportPath, + // the returned package's Imports, TestImports, and XTestImports + // are always the exact import paths from the source files: + // Import makes no attempt to resolve or check those paths. + IgnoreVendor +) + +// A Package describes the Go package found in a directory. +type Package struct { + Dir string // directory containing package sources + Name string // package name + ImportComment string // path in import comment on package statement + Doc string // documentation synopsis + ImportPath string // import path of package ("" if unknown) + Root string // root of Go tree where this package lives + SrcRoot string // package source root directory ("" if unknown) + PkgRoot string // package install root directory ("" if unknown) + PkgTargetRoot string // architecture dependent install root directory ("" if unknown) + BinDir string // command install directory ("" if unknown) + Goroot bool // package found in Go root + PkgObj string // installed .a file + AllTags []string // tags that can influence file selection in this directory + ConflictDir string // this directory shadows Dir in $GOPATH + BinaryOnly bool // cannot be rebuilt from source (has //go:binary-only-package comment) + + // Source files + GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) + CgoFiles []string // .go source files that import "C" + IgnoredGoFiles []string // .go source files ignored for this build + InvalidGoFiles []string // .go source 
files with detected problems (parse error, wrong package name, and so on) + CFiles []string // .c source files + CXXFiles []string // .cc, .cpp and .cxx source files + MFiles []string // .m (Objective-C) source files + HFiles []string // .h, .hh, .hpp and .hxx source files + FFiles []string // .f, .F, .for and .f90 Fortran source files + SFiles []string // .s source files + SwigFiles []string // .swig files + SwigCXXFiles []string // .swigcxx files + SysoFiles []string // .syso system object files to add to archive + + // Cgo directives + CgoCFLAGS []string // Cgo CFLAGS directives + CgoCPPFLAGS []string // Cgo CPPFLAGS directives + CgoCXXFLAGS []string // Cgo CXXFLAGS directives + CgoFFLAGS []string // Cgo FFLAGS directives + CgoLDFLAGS []string // Cgo LDFLAGS directives + CgoPkgConfig []string // Cgo pkg-config directives + + // Dependency information + Imports []string // import paths from GoFiles, CgoFiles + ImportPos map[string][]token.Position // line information for Imports + + // Test information + TestGoFiles []string // _test.go files in package + TestImports []string // import paths from TestGoFiles + TestImportPos map[string][]token.Position // line information for TestImports + XTestGoFiles []string // _test.go files outside package + XTestImports []string // import paths from XTestGoFiles + XTestImportPos map[string][]token.Position // line information for XTestImports +} + +// IsCommand reports whether the package is considered a +// command to be installed (not just a library). +// Packages named "main" are treated as commands. +func (p *Package) IsCommand() bool { + return p.Name == "main" +} + +// ImportDir is like Import but processes the Go package found in +// the named directory. +func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) { + return ctxt.Import(".", dir, mode) +} + +// NoGoError is the error used by Import to describe a directory +// containing no buildable Go source files. 
(It may still contain +// test files, files hidden by build tags, and so on.) +type NoGoError struct { + Dir string +} + +func (e *NoGoError) Error() string { + return "no buildable Go source files in " + e.Dir +} + +// MultiplePackageError describes a directory containing +// multiple buildable Go source files for multiple packages. +type MultiplePackageError struct { + Dir string // directory containing files + Packages []string // package names found + Files []string // corresponding files: Files[i] declares package Packages[i] +} + +func (e *MultiplePackageError) Error() string { + // Error string limited to two entries for compatibility. + return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s", e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir) +} + +func nameExt(name string) string { + i := strings.LastIndex(name, ".") + if i < 0 { + return "" + } + return name[i:] +} + +// Import returns details about the Go package named by the import path, +// interpreting local import paths relative to the srcDir directory. +// If the path is a local import path naming a package that can be imported +// using a standard import path, the returned package will set p.ImportPath +// to that path. +// +// In the directory containing the package, .go, .c, .h, and .s files are +// considered part of the package except for: +// +// - .go files in package documentation +// - files starting with _ or . (likely editor temporary files) +// - files with build constraints not satisfied by the context +// +// If an error occurs, Import returns a non-nil error and a non-nil +// *Package containing partial information. 
+// +func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) { + p := &Package{ + ImportPath: path, + } + if path == "" { + return p, fmt.Errorf("import %q: invalid import path", path) + } + + var pkgtargetroot string + var pkga string + var pkgerr error + suffix := "" + if ctxt.InstallSuffix != "" { + suffix = "_" + ctxt.InstallSuffix + } + switch ctxt.Compiler { + case "gccgo": + pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix + case "gc": + pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix + default: + // Save error for end of function. + pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler) + } + setPkga := func() { + switch ctxt.Compiler { + case "gccgo": + dir, elem := pathpkg.Split(p.ImportPath) + pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a" + case "gc": + pkga = pkgtargetroot + "/" + p.ImportPath + ".a" + } + } + setPkga() + + binaryOnly := false + if IsLocalImport(path) { + pkga = "" // local imports have no installed path + if srcDir == "" { + return p, fmt.Errorf("import %q: import relative to unknown directory", path) + } + if !ctxt.isAbsPath(path) { + p.Dir = ctxt.joinPath(srcDir, path) + } + // p.Dir directory may or may not exist. Gather partial information first, check if it exists later. + // Determine canonical import path, if any. + // Exclude results where the import path would include /testdata/. 
+ inTestdata := func(sub string) bool { + return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || strings.HasPrefix(sub, "testdata/") || sub == "testdata" + } + if ctxt.GOROOT != "" { + root := ctxt.joinPath(ctxt.GOROOT, "src") + if sub, ok := ctxt.hasSubdir(root, p.Dir); ok && !inTestdata(sub) { + p.Goroot = true + p.ImportPath = sub + p.Root = ctxt.GOROOT + goto Found + } + } + all := ctxt.gopath() + for i, root := range all { + rootsrc := ctxt.joinPath(root, "src") + if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok && !inTestdata(sub) { + // We found a potential import path for dir, + // but check that using it wouldn't find something + // else first. + if ctxt.GOROOT != "" { + if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) { + p.ConflictDir = dir + goto Found + } + } + for _, earlyRoot := range all[:i] { + if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) { + p.ConflictDir = dir + goto Found + } + } + + // sub would not name some other directory instead of this one. + // Record it. + p.ImportPath = sub + p.Root = root + goto Found + } + } + // It's okay that we didn't find a root containing dir. + // Keep going with the information we have. + } else { + if strings.HasPrefix(path, "/") { + return p, fmt.Errorf("import %q: cannot import absolute path", path) + } + + // tried records the location of unsuccessful package lookups + var tried struct { + vendor []string + goroot string + gopath []string + } + gopath := ctxt.gopath() + + // Vendor directories get first chance to satisfy import. 
+ if mode&IgnoreVendor == 0 && srcDir != "" { + searchVendor := func(root string, isGoroot bool) bool { + sub, ok := ctxt.hasSubdir(root, srcDir) + if !ok || !strings.HasPrefix(sub, "src/") || strings.Contains(sub, "/testdata/") { + return false + } + for { + vendor := ctxt.joinPath(root, sub, "vendor") + if ctxt.isDir(vendor) { + dir := ctxt.joinPath(vendor, path) + if ctxt.isDir(dir) && hasGoFiles(ctxt, dir) { + p.Dir = dir + p.ImportPath = strings.TrimPrefix(pathpkg.Join(sub, "vendor", path), "src/") + p.Goroot = isGoroot + p.Root = root + setPkga() // p.ImportPath changed + return true + } + tried.vendor = append(tried.vendor, dir) + } + i := strings.LastIndex(sub, "/") + if i < 0 { + break + } + sub = sub[:i] + } + return false + } + if searchVendor(ctxt.GOROOT, true) { + goto Found + } + for _, root := range gopath { + if searchVendor(root, false) { + goto Found + } + } + } + + // Determine directory from import path. + if ctxt.GOROOT != "" { + dir := ctxt.joinPath(ctxt.GOROOT, "src", path) + isDir := ctxt.isDir(dir) + binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga)) + if isDir || binaryOnly { + p.Dir = dir + p.Goroot = true + p.Root = ctxt.GOROOT + goto Found + } + tried.goroot = dir + } + for _, root := range gopath { + dir := ctxt.joinPath(root, "src", path) + isDir := ctxt.isDir(dir) + binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(root, pkga)) + if isDir || binaryOnly { + p.Dir = dir + p.Root = root + goto Found + } + tried.gopath = append(tried.gopath, dir) + } + + // package was not found + var paths []string + format := "\t%s (vendor tree)" + for _, dir := range tried.vendor { + paths = append(paths, fmt.Sprintf(format, dir)) + format = "\t%s" + } + if tried.goroot != "" { + paths = append(paths, fmt.Sprintf("\t%s (from $GOROOT)", tried.goroot)) + } else { + paths = append(paths, "\t($GOROOT not set)") + } + format = "\t%s (from $GOPATH)" + for _, dir 
:= range tried.gopath { + paths = append(paths, fmt.Sprintf(format, dir)) + format = "\t%s" + } + if len(tried.gopath) == 0 { + paths = append(paths, "\t($GOPATH not set. For more details see: 'go help gopath')") + } + return p, fmt.Errorf("cannot find package %q in any of:\n%s", path, strings.Join(paths, "\n")) + } + +Found: + if p.Root != "" { + p.SrcRoot = ctxt.joinPath(p.Root, "src") + p.PkgRoot = ctxt.joinPath(p.Root, "pkg") + p.BinDir = ctxt.joinPath(p.Root, "bin") + if pkga != "" { + p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot) + p.PkgObj = ctxt.joinPath(p.Root, pkga) + } + } + + // If it's a local import path, by the time we get here, we still haven't checked + // that p.Dir directory exists. This is the right time to do that check. + // We can't do it earlier, because we want to gather partial information for the + // non-nil *Package returned when an error occurs. + // We need to do this before we return early on FindOnly flag. + if IsLocalImport(path) && !ctxt.isDir(p.Dir) { + // package was not found + return p, fmt.Errorf("cannot find package %q in:\n\t%s", path, p.Dir) + } + + if mode&FindOnly != 0 { + return p, pkgerr + } + if binaryOnly && (mode&AllowBinary) != 0 { + return p, pkgerr + } + + dirs, err := ctxt.readDir(p.Dir) + if err != nil { + return p, err + } + + var badGoError error + var Sfiles []string // files with ".S" (capital S) + var firstFile, firstCommentFile string + imported := make(map[string][]token.Position) + testImported := make(map[string][]token.Position) + xTestImported := make(map[string][]token.Position) + allTags := make(map[string]bool) + fset := token.NewFileSet() + for _, d := range dirs { + if d.IsDir() { + continue + } + + name := d.Name() + ext := nameExt(name) + + badFile := func(err error) { + if badGoError == nil { + badGoError = err + } + p.InvalidGoFiles = append(p.InvalidGoFiles, name) + } + + match, data, filename, err := ctxt.matchFile(p.Dir, name, allTags, &p.BinaryOnly) + if err != nil { + 
badFile(err) + continue + } + if !match { + if ext == ".go" { + p.IgnoredGoFiles = append(p.IgnoredGoFiles, name) + } + continue + } + + // Going to save the file. For non-Go files, can stop here. + switch ext { + case ".c": + p.CFiles = append(p.CFiles, name) + continue + case ".cc", ".cpp", ".cxx": + p.CXXFiles = append(p.CXXFiles, name) + continue + case ".m": + p.MFiles = append(p.MFiles, name) + continue + case ".h", ".hh", ".hpp", ".hxx": + p.HFiles = append(p.HFiles, name) + continue + case ".f", ".F", ".for", ".f90": + p.FFiles = append(p.FFiles, name) + continue + case ".s": + p.SFiles = append(p.SFiles, name) + continue + case ".S": + Sfiles = append(Sfiles, name) + continue + case ".swig": + p.SwigFiles = append(p.SwigFiles, name) + continue + case ".swigcxx": + p.SwigCXXFiles = append(p.SwigCXXFiles, name) + continue + case ".syso": + // binary objects to add to package archive + // Likely of the form foo_windows.syso, but + // the name was vetted above with goodOSArchFile. + p.SysoFiles = append(p.SysoFiles, name) + continue + } + + pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments) + if err != nil { + badFile(err) + continue + } + + pkg := pf.Name.Name + if pkg == "documentation" { + p.IgnoredGoFiles = append(p.IgnoredGoFiles, name) + continue + } + + isTest := strings.HasSuffix(name, "_test.go") + isXTest := false + if isTest && strings.HasSuffix(pkg, "_test") { + isXTest = true + pkg = pkg[:len(pkg)-len("_test")] + } + + if p.Name == "" { + p.Name = pkg + firstFile = name + } else if pkg != p.Name { + badFile(&MultiplePackageError{ + Dir: p.Dir, + Packages: []string{p.Name, pkg}, + Files: []string{firstFile, name}, + }) + p.InvalidGoFiles = append(p.InvalidGoFiles, name) + } + if pf.Doc != nil && p.Doc == "" { + p.Doc = doc.Synopsis(pf.Doc.Text()) + } + + if mode&ImportComment != 0 { + qcom, line := findImportComment(data) + if line != 0 { + com, err := strconv.Unquote(qcom) + if err != nil { + 
badFile(fmt.Errorf("%s:%d: cannot parse import comment", filename, line)) + } else if p.ImportComment == "" { + p.ImportComment = com + firstCommentFile = name + } else if p.ImportComment != com { + badFile(fmt.Errorf("found import comments %q (%s) and %q (%s) in %s", p.ImportComment, firstCommentFile, com, name, p.Dir)) + } + } + } + + // Record imports and information about cgo. + isCgo := false + for _, decl := range pf.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + for _, dspec := range d.Specs { + spec, ok := dspec.(*ast.ImportSpec) + if !ok { + continue + } + quoted := spec.Path.Value + path, err := strconv.Unquote(quoted) + if err != nil { + log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted) + } + if isXTest { + xTestImported[path] = append(xTestImported[path], fset.Position(spec.Pos())) + } else if isTest { + testImported[path] = append(testImported[path], fset.Position(spec.Pos())) + } else { + imported[path] = append(imported[path], fset.Position(spec.Pos())) + } + if path == "C" { + if isTest { + badFile(fmt.Errorf("use of cgo in test %s not supported", filename)) + } else { + cg := spec.Doc + if cg == nil && len(d.Specs) == 1 { + cg = d.Doc + } + if cg != nil { + if err := ctxt.saveCgo(filename, p, cg); err != nil { + badFile(err) + } + } + isCgo = true + } + } + } + } + if isCgo { + allTags["cgo"] = true + if ctxt.CgoEnabled { + p.CgoFiles = append(p.CgoFiles, name) + } else { + p.IgnoredGoFiles = append(p.IgnoredGoFiles, name) + } + } else if isXTest { + p.XTestGoFiles = append(p.XTestGoFiles, name) + } else if isTest { + p.TestGoFiles = append(p.TestGoFiles, name) + } else { + p.GoFiles = append(p.GoFiles, name) + } + } + if badGoError != nil { + return p, badGoError + } + if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { + return p, &NoGoError{p.Dir} + } + + for tag := range allTags { + p.AllTags = append(p.AllTags, tag) + } + sort.Strings(p.AllTags) + + p.Imports, 
p.ImportPos = cleanImports(imported) + p.TestImports, p.TestImportPos = cleanImports(testImported) + p.XTestImports, p.XTestImportPos = cleanImports(xTestImported) + + // add the .S files only if we are using cgo + // (which means gcc will compile them). + // The standard assemblers expect .s files. + if len(p.CgoFiles) > 0 { + p.SFiles = append(p.SFiles, Sfiles...) + sort.Strings(p.SFiles) + } + + return p, pkgerr +} + +// hasGoFiles reports whether dir contains any files with names ending in .go. +// For a vendor check we must exclude directories that contain no .go files. +// Otherwise it is not possible to vendor just a/b/c and still import the +// non-vendored a/b. See golang.org/issue/13832. +func hasGoFiles(ctxt *Context, dir string) bool { + ents, _ := ctxt.readDir(dir) + for _, ent := range ents { + if !ent.IsDir() && strings.HasSuffix(ent.Name(), ".go") { + return true + } + } + return false +} + +func findImportComment(data []byte) (s string, line int) { + // expect keyword package + word, data := parseWord(data) + if string(word) != "package" { + return "", 0 + } + + // expect package name + _, data = parseWord(data) + + // now ready for import comment, a // or /* */ comment + // beginning and ending on the current line. 
+ for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') { + data = data[1:] + } + + var comment []byte + switch { + case bytes.HasPrefix(data, slashSlash): + i := bytes.Index(data, newline) + if i < 0 { + i = len(data) + } + comment = data[2:i] + case bytes.HasPrefix(data, slashStar): + data = data[2:] + i := bytes.Index(data, starSlash) + if i < 0 { + // malformed comment + return "", 0 + } + comment = data[:i] + if bytes.Contains(comment, newline) { + return "", 0 + } + } + comment = bytes.TrimSpace(comment) + + // split comment into `import`, `"pkg"` + word, arg := parseWord(comment) + if string(word) != "import" { + return "", 0 + } + + line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline) + return strings.TrimSpace(string(arg)), line +} + +var ( + slashSlash = []byte("//") + slashStar = []byte("/*") + starSlash = []byte("*/") + newline = []byte("\n") +) + +// skipSpaceOrComment returns data with any leading spaces or comments removed. +func skipSpaceOrComment(data []byte) []byte { + for len(data) > 0 { + switch data[0] { + case ' ', '\t', '\r', '\n': + data = data[1:] + continue + case '/': + if bytes.HasPrefix(data, slashSlash) { + i := bytes.Index(data, newline) + if i < 0 { + return nil + } + data = data[i+1:] + continue + } + if bytes.HasPrefix(data, slashStar) { + data = data[2:] + i := bytes.Index(data, starSlash) + if i < 0 { + return nil + } + data = data[i+2:] + continue + } + } + break + } + return data +} + +// parseWord skips any leading spaces or comments in data +// and then parses the beginning of data as an identifier or keyword, +// returning that word and what remains after the word. +func parseWord(data []byte) (word, rest []byte) { + data = skipSpaceOrComment(data) + + // Parse past leading word characters. 
+ rest = data + for { + r, size := utf8.DecodeRune(rest) + if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' { + rest = rest[size:] + continue + } + break + } + + word = data[:len(data)-len(rest)] + if len(word) == 0 { + return nil, nil + } + + return word, rest +} + +// MatchFile reports whether the file with the given name in the given directory +// matches the context and would be included in a Package created by ImportDir +// of that directory. +// +// MatchFile considers the name of the file and may use ctxt.OpenFile to +// read some or all of the file's content. +func (ctxt *Context) MatchFile(dir, name string) (match bool, err error) { + match, _, _, err = ctxt.matchFile(dir, name, nil, nil) + return match, err + +} + +// matchFile determines whether the file with the given name in the given directory +// should be included in the package being constructed. +// It returns the data read from the file. +// If name denotes a Go program, matchFile reads until the end of the +// imports (and returns that data) even though it only considers text +// until the first non-comment. +// If allTags is non-nil, matchFile records any encountered build tag +// by setting allTags[tag] = true. 
+func (ctxt *Context) matchFile(dir, name string, allTags map[string]bool, binaryOnly *bool) (match bool, data []byte, filename string, err error) { + if strings.HasPrefix(name, "_") || + strings.HasPrefix(name, ".") { + return match, data, filename, err + } + + i := strings.LastIndex(name, ".") + if i < 0 { + i = len(name) + } + ext := name[i:] + + if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles { + return match, data, filename, err + } + + switch ext { + case ".go", ".c", ".cc", ".cxx", ".cpp", ".m", ".s", ".h", ".hh", ".hpp", ".hxx", ".f", ".F", ".f90", ".S", ".swig", ".swigcxx": + // tentatively okay - read to make sure + case ".syso": + // binary, no reading + match = true + return match, data, filename, err + default: + // skip + return match, data, filename, err + } + + filename = ctxt.joinPath(dir, name) + f, err := ctxt.openFile(filename) + if err != nil { + return match, data, filename, err + } + + if strings.HasSuffix(filename, ".go") { + data, err = readImports(f, false, nil) + if strings.HasSuffix(filename, "_test.go") { + binaryOnly = nil // ignore //go:binary-only-package comments in _test.go files + } + } else { + binaryOnly = nil // ignore //go:binary-only-package comments in non-Go sources + data, err = readComments(f) + } + f.Close() + if err != nil { + err = fmt.Errorf("read %s: %v", filename, err) + return match, data, filename, err + } + + // Look for +build comments to accept or reject the file. + var sawBinaryOnly bool + if !ctxt.shouldBuild(data, allTags, &sawBinaryOnly) && !ctxt.UseAllFiles { + return match, data, filename, err + } + + if binaryOnly != nil && sawBinaryOnly { + *binaryOnly = true + } + match = true + return match, data, filename, err +} + +func cleanImports(m map[string][]token.Position) ([]string, map[string][]token.Position) { + all := make([]string, 0, len(m)) + for path := range m { + all = append(all, path) + } + sort.Strings(all) + return all, m +} + +// Import is shorthand for Default.Import. 
+func Import(path, srcDir string, mode ImportMode) (*Package, error) { + return Default.Import(path, srcDir, mode) +} + +// ImportDir is shorthand for Default.ImportDir. +func ImportDir(dir string, mode ImportMode) (*Package, error) { + return Default.ImportDir(dir, mode) +} + +var slashslash = []byte("//") + +// Special comment denoting a binary-only package. +// See https://golang.org/design/2775-binary-only-packages +// for more about the design of binary-only packages. +var binaryOnlyComment = []byte("//go:binary-only-package") + +// shouldBuild reports whether it is okay to use this file, +// The rule is that in the file's leading run of // comments +// and blank lines, which must be followed by a blank line +// (to avoid including a Go package clause doc comment), +// lines beginning with '// +build' are taken as build directives. +// +// The file is accepted only if each such line lists something +// matching the file. For example: +// +// // +build windows linux +// +// marks the file as applicable only on Windows and Linux. +// +// If shouldBuild finds a //go:binary-only-package comment in the file, +// it sets *binaryOnly to true. Otherwise it does not change *binaryOnly. +// +func (ctxt *Context) shouldBuild(content []byte, allTags map[string]bool, binaryOnly *bool) bool { + sawBinaryOnly := false + + // Pass 1. Identify leading run of // comments and blank lines, + // which must be followed by a blank line. + end := 0 + p := content + for len(p) > 0 { + line := p + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, p = line[:i], p[i+1:] + } else { + p = p[len(p):] + } + line = bytes.TrimSpace(line) + if len(line) == 0 { // Blank line + end = len(content) - len(p) + continue + } + if !bytes.HasPrefix(line, slashslash) { // Not comment line + break + } + } + content = content[:end] + + // Pass 2. Process each line in the run. 
+ p = content + hasReq := len(ctxt.RequiredTags) > 0 + allok := !hasReq + for len(p) > 0 { + line := p + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, p = line[:i], p[i+1:] + } else { + p = p[len(p):] + } + line = bytes.TrimSpace(line) + if bytes.HasPrefix(line, slashslash) { + if bytes.Equal(line, binaryOnlyComment) { + sawBinaryOnly = true + } + line = bytes.TrimSpace(line[len(slashslash):]) + if len(line) > 0 && line[0] == '+' { + // Looks like a comment +line. + f := strings.Fields(string(line)) + if f[0] == "+build" { + ok := false + for _, tok := range f[1:] { + tags := map[string]bool{} + if ctxt.match(tok, tags) { + if containsAll(tags, ctxt.RequiredTags) { + ok = true + } + } + merge(allTags, tags) + } + if !hasReq { + if !ok { + allok = false + } + } else { + if ok { + allok = true + } + } + } + } + } + } + + if binaryOnly != nil && sawBinaryOnly { + *binaryOnly = true + } + + return allok +} + +func merge(to, from map[string]bool) { + if to == nil { + return + } + for k, v := range from { + to[k] = v + } +} + +func containsAll(m map[string]bool, vals []string) bool { + // yes this is N^2, but N is small. + for _, v := range vals { + if !m[v] { + return false + } + } + return true +} + +func contains(list []string, s string) bool { + for _, l := range list { + if l == s { + return true + } + } + return false +} + +// saveCgo saves the information from the #cgo lines in the import "C" comment. +// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives +// that affect the way cgo's C code is built. +func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error { + text := cg.Text() + for _, line := range strings.Split(text, "\n") { + orig := line + + // Line is + // #cgo [GOOS/GOARCH...] LDFLAGS: stuff + // + line = strings.TrimSpace(line) + if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') { + continue + } + + // Split at colon. 
+ line = strings.TrimSpace(line[4:]) + i := strings.Index(line, ":") + if i < 0 { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + line, argstr := line[:i], line[i+1:] + + // Parse GOOS/GOARCH stuff. + f := strings.Fields(line) + if len(f) < 1 { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + + cond, verb := f[:len(f)-1], f[len(f)-1] + if len(cond) > 0 { + ok := false + for _, c := range cond { + if ctxt.match(c, nil) { + ok = true + break + } + } + if !ok { + continue + } + } + + args, err := splitQuoted(argstr) + if err != nil { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + var ok bool + for i, arg := range args { + if arg, ok = expandSrcDir(arg, di.Dir); !ok { + return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg) + } + args[i] = arg + } + + switch verb { + case "CFLAGS", "CPPFLAGS", "CXXFLAGS", "FFLAGS", "LDFLAGS": + // Change relative paths to absolute. + ctxt.makePathsAbsolute(args, di.Dir) + } + + switch verb { + case "CFLAGS": + di.CgoCFLAGS = append(di.CgoCFLAGS, args...) + case "CPPFLAGS": + di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...) + case "CXXFLAGS": + di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...) + case "FFLAGS": + di.CgoFFLAGS = append(di.CgoFFLAGS, args...) + case "LDFLAGS": + di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...) + case "pkg-config": + di.CgoPkgConfig = append(di.CgoPkgConfig, args...) + default: + return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig) + } + } + return nil +} + +// expandSrcDir expands any occurrence of ${SRCDIR}, making sure +// the result is safe for the shell. +func expandSrcDir(str string, srcdir string) (string, bool) { + // "\" delimited paths cause safeCgoName to fail + // so convert native paths with a different delimiter + // to "/" before starting (eg: on windows). 
+ srcdir = filepath.ToSlash(srcdir) + + chunks := strings.Split(str, "${SRCDIR}") + if len(chunks) < 2 { + return str, safeCgoName(str) + } + ok := true + for _, chunk := range chunks { + ok = ok && (chunk == "" || safeCgoName(chunk)) + } + ok = ok && (srcdir == "" || safeCgoName(srcdir)) + res := strings.Join(chunks, srcdir) + return res, ok && res != "" +} + +// makePathsAbsolute looks for compiler options that take paths and +// makes them absolute. We do this because through the 1.8 release we +// ran the compiler in the package directory, so any relative -I or -L +// options would be relative to that directory. In 1.9 we changed to +// running the compiler in the build directory, to get consistent +// build results (issue #19964). To keep builds working, we change any +// relative -I or -L options to be absolute. +// +// Using filepath.IsAbs and filepath.Join here means the results will be +// different on different systems, but that's OK: -I and -L options are +// inherently system-dependent. +func (ctxt *Context) makePathsAbsolute(args []string, srcDir string) { + nextPath := false + for i, arg := range args { + if nextPath { + if !filepath.IsAbs(arg) { + args[i] = filepath.Join(srcDir, arg) + } + nextPath = false + } else if strings.HasPrefix(arg, "-I") || strings.HasPrefix(arg, "-L") { + if len(arg) == 2 { + nextPath = true + } else { + if !filepath.IsAbs(arg[2:]) { + args[i] = arg[:2] + filepath.Join(srcDir, arg[2:]) + } + } + } + } +} + +// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN. +// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay. +// See golang.org/issue/6038. +// The @ is for OS X. See golang.org/issue/13720. +// The % is for Jenkins. See golang.org/issue/16959. 
+const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@% " + +func safeCgoName(s string) bool { + if s == "" { + return false + } + for i := 0; i < len(s); i++ { + if c := s[i]; c < utf8.RuneSelf && strings.IndexByte(safeString, c) < 0 { + return false + } + } + return true +} + +// splitQuoted splits the string s around each instance of one or more consecutive +// white space characters while taking into account quotes and escaping, and +// returns an array of substrings of s or an empty list if s contains only white space. +// Single quotes and double quotes are recognized to prevent splitting within the +// quoted region, and are removed from the resulting substrings. If a quote in s +// isn't closed err will be set and r will have the unclosed argument as the +// last element. The backslash is used for escaping. +// +// For example, the following string: +// +// a b:"c d" 'e''f' "g\"" +// +// Would be parsed as: +// +// []string{"a", "b:c d", "ef", `g"`} +// +func splitQuoted(s string) (r []string, err error) { + var args []string + arg := make([]rune, len(s)) + escaped := false + quoted := false + quote := '\x00' + i := 0 + for _, rune := range s { + switch { + case escaped: + escaped = false + case rune == '\\': + escaped = true + continue + case quote != '\x00': + if rune == quote { + quote = '\x00' + continue + } + case rune == '"' || rune == '\'': + quoted = true + quote = rune + continue + case unicode.IsSpace(rune): + if quoted || i > 0 { + quoted = false + args = append(args, string(arg[:i])) + i = 0 + } + continue + } + arg[i] = rune + i++ + } + if quoted || i > 0 { + args = append(args, string(arg[:i])) + } + if quote != 0 { + err = errors.New("unclosed quote") + } else if escaped { + err = errors.New("unfinished escaping") + } + return args, err +} + +// match reports whether the name is one of: +// +// $GOOS +// $GOARCH +// cgo (if cgo is enabled) +// !cgo (if cgo is disabled) +// ctxt.Compiler +// 
!ctxt.Compiler +// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags) +// !tag (if tag is not listed in ctxt.BuildTags or ctxt.ReleaseTags) +// a comma-separated list of any of these +// +func (ctxt *Context) match(name string, allTags map[string]bool) bool { + if name == "" { + if allTags != nil { + allTags[name] = true + } + return false + } + if i := strings.Index(name, ","); i >= 0 { + // comma-separated list + ok1 := ctxt.match(name[:i], allTags) + ok2 := ctxt.match(name[i+1:], allTags) + return ok1 && ok2 + } + if strings.HasPrefix(name, "!!") { // bad syntax, reject always + return false + } + if strings.HasPrefix(name, "!") { // negation + return len(name) > 1 && !ctxt.match(name[1:], allTags) + } + + if allTags != nil { + allTags[name] = true + } + + // Tags must be letters, digits, underscores or dots. + // Unlike in Go identifiers, all digits are fine (e.g., "386"). + for _, c := range name { + if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' { + return false + } + } + + // special tags + if ctxt.CgoEnabled && name == "cgo" { + return true + } + if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler { + return true + } + if ctxt.GOOS == "android" && name == "linux" { + return true + } + + // other tags + for _, tag := range ctxt.BuildTags { + if tag == name { + return true + } + } + for _, tag := range ctxt.ReleaseTags { + if tag == name { + return true + } + } + + return false +} + +// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH +// suffix which does not match the current system. +// The recognized name formats are: +// +// name_$(GOOS).* +// name_$(GOARCH).* +// name_$(GOOS)_$(GOARCH).* +// name_$(GOOS)_test.* +// name_$(GOARCH)_test.* +// name_$(GOOS)_$(GOARCH)_test.* +// +// An exception: if GOOS=android, then files with GOOS=linux are also matched. 
+func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool { + if dot := strings.Index(name, "."); dot != -1 { + name = name[:dot] + } + + // Before Go 1.4, a file called "linux.go" would be equivalent to having a + // build tag "linux" in that file. For Go 1.4 and beyond, we require this + // auto-tagging to apply only to files with a non-empty prefix, so + // "foo_linux.go" is tagged but "linux.go" is not. This allows new operating + // systems, such as android, to arrive without breaking existing code with + // innocuous source code in "android.go". The easiest fix: cut everything + // in the name before the initial _. + i := strings.Index(name, "_") + if i < 0 { + return true + } + name = name[i:] // ignore everything before first _ + + l := strings.Split(name, "_") + if n := len(l); n > 0 && l[n-1] == "test" { + l = l[:n-1] + } + n := len(l) + if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] { + if allTags != nil { + allTags[l[n-2]] = true + allTags[l[n-1]] = true + } + if l[n-1] != ctxt.GOARCH { + return false + } + if ctxt.GOOS == "android" && l[n-2] == "linux" { + return true + } + return l[n-2] == ctxt.GOOS + } + if n >= 1 && knownOS[l[n-1]] { + if allTags != nil { + allTags[l[n-1]] = true + } + if ctxt.GOOS == "android" && l[n-1] == "linux" { + return true + } + return l[n-1] == ctxt.GOOS + } + if n >= 1 && knownArch[l[n-1]] { + if allTags != nil { + allTags[l[n-1]] = true + } + return l[n-1] == ctxt.GOARCH + } + return true +} + +var knownOS = make(map[string]bool) +var knownArch = make(map[string]bool) + +func init() { + for _, v := range strings.Fields(goosList) { + knownOS[v] = true + } + for _, v := range strings.Fields(goarchList) { + knownArch[v] = true + } +} + +// ToolDir is the directory containing build tools. +var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH) + +// IsLocalImport reports whether the import path is +// a local import path, like ".", "..", "./foo", or "../foo". 
+func IsLocalImport(path string) bool { + return path == "." || path == ".." || + strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../") +} + +// ArchChar returns "?" and an error. +// In earlier versions of Go, the returned string was used to derive +// the compiler and linker tool names, the default object file suffix, +// and the default linker output name. As of Go 1.5, those strings +// no longer vary by architecture; they are compile, link, .o, and a.out, respectively. +func ArchChar(goarch string) (string, error) { + return "?", errors.New("architecture letter no longer used") +} diff --git a/vendor/github.com/magefile/mage/build/doc.go b/vendor/github.com/magefile/mage/build/doc.go new file mode 100644 index 00000000..422e1a5f --- /dev/null +++ b/vendor/github.com/magefile/mage/build/doc.go @@ -0,0 +1,166 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package build gathers information about Go packages. +// +// Go Path +// +// The Go path is a list of directory trees containing Go source code. +// It is consulted to resolve imports that cannot be found in the standard +// Go tree. The default path is the value of the GOPATH environment +// variable, interpreted as a path list appropriate to the operating system +// (on Unix, the variable is a colon-separated string; +// on Windows, a semicolon-separated string; +// on Plan 9, a list). +// +// Each directory listed in the Go path must have a prescribed structure: +// +// The src/ directory holds source code. The path below 'src' determines +// the import path or executable name. +// +// The pkg/ directory holds installed package objects. +// As in the Go tree, each target operating system and +// architecture pair has its own subdirectory of pkg +// (pkg/GOOS_GOARCH). 
+// +// If DIR is a directory listed in the Go path, a package with +// source in DIR/src/foo/bar can be imported as "foo/bar" and +// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a" +// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a"). +// +// The bin/ directory holds compiled commands. +// Each command is named for its source directory, but only +// using the final element, not the entire path. That is, the +// command with source in DIR/src/foo/quux is installed into +// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped +// so that you can add DIR/bin to your PATH to get at the +// installed commands. +// +// Here's an example directory layout: +// +// GOPATH=/home/user/gocode +// +// /home/user/gocode/ +// src/ +// foo/ +// bar/ (go code in package bar) +// x.go +// quux/ (go code in package main) +// y.go +// bin/ +// quux (installed command) +// pkg/ +// linux_amd64/ +// foo/ +// bar.a (installed package object) +// +// Build Constraints +// +// A build constraint, also known as a build tag, is a line comment that begins +// +// // +build +// +// that lists the conditions under which a file should be included in the package. +// Constraints may appear in any kind of source file (not just Go), but +// they must appear near the top of the file, preceded +// only by blank lines and other line comments. These rules mean that in Go +// files a build constraint must appear before the package clause. +// +// To distinguish build constraints from package documentation, a series of +// build constraints must be followed by a blank line. +// +// A build constraint is evaluated as the OR of space-separated options; +// each option evaluates as the AND of its comma-separated terms; +// and each term is an alphanumeric word or, preceded by !, its negation. 
+// That is, the build constraint: +// +// // +build linux,386 darwin,!cgo +// +// corresponds to the boolean formula: +// +// (linux AND 386) OR (darwin AND (NOT cgo)) +// +// A file may have multiple build constraints. The overall constraint is the AND +// of the individual constraints. That is, the build constraints: +// +// // +build linux darwin +// // +build 386 +// +// corresponds to the boolean formula: +// +// (linux OR darwin) AND 386 +// +// During a particular build, the following words are satisfied: +// +// - the target operating system, as spelled by runtime.GOOS +// - the target architecture, as spelled by runtime.GOARCH +// - the compiler being used, either "gc" or "gccgo" +// - "cgo", if ctxt.CgoEnabled is true +// - "go1.1", from Go version 1.1 onward +// - "go1.2", from Go version 1.2 onward +// - "go1.3", from Go version 1.3 onward +// - "go1.4", from Go version 1.4 onward +// - "go1.5", from Go version 1.5 onward +// - "go1.6", from Go version 1.6 onward +// - "go1.7", from Go version 1.7 onward +// - "go1.8", from Go version 1.8 onward +// - "go1.9", from Go version 1.9 onward +// - any additional words listed in ctxt.BuildTags +// +// If a file's name, after stripping the extension and a possible _test suffix, +// matches any of the following patterns: +// *_GOOS +// *_GOARCH +// *_GOOS_GOARCH +// (example: source_windows_amd64.go) where GOOS and GOARCH represent +// any known operating system and architecture values respectively, then +// the file is considered to have an implicit build constraint requiring +// those terms (in addition to any explicit constraints in the file). +// +// To keep a file from being considered for the build: +// +// // +build ignore +// +// (any other unsatisfied word will work as well, but ``ignore'' is conventional.) 
+// +// To build a file only when using cgo, and only on Linux and OS X: +// +// // +build linux,cgo darwin,cgo +// +// Such a file is usually paired with another file implementing the +// default functionality for other systems, which in this case would +// carry the constraint: +// +// // +build !linux,!darwin !cgo +// +// Naming a file dns_windows.go will cause it to be included only when +// building the package for Windows; similarly, math_386.s will be included +// only when building the package for 32-bit x86. +// +// Using GOOS=android matches build tags and files as for GOOS=linux +// in addition to android tags and files. +// +// Binary-Only Packages +// +// It is possible to distribute packages in binary form without including the +// source code used for compiling the package. To do this, the package must +// be distributed with a source file not excluded by build constraints and +// containing a "//go:binary-only-package" comment. +// Like a build constraint, this comment must appear near the top of the file, +// preceded only by blank lines and other line comments and with a blank line +// following the comment, to separate it from the package documentation. +// Unlike build constraints, this comment is only recognized in non-test +// Go source files. +// +// The minimal source code for a binary-only package is therefore: +// +// //go:binary-only-package +// +// package mypkg +// +// The source code may include additional Go code. That code is never compiled +// but will be processed by tools like godoc and might be useful as end-user +// documentation. +// +package build diff --git a/vendor/github.com/magefile/mage/build/read.go b/vendor/github.com/magefile/mage/build/read.go new file mode 100644 index 00000000..29b8cdc7 --- /dev/null +++ b/vendor/github.com/magefile/mage/build/read.go @@ -0,0 +1,247 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package build + +import ( + "bufio" + "errors" + "io" + "unicode/utf8" +) + +type importReader struct { + b *bufio.Reader + buf []byte + peek byte + err error + eof bool + nerr int +} + +func isIdent(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf +} + +var ( + errSyntax = errors.New("syntax error") + errNUL = errors.New("unexpected NUL in input") +) + +// syntaxError records a syntax error, but only if an I/O error has not already been recorded. +func (r *importReader) syntaxError() { + if r.err == nil { + r.err = errSyntax + } +} + +// readByte reads the next byte from the input, saves it in buf, and returns it. +// If an error occurs, readByte records the error in r.err and returns 0. +func (r *importReader) readByte() byte { + c, err := r.b.ReadByte() + if err == nil { + r.buf = append(r.buf, c) + if c == 0 { + err = errNUL + } + } + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + c = 0 + } + return c +} + +// peekByte returns the next byte from the input reader but does not advance beyond it. +// If skipSpace is set, peekByte skips leading spaces and comments. +func (r *importReader) peekByte(skipSpace bool) byte { + if r.err != nil { + if r.nerr++; r.nerr > 10000 { + panic("go/build: import reader looping") + } + return 0 + } + + // Use r.peek as first input byte. + // Don't just return r.peek here: it might have been left by peekByte(false) + // and this might be peekByte(true). + c := r.peek + if c == 0 { + c = r.readByte() + } + for r.err == nil && !r.eof { + if skipSpace { + // For the purposes of this reader, semicolons are never necessary to + // understand the input and are treated as spaces. 
+ switch c { + case ' ', '\f', '\t', '\r', '\n', ';': + c = r.readByte() + continue + + case '/': + c = r.readByte() + if c == '/' { + for c != '\n' && r.err == nil && !r.eof { + c = r.readByte() + } + } else if c == '*' { + var c1 byte + for (c != '*' || c1 != '/') && r.err == nil { + if r.eof { + r.syntaxError() + } + c, c1 = c1, r.readByte() + } + } else { + r.syntaxError() + } + c = r.readByte() + continue + } + } + break + } + r.peek = c + return r.peek +} + +// nextByte is like peekByte but advances beyond the returned byte. +func (r *importReader) nextByte(skipSpace bool) byte { + c := r.peekByte(skipSpace) + r.peek = 0 + return c +} + +// readKeyword reads the given keyword from the input. +// If the keyword is not present, readKeyword records a syntax error. +func (r *importReader) readKeyword(kw string) { + r.peekByte(true) + for i := 0; i < len(kw); i++ { + if r.nextByte(false) != kw[i] { + r.syntaxError() + return + } + } + if isIdent(r.peekByte(false)) { + r.syntaxError() + } +} + +// readIdent reads an identifier from the input. +// If an identifier is not present, readIdent records a syntax error. +func (r *importReader) readIdent() { + c := r.peekByte(true) + if !isIdent(c) { + r.syntaxError() + return + } + for isIdent(r.peekByte(false)) { + r.peek = 0 + } +} + +// readString reads a quoted string literal from the input. +// If an identifier is not present, readString records a syntax error. 
+func (r *importReader) readString(save *[]string) { + switch r.nextByte(true) { + case '`': + start := len(r.buf) - 1 + for r.err == nil { + if r.nextByte(false) == '`' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof { + r.syntaxError() + } + } + case '"': + start := len(r.buf) - 1 + for r.err == nil { + c := r.nextByte(false) + if c == '"' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof || c == '\n' { + r.syntaxError() + } + if c == '\\' { + r.nextByte(false) + } + } + default: + r.syntaxError() + } +} + +// readImport reads an import clause - optional identifier followed by quoted string - +// from the input. +func (r *importReader) readImport(imports *[]string) { + c := r.peekByte(true) + if c == '.' { + r.peek = 0 + } else if isIdent(c) { + r.readIdent() + } + r.readString(imports) +} + +// readComments is like ioutil.ReadAll, except that it only reads the leading +// block of comments in the file. +func readComments(f io.Reader) ([]byte, error) { + r := &importReader{b: bufio.NewReader(f)} + r.peekByte(true) + if r.err == nil && !r.eof { + // Didn't reach EOF, so must have found a non-space byte. Remove it. + r.buf = r.buf[:len(r.buf)-1] + } + return r.buf, r.err +} + +// readImports is like ioutil.ReadAll, except that it expects a Go file as input +// and stops reading the input once the imports have completed. +func readImports(f io.Reader, reportSyntaxError bool, imports *[]string) ([]byte, error) { + r := &importReader{b: bufio.NewReader(f)} + + r.readKeyword("package") + r.readIdent() + for r.peekByte(true) == 'i' { + r.readKeyword("import") + if r.peekByte(true) == '(' { + r.nextByte(false) + for r.peekByte(true) != ')' && r.err == nil { + r.readImport(imports) + } + r.nextByte(false) + } else { + r.readImport(imports) + } + } + + // If we stopped successfully before EOF, we read a byte that told us we were done. 
+ // Return all but that last byte, which would cause a syntax error if we let it through. + if r.err == nil && !r.eof { + return r.buf[:len(r.buf)-1], nil + } + + // If we stopped for a syntax error, consume the whole file so that + // we are sure we don't change the errors that go/parser returns. + if r.err == errSyntax && !reportSyntaxError { + r.err = nil + for r.err == nil && !r.eof { + r.readByte() + } + } + + return r.buf, r.err +} diff --git a/vendor/github.com/magefile/mage/build/syslist.go b/vendor/github.com/magefile/mage/build/syslist.go new file mode 100644 index 00000000..73fdbe6c --- /dev/null +++ b/vendor/github.com/magefile/mage/build/syslist.go @@ -0,0 +1,8 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package build + +const goosList = "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows zos " +const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64 " diff --git a/vendor/github.com/magefile/mage/build/zcgo.go b/vendor/github.com/magefile/mage/build/zcgo.go new file mode 100644 index 00000000..86e2a2d9 --- /dev/null +++ b/vendor/github.com/magefile/mage/build/zcgo.go @@ -0,0 +1,37 @@ +// auto generated by go tool dist + +package build + +const defaultCGO_ENABLED = "" + +var cgoEnabled = map[string]bool{ + "android/386": true, + "android/amd64": true, + "android/arm": true, + "android/arm64": true, + "darwin/386": true, + "darwin/amd64": true, + "darwin/arm": true, + "darwin/arm64": true, + "dragonfly/amd64": true, + "freebsd/386": true, + "freebsd/amd64": true, + "linux/386": true, + "linux/amd64": true, + "linux/arm": true, + "linux/arm64": true, + "linux/mips": true, + "linux/mips64": true, + "linux/mips64le": true, + "linux/mipsle": true, + "linux/ppc64le": true, + "linux/s390x": true, + 
"netbsd/386": true, + "netbsd/amd64": true, + "netbsd/arm": true, + "openbsd/386": true, + "openbsd/amd64": true, + "solaris/amd64": true, + "windows/386": true, + "windows/amd64": true, +} diff --git a/vendor/github.com/magefile/mage/mage/command_string.go b/vendor/github.com/magefile/mage/mage/command_string.go new file mode 100644 index 00000000..14dbd260 --- /dev/null +++ b/vendor/github.com/magefile/mage/mage/command_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Command"; DO NOT EDIT. + +package mage + +import "fmt" + +const _Command_name = "NoneVersionInitClean" + +var _Command_index = [...]uint8{0, 4, 11, 15, 20} + +func (i Command) String() string { + if i < 0 || i >= Command(len(_Command_index)-1) { + return fmt.Sprintf("Command(%d)", i) + } + return _Command_name[_Command_index[i]:_Command_index[i+1]] +} diff --git a/vendor/github.com/magefile/mage/mage/magefile_tmpl.go b/vendor/github.com/magefile/mage/mage/magefile_tmpl.go new file mode 100644 index 00000000..01b87860 --- /dev/null +++ b/vendor/github.com/magefile/mage/mage/magefile_tmpl.go @@ -0,0 +1,46 @@ +package mage + +var mageTpl = `// +build mage + +package main + +import ( + "fmt" + "os" + "os/exec" + + "github.com/magefile/mage/mg" // mg contains helpful utility functions, like Deps +) + +// Default target to run when none is specified +// If not set, running mage will list available targets +// var Default = Build + +// A build step that requires additional params, or platform specific steps for example +func Build() error { + mg.Deps(InstallDeps) + fmt.Println("Building...") + cmd := exec.Command("go", "build", "-o", "MyApp", ".") + return cmd.Run() +} + +// A custom install step if you need your bin someplace other than go/bin +func Install() error { + mg.Deps(Build) + fmt.Println("Installing...") + return os.Rename("./MyApp", "/usr/bin/MyApp") +} + +// Manage your deps, or running package managers. 
+func InstallDeps() error { + fmt.Println("Installing Deps...") + cmd := exec.Command("go", "get", "github.com/stretchr/piglatin") + return cmd.Run() +} + +// Clean up after yourself +func Clean() { + fmt.Println("Cleaning...") + os.RemoveAll("MyApp") +} +` diff --git a/vendor/github.com/magefile/mage/mage/main.go b/vendor/github.com/magefile/mage/mage/main.go new file mode 100644 index 00000000..735bb21d --- /dev/null +++ b/vendor/github.com/magefile/mage/mage/main.go @@ -0,0 +1,459 @@ +package mage + +import ( + "crypto/sha1" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "text/template" + "time" + "unicode" + + "github.com/magefile/mage/build" + "github.com/magefile/mage/mg" + "github.com/magefile/mage/parse" + "github.com/magefile/mage/sh" +) + +// magicRebuildKey is used when hashing the output binary to ensure that we get +// a new binary even if nothing in the input files or generated mainfile has +// changed. This can be used when we change how we parse files, or otherwise +// change the inputs to the compiling process. +const magicRebuildKey = "v0.3" + +var output = template.Must(template.New("").Funcs(map[string]interface{}{ + "lower": strings.ToLower, + "lowerfirst": func(s string) string { + r := []rune(s) + return string(unicode.ToLower(r[0])) + string(r[1:]) + }, +}).Parse(tpl)) +var initOutput = template.Must(template.New("").Parse(mageTpl)) + +const mainfile = "mage_output_file.go" +const initFile = "magefile.go" + +// set by ldflags when you "mage build" +var ( + commitHash string + timestamp string + gitTag = "v2" +) + +//go:generate stringer -type=Command + +// Command tracks invocations of mage that run without targets or other flags. 
+type Command int + +const ( + None Command = iota + Version // report the current version of mage + Init // create a starting template for mage + Clean // clean out old compiled mage binaries from the cache +) + +// Main is the entrypoint for running mage. It exists external to mage's main +// function to allow it to be used from other programs, specifically so you can +// go run a simple file that run's mage's Main. +func Main() int { + return ParseAndRun(".", os.Stdout, os.Stderr, os.Stdin, os.Args[1:]) +} + +// Invocation contains the args for invoking a run of Mage. +type Invocation struct { + Dir string // directory to read magefiles from + Force bool // forces recreation of the compiled binary + Verbose bool // tells the magefile to print out log statements + List bool // tells the magefile to print out a list of targets + Help bool // tells the magefile to print out help for a specific target + Keep bool // tells mage to keep the generated main file after compiling + Timeout time.Duration // tells mage to set a timeout to running the targets + Stdout io.Writer // writer to write stdout messages to + Stderr io.Writer // writer to write stderr messages to + Stdin io.Reader // reader to read stdin from + Args []string // args to pass to the compiled binary +} + +// ParseAndRun parses the command line, and then compiles and runs the mage +// files in the given directory with the given args (do not include the command +// name in the args). 
+func ParseAndRun(dir string, stdout, stderr io.Writer, stdin io.Reader, args []string) int { + log := log.New(stderr, "", 0) + inv, cmd, err := Parse(stdout, args) + inv.Dir = dir + inv.Stderr = stderr + inv.Stdin = stdin + if err == flag.ErrHelp { + return 0 + } + if err != nil { + log.Println("Error:", err) + return 2 + } + + switch cmd { + case Version: + if timestamp == "" { + timestamp = "" + } + if commitHash == "" { + commitHash = "" + } + log.Println("Mage Build Tool", gitTag) + log.Println("Build Date:", timestamp) + log.Println("Commit:", commitHash) + return 0 + case Init: + if err := generateInit(dir); err != nil { + log.Println("Error:", err) + return 1 + } + log.Println(initFile, "created") + return 0 + case Clean: + dir := mg.CacheDir() + if err := removeContents(dir); err != nil { + log.Println("Error:", err) + return 1 + } + log.Println(dir, "cleaned") + return 0 + case None: + return Invoke(inv) + default: + panic(fmt.Errorf("Unknown command type: %v", cmd)) + } +} + +// Parse parses the given args and returns structured data. If parse returns +// flag.ErrHelp, the calling process should exit with code 0. +func Parse(stdout io.Writer, args []string) (inv Invocation, cmd Command, err error) { + inv.Stdout = stdout + fs := flag.FlagSet{} + fs.SetOutput(stdout) + fs.BoolVar(&inv.Force, "f", false, "force recreation of compiled magefile") + fs.BoolVar(&inv.Verbose, "v", false, "show verbose output when running mage targets") + fs.BoolVar(&inv.List, "l", false, "list mage targets in this directory") + fs.BoolVar(&inv.Help, "h", false, "show this help") + fs.DurationVar(&inv.Timeout, "t", 0, "timeout in duration parsable format (e.g. 
5m30s)") + fs.BoolVar(&inv.Keep, "keep", false, "keep intermediate mage files around after running") + var showVersion bool + fs.BoolVar(&showVersion, "version", false, "show version info for the mage binary") + var mageInit bool + fs.BoolVar(&mageInit, "init", false, "create a starting template if no mage files exist") + var clean bool + fs.BoolVar(&clean, "clean", false, "clean out old generated binaries from CACHE_DIR") + + fs.Usage = func() { + fmt.Fprintln(stdout, "mage [options] [target]") + fmt.Fprintln(stdout, "Options:") + fs.PrintDefaults() + } + err = fs.Parse(args) + if err == flag.ErrHelp { + // parse will have already called fs.Usage() + return inv, cmd, err + } + if err == nil && inv.Help && len(fs.Args()) == 0 { + fs.Usage() + // tell upstream, to just exit + return inv, cmd, flag.ErrHelp + } + + numFlags := 0 + switch { + case mageInit: + numFlags++ + cmd = Init + case showVersion: + numFlags++ + cmd = Version + case clean: + numFlags++ + cmd = Clean + if fs.NArg() > 0 || fs.NFlag() > 1 { + // Temporary dupe of below check until we refactor the other commands to use this check + return inv, cmd, errors.New("-h, -init, -clean, and -version cannot be used simultaneously") + + } + } + if inv.Help { + numFlags++ + } + + // If verbose is still false, we're going to peek at the environment variable to see if + // MAGE_VERBOSE has been set. If so, we're going to use it for the value of MAGE_VERBOSE. + if inv.Verbose == false { + envVerbose, err := strconv.ParseBool(os.Getenv("MAGE_VERBOSE")) + if err == nil { + inv.Verbose = envVerbose + } + } + + if numFlags > 1 { + return inv, cmd, errors.New("-h, -init, -clean, and -version cannot be used simultaneously") + } + + inv.Args = fs.Args() + if inv.Help && len(inv.Args) > 1 { + return inv, cmd, errors.New("-h can only show help for a single target") + } + + return inv, cmd, err +} + +// Invoke runs Mage with the given arguments. 
+func Invoke(inv Invocation) int { + log := log.New(inv.Stderr, "", 0) + + files, err := Magefiles(inv.Dir) + if err != nil { + log.Println("Error:", err) + return 1 + } + + if len(files) == 0 { + log.Println("No .go files marked with the mage build tag in this directory.") + return 1 + } + + exePath, err := ExeName(files) + + if err != nil { + log.Println("Error:", err) + return 1 + } + + if !inv.Force { + if _, err := os.Stat(exePath); err == nil { + return RunCompiled(inv, exePath) + } + } + + // parse wants dir + filenames... arg + fnames := make([]string, 0, len(files)) + for i := range files { + fnames = append(fnames, filepath.Base(files[i])) + } + + info, err := parse.Package(inv.Dir, fnames) + if err != nil { + log.Println("Error:", err) + return 1 + } + + hasDupes, names := CheckDupes(info) + if hasDupes { + log.Println("Build targets must be case insensitive, thus the follow targets conflict:") + for _, v := range names { + if len(v) > 1 { + log.Println(" " + strings.Join(v, ", ")) + } + } + return 1 + } + + main := filepath.Join(inv.Dir, mainfile) + if err := GenerateMainfile(main, info); err != nil { + log.Println("Error:", err) + return 1 + } + if !inv.Keep { + defer os.Remove(main) + } + files = append(files, main) + if err := Compile(exePath, inv.Stdout, inv.Stderr, files); err != nil { + log.Println("Error:", err) + return 1 + } + if !inv.Keep { + // remove this file before we run the compiled version, in case the + // compiled file screws things up. Yes this doubles up with the above + // defer, that's ok. + os.Remove(main) + } + + return RunCompiled(inv, exePath) +} + +// CheckDupes checks a package for duplicate target names. 
+func CheckDupes(info *parse.PkgInfo) (hasDupes bool, names map[string][]string) { + names = map[string][]string{} + lowers := map[string]bool{} + for _, f := range info.Funcs { + low := strings.ToLower(f.Name) + if lowers[low] { + hasDupes = true + } + lowers[low] = true + names[low] = append(names[low], f.Name) + } + return hasDupes, names +} + +type data struct { + Funcs []parse.Function + DefaultError bool + Default string + DefaultFunc parse.Function + Aliases map[string]string +} + +// Magefiles returns the list of magefiles in dir. +func Magefiles(dir string) ([]string, error) { + ctx := build.Default + ctx.RequiredTags = []string{"mage"} + ctx.BuildTags = []string{"mage"} + p, err := ctx.ImportDir(dir, 0) + if err != nil { + if _, ok := err.(*build.NoGoError); ok { + return []string{}, nil + } + return nil, err + } + for i := range p.GoFiles { + p.GoFiles[i] = filepath.Join(dir, p.GoFiles[i]) + } + return p.GoFiles, nil +} + +// Compile uses the go tool to compile the files into an executable at path. +func Compile(path string, stdout, stderr io.Writer, gofiles []string) error { + c := exec.Command("go", append([]string{"build", "-o", path}, gofiles...)...) 
+ c.Env = os.Environ() + c.Stderr = stderr + c.Stdout = stdout + err := c.Run() + if err != nil { + return errors.New("error compiling magefiles") + } + if _, err := os.Stat(path); err != nil { + return errors.New("failed to find compiled magefile") + } + return nil +} + +// GenerateMainfile creates the mainfile at path with the info from +func GenerateMainfile(path string, info *parse.PkgInfo) error { + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("can't create mainfile: %v", err) + } + defer f.Close() + + data := data{ + Funcs: info.Funcs, + Default: info.DefaultName, + DefaultFunc: info.DefaultFunc, + Aliases: info.Aliases, + } + + data.DefaultError = info.DefaultIsError + + if err := output.Execute(f, data); err != nil { + return fmt.Errorf("can't execute mainfile template: %v", err) + } + return nil +} + +// ExeName reports the executable filename that this version of Mage would +// create for the given magefiles. +func ExeName(files []string) (string, error) { + var hashes []string + for _, s := range files { + h, err := hashFile(s) + if err != nil { + return "", err + } + hashes = append(hashes, h) + } + // hash the mainfile template to ensure if it gets updated, we make a new + // binary. 
+ hashes = append(hashes, fmt.Sprintf("%x", sha1.Sum([]byte(tpl)))) + sort.Strings(hashes) + hash := sha1.Sum([]byte(strings.Join(hashes, "") + magicRebuildKey)) + filename := fmt.Sprintf("%x", hash) + + out := filepath.Join(mg.CacheDir(), filename) + if runtime.GOOS == "windows" { + out += ".exe" + } + return out, nil +} + +func hashFile(fn string) (string, error) { + f, err := os.Open(fn) + if err != nil { + return "", fmt.Errorf("can't open input file: %v", err) + } + defer f.Close() + + h := sha1.New() + if _, err := io.Copy(h, f); err != nil { + return "", fmt.Errorf("can't write data to hash: %v", err) + } + return fmt.Sprintf("%x", h.Sum(nil)), nil +} + +func generateInit(dir string) error { + f, err := os.Create(filepath.Join(dir, initFile)) + if err != nil { + return fmt.Errorf("could not create mage template: %v", err) + } + defer f.Close() + + if err := initOutput.Execute(f, nil); err != nil { + return fmt.Errorf("can't execute magefile template: %v", err) + } + + return nil +} + +// RunCompiled runs an already-compiled mage command with the given args, +func RunCompiled(inv Invocation, exePath string) int { + c := exec.Command(exePath, inv.Args...) 
+ c.Stderr = inv.Stderr + c.Stdout = inv.Stdout + c.Stdin = inv.Stdin + c.Env = os.Environ() + if inv.Verbose { + c.Env = append(c.Env, "MAGEFILE_VERBOSE=1") + } + if inv.List { + c.Env = append(c.Env, "MAGEFILE_LIST=1") + } + if inv.Help { + c.Env = append(c.Env, "MAGEFILE_HELP=1") + } + if inv.Timeout > 0 { + c.Env = append(c.Env, fmt.Sprintf("MAGEFILE_TIMEOUT=%s", inv.Timeout.String())) + } + return sh.ExitStatus(c.Run()) +} + +func removeContents(dir string) error { + files, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + for _, f := range files { + if f.IsDir() { + continue + } + err = os.Remove(filepath.Join(dir, f.Name())) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/magefile/mage/mage/template.go b/vendor/github.com/magefile/mage/mage/template.go new file mode 100644 index 00000000..9b75be79 --- /dev/null +++ b/vendor/github.com/magefile/mage/mage/template.go @@ -0,0 +1,202 @@ +package mage + +// var only for tests +var tpl = `// +build ignore + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "text/tabwriter" + "time" +) + +func main() { + // These functions are local variables to avoid name conflicts with + // magefiles. 
+ list := func() error { + {{- $default := .Default}} + w := tabwriter.NewWriter(os.Stdout, 0, 4, 4, ' ', 0) + fmt.Println("Targets:") + {{- range .Funcs}} + fmt.Fprintln(w, " {{lowerfirst .Name}}{{if eq .Name $default}}*{{end}}\t" + {{printf "%q" .Synopsis}}) + {{- end}} + err := w.Flush() + {{- if .Default}} + if err == nil { + fmt.Println("\n* default target") + } + {{- end}} + return err + } + + var ctx context.Context + var ctxCancel func() + + getContext := func() (context.Context, func()) { + if ctx != nil { + return ctx, ctxCancel + } + + if os.Getenv("MAGEFILE_TIMEOUT") != "" { + timeout, err := time.ParseDuration(os.Getenv("MAGEFILE_TIMEOUT")) + if err != nil { + fmt.Printf("timeout error: %v\n", err) + os.Exit(1) + } + + ctx, ctxCancel = context.WithTimeout(context.Background(), timeout) + } else { + ctx = context.Background() + ctxCancel = func() {} + } + return ctx, ctxCancel + } + + runTarget := func(fn func(context.Context) error) interface{} { + var err interface{} + ctx, cancel := getContext() + d := make(chan interface{}) + go func() { + defer func() { + err := recover() + d <- err + }() + err := fn(ctx) + d <- err + }() + select { + case <-ctx.Done(): + cancel() + e := ctx.Err() + fmt.Printf("ctx err: %v\n", e) + return e + case err = <-d: + cancel() + return err + } + } + // This is necessary in case there aren't any targets, to avoid an unused + // variable error. 
+ _ = runTarget + + handleError := func(logger *log.Logger, err interface{}) { + if err != nil { + logger.Printf("Error: %v\n", err) + type code interface { + ExitStatus() int + } + if c, ok := err.(code); ok { + os.Exit(c.ExitStatus()) + } + os.Exit(1) + } + } + _ = handleError + + log.SetFlags(0) + if os.Getenv("MAGEFILE_VERBOSE") == "" { + log.SetOutput(ioutil.Discard) + } + logger := log.New(os.Stderr, "", 0) + if os.Getenv("MAGEFILE_LIST") != "" { + if err := list(); err != nil { + log.Println(err) + os.Exit(1) + } + return + } + + targets := map[string]bool { + {{range $alias, $funci := .Aliases}}"{{lower $alias}}": true, + {{end}} + {{range .Funcs}}"{{lower .Name}}": true, + {{end}} + } + + var unknown []string + for _, arg := range os.Args[1:] { + if !targets[strings.ToLower(arg)] { + unknown = append(unknown, arg) + } + } + if len(unknown) == 1 { + logger.Println("Unknown target specified:", unknown[0]) + os.Exit(2) + } + if len(unknown) > 1 { + logger.Println("Unknown targets specified:", strings.Join(unknown, ", ")) + os.Exit(2) + } + + if os.Getenv("MAGEFILE_HELP") != "" { + if len(os.Args) < 2 { + logger.Println("no target specified") + os.Exit(1) + } + switch strings.ToLower(os.Args[1]) { + {{range .Funcs}}case "{{lower .Name}}": + fmt.Print("mage {{lower .Name}}:\n\n") + {{if ne .Comment ""}}fmt.Println({{printf "%q" .Comment}}){{end}} + var aliases []string + {{- $name := .Name -}} + {{range $alias, $func := $.Aliases}} + {{if eq $name $func}}aliases = append(aliases, "{{$alias}}"){{end -}} + {{- end}} + if len(aliases) > 0 { + fmt.Printf("Aliases: %s\n\n", strings.Join(aliases, ", ")) + } + return + {{end}} + default: + logger.Printf("Unknown target: %q\n", os.Args[1]) + os.Exit(1) + } + } + + if len(os.Args) < 2 { + {{- if .Default}} + {{.DefaultFunc.TemplateString}} + handleError(logger, err) + return + {{- else}} + if err := list(); err != nil { + logger.Println("Error:", err) + os.Exit(1) + } + return + {{- end}} + } + for _, target := range 
os.Args[1:] { + switch strings.ToLower(target) { + {{range $alias, $func := .Aliases}} + case "{{lower $alias}}": + target = "{{$func}}" + {{- end}} + } + switch strings.ToLower(target) { + {{range .Funcs }} + case "{{lower .Name}}": + if os.Getenv("MAGEFILE_VERBOSE") != "" { + logger.Println("Running target:", "{{.Name}}") + } + {{.TemplateString}} + handleError(logger, err) + {{- end}} + default: + // should be impossible since we check this above. + logger.Printf("Unknown target: %q\n", os.Args[1]) + os.Exit(1) + } + } +} + + + + +` diff --git a/vendor/github.com/magefile/mage/magefile.go b/vendor/github.com/magefile/mage/magefile.go new file mode 100644 index 00000000..2bb3ace6 --- /dev/null +++ b/vendor/github.com/magefile/mage/magefile.go @@ -0,0 +1,94 @@ +//+build mage + +package main + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/magefile/mage/sh" +) + +// Runs "go install" for mage. This generates the version info the binary. +func Install() error { + ldf, err := flags() + if err != nil { + return err + } + + name := "mage" + if runtime.GOOS == "windows" { + name += ".exe" + } + gopath, err := sh.Output("go", "env", "GOPATH") + if err != nil { + return fmt.Errorf("can't determine GOPATH: %v", err) + } + paths := strings.Split(gopath, string([]rune{os.PathListSeparator})) + bin := filepath.Join(paths[0], "bin") + // specifically don't mkdirall, if you have an invalid gopath in the first + // place, that's not on us to fix. 
+ if err := os.Mkdir(bin, 0700); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create %q: %v", bin, err) + } + path := filepath.Join(bin, name) + + // we use go build here because if someone built with go get, then `go + // install` turns into a no-op, and `go install -a` fails on people's + // machines that have go installed in a non-writeable directory (such as + // normal OS installs in /usr/bin) + return sh.RunV("go", "build", "-o", path, "-ldflags="+ldf, "github.com/magefile/mage") +} + +// Generates a new release. Expects the TAG environment variable to be set, +// which will create a new tag with that name. +func Release() (err error) { + if os.Getenv("TAG") == "" { + return errors.New("MSG and TAG environment variables are required") + } + if err := sh.RunV("git", "tag", "-a", "$TAG"); err != nil { + return err + } + if err := sh.RunV("git", "push", "origin", "$TAG"); err != nil { + return err + } + defer func() { + if err != nil { + sh.RunV("git", "tag", "--delete", "$TAG") + sh.RunV("git", "push", "--delete", "origin", "$TAG") + } + }() + return sh.RunV("goreleaser") +} + +// Remove the temporarily generated files from Release. +func Clean() error { + return sh.Rm("dist") +} + +func flags() (string, error) { + timestamp := time.Now().Format(time.RFC3339) + hash := hash() + tag := tag() + if tag == "" { + tag = "dev" + } + return fmt.Sprintf(`-X "github.com/magefile/mage/mage.timestamp=%s" -X "github.com/magefile/mage/mage.commitHash=%s" -X "github.com/magefile/mage/mage.gitTag=%s"`, timestamp, hash, tag), nil +} + +// tag returns the git tag for the current branch or "" if none. +func tag() string { + s, _ := sh.Output("git", "describe", "--tags") + return s +} + +// hash returns the git hash for the current repo or "" if none. 
+func hash() string { + hash, _ := sh.Output("git", "rev-parse", "--short", "HEAD") + return hash +} diff --git a/vendor/github.com/magefile/mage/main.go b/vendor/github.com/magefile/mage/main.go new file mode 100644 index 00000000..d596ac7f --- /dev/null +++ b/vendor/github.com/magefile/mage/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "os" + + "github.com/magefile/mage/mage" +) + +func main() { + os.Exit(mage.Main()) +} diff --git a/vendor/github.com/magefile/mage/mg/deps.go b/vendor/github.com/magefile/mage/mg/deps.go new file mode 100644 index 00000000..30d6edc4 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/deps.go @@ -0,0 +1,166 @@ +package mg + +import ( + "context" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + + "github.com/magefile/mage/types" +) + +type onceMap struct { + mu *sync.Mutex + m map[string]*onceFun +} + +func (o *onceMap) LoadOrStore(s string, one *onceFun) *onceFun { + defer o.mu.Unlock() + o.mu.Lock() + + existing, ok := o.m[s] + if ok { + return existing + } + o.m[s] = one + return one +} + +var onces = &onceMap{ + mu: &sync.Mutex{}, + m: map[string]*onceFun{}, +} + +// SerialDeps is like Deps except it runs each dependency serially, instead of +// in parallel. This can be useful for resource intensive dependencies that +// shouldn't be run at the same time. +func SerialDeps(fns ...interface{}) { + checkFns(fns) + ctx := context.Background() + for _, f := range fns { + runDeps(ctx, f) + } +} + +// SerialCtxDeps is like CtxDeps except it runs each dependency serially, +// instead of in parallel. This can be useful for resource intensive +// dependencies that shouldn't be run at the same time. +func SerialCtxDeps(ctx context.Context, fns ...interface{}) { + checkFns(fns) + for _, f := range fns { + runDeps(ctx, f) + } +} + +// CtxDeps runs the given functions as dependencies of the calling function. +// Dependencies must only be of type: github.com/magefile/mage/types.FuncType. 
+// The function calling Deps is guaranteed that all dependent functions will be +// run exactly once when Deps returns. Dependent functions may in turn declare +// their own dependencies using Deps. Each dependency is run in their own +// goroutines. Each function is given the context provided if the function +// prototype allows for it. +func CtxDeps(ctx context.Context, fns ...interface{}) { + checkFns(fns) + runDeps(ctx, fns...) +} + +// runDeps assumes you've already called checkFns. +func runDeps(ctx context.Context, fns ...interface{}) { + mu := &sync.Mutex{} + var errs []string + var exit int + wg := &sync.WaitGroup{} + for _, f := range fns { + fn := addDep(ctx, f) + wg.Add(1) + go func() { + defer func() { + if v := recover(); v != nil { + mu.Lock() + if err, ok := v.(error); ok { + exit = changeExit(exit, ExitStatus(err)) + } else { + exit = changeExit(exit, 1) + } + errs = append(errs, fmt.Sprint(v)) + mu.Unlock() + } + wg.Done() + }() + if err := fn.run(); err != nil { + mu.Lock() + errs = append(errs, fmt.Sprint(err)) + exit = changeExit(exit, ExitStatus(err)) + mu.Unlock() + } + }() + } + + wg.Wait() + if len(errs) > 0 { + panic(Fatal(exit, strings.Join(errs, "\n"))) + } +} + +func checkFns(fns []interface{}) { + for _, f := range fns { + if err := types.FuncCheck(f); err != nil { + panic(err) + } + } +} + +// Deps runs the given functions with the default runtime context +func Deps(fns ...interface{}) { + CtxDeps(context.Background(), fns...) +} + +func changeExit(old, new int) int { + if new == 0 { + return old + } + if old == 0 { + return new + } + if old == new { + return old + } + // both different and both non-zero, just set + // exit to 1. Nothing more we can do. 
+ return 1 +} + +func addDep(ctx context.Context, f interface{}) *onceFun { + var fn func(context.Context) error + if fn = types.FuncTypeWrap(f); fn == nil { + // should be impossible, since we already checked this + panic("attempted to add a dep that did not match required type") + } + + n := name(f) + of := onces.LoadOrStore(n, &onceFun{ + fn: fn, + ctx: ctx, + }) + return of +} + +func name(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} + +type onceFun struct { + once sync.Once + fn func(context.Context) error + ctx context.Context +} + +func (o *onceFun) run() error { + var err error + o.once.Do(func() { + err = o.fn(o.ctx) + }) + return err +} diff --git a/vendor/github.com/magefile/mage/mg/errors.go b/vendor/github.com/magefile/mage/mg/errors.go new file mode 100644 index 00000000..06a86908 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/errors.go @@ -0,0 +1,51 @@ +package mg + +import ( + "errors" + "fmt" +) + +type fatalErr struct { + code int + error +} + +func (f fatalErr) ExitStatus() int { + return f.code +} + +type exitStatus interface { + ExitStatus() int +} + +// Fatal returns an error that will cause mage to print out the +// given args and exit with the given exit code. +func Fatal(code int, args ...interface{}) error { + return fatalErr{ + code: code, + error: errors.New(fmt.Sprint(args...)), + } +} + +// Fatalf returns an error that will cause mage to print out the +// given message and exit with an exit code of 1. +func Fatalf(code int, format string, args ...interface{}) error { + return fatalErr{ + code: code, + error: fmt.Errorf(format, args...), + } +} + +// ExitStatus queries the error for an exit status. If the error is nil, it +// returns 0. If the error does not implement ExitStatus() int, it returns 1. +// Otherwise it retiurns the value from ExitStatus(). 
+func ExitStatus(err error) int { + if err == nil { + return 0 + } + exit, ok := err.(exitStatus) + if !ok { + return 1 + } + return exit.ExitStatus() +} diff --git a/vendor/github.com/magefile/mage/mg/runtime.go b/vendor/github.com/magefile/mage/mg/runtime.go new file mode 100644 index 00000000..8b99613d --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/runtime.go @@ -0,0 +1,36 @@ +package mg + +import ( + "os" + "path/filepath" + "runtime" +) + +// CacheEnv is the environment variable that users may set to change the +// location where mage stores its compiled binaries. +const CacheEnv = "MAGEFILE_CACHE" + +// verboseEnv is the environment variable that indicates the user requested +// verbose mode when running a magefile. +const verboseEnv = "MAGEFILE_VERBOSE" + +// Verbose reports whether a magefile was run with the verbose flag. +func Verbose() bool { + return os.Getenv(verboseEnv) != "" +} + +// CacheDir returns the directory where mage caches compiled binaries. It +// defaults to $HOME/.magefile, but may be overridden by the MAGEFILE_CACHE +// environment variable. 
+func CacheDir() string { + d := os.Getenv(CacheEnv) + if d != "" { + return d + } + switch runtime.GOOS { + case "windows": + return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"), "magefile") + default: + return filepath.Join(os.Getenv("HOME"), ".magefile") + } +} diff --git a/vendor/github.com/magefile/mage/parse/import_go1.9.go b/vendor/github.com/magefile/mage/parse/import_go1.9.go new file mode 100644 index 00000000..9b5c7121 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/import_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package parse + +import ( + "go/importer" + "go/token" + "go/types" +) + +func getImporter(*token.FileSet) types.Importer { + return importer.For("source", nil) +} diff --git a/vendor/github.com/magefile/mage/parse/import_not_go1.9.go b/vendor/github.com/magefile/mage/parse/import_not_go1.9.go new file mode 100644 index 00000000..ed4e951e --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/import_not_go1.9.go @@ -0,0 +1,15 @@ +// +build !go1.9 + +package parse + +import ( + "go/build" + "go/token" + "go/types" + + "github.com/magefile/mage/parse/srcimporter" +) + +func getImporter(fset *token.FileSet) types.Importer { + return srcimporter.New(&build.Default, fset, make(map[string]*types.Package)) +} diff --git a/vendor/github.com/magefile/mage/parse/parse.go b/vendor/github.com/magefile/mage/parse/parse.go new file mode 100644 index 00000000..05226290 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/parse.go @@ -0,0 +1,341 @@ +package parse + +import ( + "fmt" + "go/ast" + "go/build" + "go/doc" + "go/parser" + "go/token" + "go/types" + "log" + "os" + "os/exec" + "strings" + + mgTypes "github.com/magefile/mage/types" +) + +type PkgInfo struct { + Funcs []Function + DefaultIsError bool + DefaultIsContext bool + DefaultName string + DefaultFunc Function + Aliases map[string]string +} + +// Function represented a job function from a mage file +type Function struct { + Name string + IsError bool + 
IsContext bool + Synopsis string + Comment string +} + +// TemplateString returns code for the template switch to run the target. +// It wraps each target call to match the func(context.Context) error that +// runTarget requires. +func (f Function) TemplateString() string { + if f.IsContext && f.IsError { + out := `wrapFn := func(ctx context.Context) error { + return %s(ctx) + } + err := runTarget(wrapFn)` + return fmt.Sprintf(out, f.Name) + } + if f.IsContext && !f.IsError { + out := `wrapFn := func(ctx context.Context) error { + %s(ctx) + return nil + } + err := runTarget(wrapFn)` + return fmt.Sprintf(out, f.Name) + } + if !f.IsContext && f.IsError { + out := `wrapFn := func(ctx context.Context) error { + return %s() + } + err := runTarget(wrapFn)` + return fmt.Sprintf(out, f.Name) + } + if !f.IsContext && !f.IsError { + out := `wrapFn := func(ctx context.Context) error { + %s() + return nil + } + err := runTarget(wrapFn)` + return fmt.Sprintf(out, f.Name) + } + return `fmt.Printf("Error formatting job code\n") + os.Exit(1)` +} + +// Package parses a package +func Package(path string, files []string) (*PkgInfo, error) { + fset := token.NewFileSet() + + pkg, err := getPackage(path, files, fset) + if err != nil { + return nil, err + } + + info, err := makeInfo(path, fset, pkg.Files) + if err != nil { + return nil, err + } + + pi := &PkgInfo{} + + p := doc.New(pkg, "./", 0) + for _, f := range p.Funcs { + if f.Recv != "" { + // skip methods + continue + } + if !ast.IsExported(f.Name) { + // skip non-exported functions + continue + } + if typ := voidOrError(f.Decl.Type, info); typ != mgTypes.InvalidType { + pi.Funcs = append(pi.Funcs, Function{ + Name: f.Name, + Comment: f.Doc, + Synopsis: sanitizeSynopsis(f), + IsError: typ == mgTypes.ErrorType || typ == mgTypes.ContextErrorType, + IsContext: typ == mgTypes.ContextVoidType || typ == mgTypes.ContextErrorType, + }) + } + } + + setDefault(p, pi, info) + setAliases(p, pi, info) + + return pi, nil +} + +// 
sanitizeSynopsis sanitizes function Doc to create a summary. +func sanitizeSynopsis(f *doc.Func) string { + synopsis := doc.Synopsis(f.Doc) + + // If the synopsis begins with the function name, remove it. This is done to + // not repeat the text. + // From: + // clean Clean removes the temporarily generated files + // To: + // clean removes the temporarily generated files + if syns := strings.Split(synopsis, " "); strings.EqualFold(f.Name, syns[0]) { + return strings.Join(syns[1:], " ") + } + + return synopsis +} + +func setDefault(p *doc.Package, pi *PkgInfo, info types.Info) { + for _, v := range p.Vars { + for x, name := range v.Names { + if name != "Default" { + continue + } + spec := v.Decl.Specs[x].(*ast.ValueSpec) + if len(spec.Values) != 1 { + log.Println("warning: default declaration has multiple values") + } + id, ok := spec.Values[0].(*ast.Ident) + if !ok { + log.Println("warning: default declaration is not a function name") + } + for _, f := range pi.Funcs { + if f.Name == id.Name { + pi.DefaultName = f.Name + pi.DefaultIsError = f.IsError + pi.DefaultIsContext = f.IsContext + pi.DefaultFunc = f + return + } + } + log.Println("warning: default declaration does not reference a mage target") + } + } +} + +func setAliases(p *doc.Package, pi *PkgInfo, info types.Info) { + for _, v := range p.Vars { + for x, name := range v.Names { + if name != "Aliases" { + continue + } + spec, ok := v.Decl.Specs[x].(*ast.ValueSpec) + if !ok { + log.Println("warning: aliases declaration is not a value") + return + } + if len(spec.Values) != 1 { + log.Println("warning: aliases declaration has multiple values") + } + comp, ok := spec.Values[0].(*ast.CompositeLit) + if !ok { + log.Println("warning: aliases declaration is not a map") + return + } + pi.Aliases = make(map[string]string) + for _, elem := range comp.Elts { + kv, ok := elem.(*ast.KeyValueExpr) + if !ok { + log.Println("warning: alias declaration is not a map element") + return + } + k, ok := kv.Key.(*ast.BasicLit) + 
if !ok || k.Kind != token.STRING { + log.Println("warning: alias is not a string") + return + } + v, ok := kv.Value.(*ast.Ident) + if !ok { + log.Println("warning: alias target is not a function") + return + } + alias := strings.Trim(k.Value, "\"") + valid := false + for _, f := range pi.Funcs { + valid = valid || f.Name == v.Name + } + if !valid { + log.Printf("warning: alias declaration (%s) does not reference a mage target", alias) + } + pi.Aliases[alias] = v.Name + } + return + } + } +} + +// getPackage returns the non-test package at the given path. +func getPackage(path string, files []string, fset *token.FileSet) (*ast.Package, error) { + fm := make(map[string]bool, len(files)) + for _, f := range files { + fm[f] = true + } + + filter := func(f os.FileInfo) bool { + return fm[f.Name()] + } + + pkgs, err := parser.ParseDir(fset, path, filter, parser.ParseComments) + if err != nil { + return nil, fmt.Errorf("failed to parse directory: %v", err) + } + + for name, pkg := range pkgs { + if !strings.HasSuffix(name, "_test") { + return pkg, nil + } + } + return nil, fmt.Errorf("no non-test packages found in %s", path) +} + +func makeInfo(dir string, fset *token.FileSet, files map[string]*ast.File) (types.Info, error) { + goroot := os.Getenv("GOROOT") + if goroot == "" { + c := exec.Command("go", "env", "GOROOT") + b, err := c.Output() + if err != nil { + return types.Info{}, fmt.Errorf("failed to get GOROOT from 'go env': %v", err) + } + goroot = strings.TrimSpace(string(b)) + if goroot == "" { + return types.Info{}, fmt.Errorf("could not determine GOROOT") + } + } + + build.Default.GOROOT = goroot + + cfg := types.Config{ + Importer: getImporter(fset), + } + + info := types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + } + + fs := make([]*ast.File, 0, len(files)) + for _, v := range files { + fs = append(fs, v) + } + + _, err := cfg.Check(dir, fset, fs, &info) + if err 
!= nil { + return info, fmt.Errorf("failed to check types in directory: %v", err) + } + return info, nil +} + +// errorOrVoid filters the list of functions to only those that return only an +// error or have no return value, and have no parameters. +func errorOrVoid(fns []*ast.FuncDecl, info types.Info) []*ast.FuncDecl { + fds := []*ast.FuncDecl{} + + for _, fn := range fns { + if voidOrError(fn.Type, info) != mgTypes.InvalidType { + fds = append(fds, fn) + } + } + return fds +} + +func hasContextParam(ft *ast.FuncType, info types.Info) bool { + if ft.Params.NumFields() == 1 { + ret := ft.Params.List[0] + t := info.TypeOf(ret.Type) + if t != nil && t.String() == "context.Context" { + return true + } + } + return false +} + +func hasVoidReturn(ft *ast.FuncType, info types.Info) bool { + res := ft.Results + if res.NumFields() == 0 { + return true + } + return false +} + +func hasErrorReturn(ft *ast.FuncType, info types.Info) bool { + res := ft.Results + if res.NumFields() == 1 { + ret := res.List[0] + if len(ret.Names) > 1 { + return false + } + t := info.TypeOf(ret.Type) + if t != nil && t.String() == "error" { + return true + } + } + return false +} + +func voidOrError(ft *ast.FuncType, info types.Info) mgTypes.FuncType { + if hasContextParam(ft, info) { + if hasVoidReturn(ft, info) { + return mgTypes.ContextVoidType + } + if hasErrorReturn(ft, info) { + return mgTypes.ContextErrorType + } + } + if ft.Params.NumFields() == 0 { + if hasVoidReturn(ft, info) { + return mgTypes.VoidType + } + if hasErrorReturn(ft, info) { + return mgTypes.ErrorType + } + } + return mgTypes.InvalidType +} diff --git a/vendor/github.com/magefile/mage/parse/srcimporter/sizes.go b/vendor/github.com/magefile/mage/parse/srcimporter/sizes.go new file mode 100644 index 00000000..a9e1b329 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/srcimporter/sizes.go @@ -0,0 +1,40 @@ +// +build !go1.9 + +package srcimporter + +import "go/types" + +// common architecture word sizes and alignments 
+var gcArchSizes = map[string]*types.StdSizes{ + "386": {4, 4}, + "arm": {4, 4}, + "arm64": {8, 8}, + "amd64": {8, 8}, + "amd64p32": {4, 8}, + "mips": {4, 4}, + "mipsle": {4, 4}, + "mips64": {8, 8}, + "mips64le": {8, 8}, + "ppc64": {8, 8}, + "ppc64le": {8, 8}, + "s390x": {8, 8}, + // When adding more architectures here, + // update the doc string of SizesFor below. +} + +// SizesFor returns the Sizes used by a compiler for an architecture. +// The result is nil if a compiler/architecture pair is not known. +// +// Supported architectures for compiler "gc": +// "386", "arm", "arm64", "amd64", "amd64p32", "mips", "mipsle", +// "mips64", "mips64le", "ppc64", "ppc64le", "s390x". +func SizesFor(compiler, arch string) types.Sizes { + if compiler != "gc" { + return nil + } + s, ok := gcArchSizes[arch] + if !ok { + return nil + } + return s +} diff --git a/vendor/github.com/magefile/mage/parse/srcimporter/srcimporter.go b/vendor/github.com/magefile/mage/parse/srcimporter/srcimporter.go new file mode 100644 index 00000000..a488a990 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/srcimporter/srcimporter.go @@ -0,0 +1,213 @@ +// +build !go1.9 + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package srcimporter implements importing directly +// from source files rather than installed packages. +package srcimporter + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "go/types" + "path/filepath" + "sync" +) + +// An Importer provides the context for importing packages from source code. +type Importer struct { + ctxt *build.Context + fset *token.FileSet + sizes types.Sizes + packages map[string]*types.Package +} + +// NewImporter returns a new Importer for the given context, file set, and map +// of packages. The context is used to resolve import paths to package paths, +// and identifying the files belonging to the package. 
If the context provides +// non-nil file system functions, they are used instead of the regular package +// os functions. The file set is used to track position information of package +// files; and imported packages are added to the packages map. +func New(ctxt *build.Context, fset *token.FileSet, packages map[string]*types.Package) *Importer { + return &Importer{ + ctxt: ctxt, + fset: fset, + sizes: SizesFor(ctxt.Compiler, ctxt.GOARCH), // uses go/types default if GOARCH not found + packages: packages, + } +} + +// Importing is a sentinel taking the place in Importer.packages +// for a package that is in the process of being imported. +var importing types.Package + +// Import(path) is a shortcut for ImportFrom(path, "", 0). +func (p *Importer) Import(path string) (*types.Package, error) { + return p.ImportFrom(path, "", 0) +} + +// ImportFrom imports the package with the given import path resolved from the given srcDir, +// adds the new package to the set of packages maintained by the importer, and returns the +// package. Package path resolution and file system operations are controlled by the context +// maintained with the importer. The import mode must be zero but is otherwise ignored. +// Packages that are not comprised entirely of pure Go files may fail to import because the +// type checker may not be able to determine all exported entities (e.g. due to cgo dependencies). 
+func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) { + if mode != 0 { + panic("non-zero import mode") + } + + // determine package path (do vendor resolution) + var bp *build.Package + var err error + switch { + default: + if abs, err := p.absPath(srcDir); err == nil { // see issue #14282 + srcDir = abs + } + bp, err = p.ctxt.Import(path, srcDir, build.FindOnly) + + case build.IsLocalImport(path): + // "./x" -> "srcDir/x" + bp, err = p.ctxt.ImportDir(filepath.Join(srcDir, path), build.FindOnly) + + case p.isAbsPath(path): + return nil, fmt.Errorf("invalid absolute import path %q", path) + } + if err != nil { + return nil, err // err may be *build.NoGoError - return as is + } + + // package unsafe is known to the type checker + if bp.ImportPath == "unsafe" { + return types.Unsafe, nil + } + + // no need to re-import if the package was imported completely before + pkg := p.packages[bp.ImportPath] + if pkg != nil { + if pkg == &importing { + return nil, fmt.Errorf("import cycle through package %q", bp.ImportPath) + } + if !pkg.Complete() { + // Package exists but is not complete - we cannot handle this + // at the moment since the source importer replaces the package + // wholesale rather than augmenting it (see #19337 for details). + // Return incomplete package with error (see #16088). + return pkg, fmt.Errorf("reimported partially imported package %q", bp.ImportPath) + } + return pkg, nil + } + + p.packages[bp.ImportPath] = &importing + defer func() { + // clean up in case of error + // TODO(gri) Eventually we may want to leave a (possibly empty) + // package in the map in all cases (and use that package to + // identify cycles). See also issue 16088. 
+ if p.packages[bp.ImportPath] == &importing { + p.packages[bp.ImportPath] = nil + } + }() + + // collect package files + bp, err = p.ctxt.ImportDir(bp.Dir, 0) + if err != nil { + return nil, err // err may be *build.NoGoError - return as is + } + var filenames []string + filenames = append(filenames, bp.GoFiles...) + filenames = append(filenames, bp.CgoFiles...) + + files, err := p.parseFiles(bp.Dir, filenames) + if err != nil { + return nil, err + } + + // type-check package files + conf := types.Config{ + IgnoreFuncBodies: true, + FakeImportC: true, + Importer: p, + Sizes: p.sizes, + } + pkg, err = conf.Check(bp.ImportPath, p.fset, files, nil) + if err != nil { + // Type-checking stops after the first error (types.Config.Error is not set), + // so the returned package is very likely incomplete. Don't return it since + // we don't know its condition: It's very likely unsafe to use and it's also + // not added to p.packages which may cause further problems (issue #20837). + return nil, fmt.Errorf("type-checking package %q failed (%v)", bp.ImportPath, err) + } + + p.packages[bp.ImportPath] = pkg + return pkg, nil +} + +func (p *Importer) parseFiles(dir string, filenames []string) ([]*ast.File, error) { + open := p.ctxt.OpenFile // possibly nil + + files := make([]*ast.File, len(filenames)) + errors := make([]error, len(filenames)) + + var wg sync.WaitGroup + wg.Add(len(filenames)) + for i, filename := range filenames { + go func(i int, filepath string) { + defer wg.Done() + if open != nil { + src, err := open(filepath) + if err != nil { + errors[i] = fmt.Errorf("opening package file %s failed (%v)", filepath, err) + return + } + files[i], errors[i] = parser.ParseFile(p.fset, filepath, src, 0) + src.Close() // ignore Close error - parsing may have succeeded which is all we need + } else { + // Special-case when ctxt doesn't provide a custom OpenFile and use the + // parser's file reading mechanism directly. 
This appears to be quite a + // bit faster than opening the file and providing an io.ReaderCloser in + // both cases. + // TODO(gri) investigate performance difference (issue #19281) + files[i], errors[i] = parser.ParseFile(p.fset, filepath, nil, 0) + } + }(i, p.joinPath(dir, filename)) + } + wg.Wait() + + // if there are errors, return the first one for deterministic results + for _, err := range errors { + if err != nil { + return nil, err + } + } + + return files, nil +} + +// context-controlled file system operations + +func (p *Importer) absPath(path string) (string, error) { + // TODO(gri) This should be using p.ctxt.AbsPath which doesn't + // exist but probably should. See also issue #14282. + return filepath.Abs(path) +} + +func (p *Importer) isAbsPath(path string) bool { + if f := p.ctxt.IsAbsPath; f != nil { + return f(path) + } + return filepath.IsAbs(path) +} + +func (p *Importer) joinPath(elem ...string) string { + if f := p.ctxt.JoinPath; f != nil { + return f(elem...) + } + return filepath.Join(elem...) +} diff --git a/vendor/github.com/magefile/mage/sh/cmd.go b/vendor/github.com/magefile/mage/sh/cmd.go new file mode 100644 index 00000000..23fc3722 --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/cmd.go @@ -0,0 +1,165 @@ +package sh + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "os/exec" + "strings" + + "github.com/magefile/mage/mg" +) + +// RunCmd returns a function that will call Run with the given command. This is +// useful for creating command aliases to make your scripts easier to read, like +// this: +// +// // in a helper file somewhere +// var g0 = sh.RunCmd("go") // go is a keyword :( +// +// // somewhere in your main code +// if err := g0("install", "github.com/gohugo/hugo"); err != nil { +// return err +// } +// +// Args passed to command get baked in as args to the command when you run it. +// Any args passed in when you run the returned function will be appended to the +// original args. 
For example, this is equivalent to the above: +// +// var goInstall = sh.RunCmd("go", "install") goInstall("github.com/gohugo/hugo") +// +// RunCmd uses Exec underneath, so see those docs for more details. +func RunCmd(cmd string, args ...string) func(args ...string) error { + return func(args2 ...string) error { + return Run(cmd, append(args, args2...)...) + } +} + +// OutCmd is like RunCmd except the command returns the output of the +// command. +func OutCmd(cmd string, args ...string) func(args ...string) (string, error) { + return func(args2 ...string) (string, error) { + return Output(cmd, append(args, args2...)...) + } +} + +// Run is like RunWith, but doesn't specify any environment variables. +func Run(cmd string, args ...string) error { + return RunWith(nil, cmd, args...) +} + +// RunV is like Run, but always sends the command's stdout to os.Stdout. +func RunV(cmd string, args ...string) error { + _, err := Exec(nil, os.Stdout, os.Stderr, cmd, args...) + return err +} + +// RunWith runs the given command, directing stderr to this program's stderr and +// printing stdout to stdout if mage was run with -v. It adds env to the +// environment variables for the command being run. Environment variables should +// be in the format name=value. +func RunWith(env map[string]string, cmd string, args ...string) error { + var output io.Writer + if mg.Verbose() { + output = os.Stdout + } + _, err := Exec(env, output, os.Stderr, cmd, args...) + return err +} + +// Output runs the command and returns the text from stdout. +func Output(cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(nil, buf, os.Stderr, cmd, args...) + return strings.TrimSuffix(buf.String(), "\n"), err +} + +// OutputWith is like RunWith, but returns what is written to stdout. +func OutputWith(env map[string]string, cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(env, buf, os.Stderr, cmd, args...) 
+ return strings.TrimSuffix(buf.String(), "\n"), err +} + +// Exec executes the command, piping its stderr to mage's stderr and +// piping its stdout to the given writer. If the command fails, it will return +// an error that, if returned from a target or mg.Deps call, will cause mage to +// exit with the same code as the command failed with. Env is a list of +// environment variables to set when running the command, these override the +// current environment variables set (which are also passed to the command). cmd +// and args may include references to environment variables in $FOO format, in +// which case these will be expanded before the command is run. +// +// Ran reports if the command ran (rather than was not found or not executable). +// Code reports the exit code the command returned if it ran. If err == nil, ran +// is always true and code is always 0. +func Exec(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, err error) { + expand := func(s string) string { + s2, ok := env[s] + if ok { + return s2 + } + return os.Getenv(s) + } + cmd = os.Expand(cmd, expand) + for i := range args { + args[i] = os.Expand(args[i], expand) + } + ran, code, err := run(env, stdout, stderr, cmd, args...) + if err == nil { + return true, nil + } + if ran { + return ran, mg.Fatalf(code, `running "%s %s" failed with exit code %d`, cmd, strings.Join(args, " "), code) + } + return ran, fmt.Errorf(`failed to run "%s %s: %v"`, cmd, strings.Join(args, " "), err) +} + +func run(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, code int, err error) { + c := exec.Command(cmd, args...) 
+ c.Env = os.Environ() + for k, v := range env { + c.Env = append(c.Env, k+"="+v) + } + c.Stderr = stderr + c.Stdout = stdout + c.Stdin = os.Stdin + log.Println("exec:", cmd, strings.Join(args, " ")) + err = c.Run() + return cmdRan(err), ExitStatus(err), err +} + +func cmdRan(err error) bool { + if err == nil { + return true + } + ee, ok := err.(*exec.ExitError) + if ok { + return ee.Exited() + } + return false +} + +type exitStatus interface { + ExitStatus() int +} + +// ExitStatus returns the exit status of the error if it is an exec.ExitError +// or if it implements ExitStatus() int. +// 0 if it is nil or 1 if it is a different error. +func ExitStatus(err error) int { + if err == nil { + return 0 + } + if e, ok := err.(exitStatus); ok { + return e.ExitStatus() + } + if e, ok := err.(*exec.ExitError); ok { + if ex, ok := e.Sys().(exitStatus); ok { + return ex.ExitStatus() + } + } + return 1 +} diff --git a/vendor/github.com/magefile/mage/sh/helpers.go b/vendor/github.com/magefile/mage/sh/helpers.go new file mode 100644 index 00000000..86b075ef --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/helpers.go @@ -0,0 +1,16 @@ +package sh + +import ( + "fmt" + "os" +) + +// Rm removes the given file or directory even if non-empty. It will not return +// an error if the target doesn't exist, only if the target cannot be removed. +func Rm(path string) error { + err := os.RemoveAll(path) + if err == nil || os.IsNotExist(err) { + return nil + } + return fmt.Errorf(`failed to remove %s: %v`, path, err) +} diff --git a/vendor/github.com/magefile/mage/target/target.go b/vendor/github.com/magefile/mage/target/target.go new file mode 100644 index 00000000..a2866e92 --- /dev/null +++ b/vendor/github.com/magefile/mage/target/target.go @@ -0,0 +1,122 @@ +package target + +import ( + "os" + "path/filepath" + "time" +) + +// Path reports if any of the sources have been modified more recently +// than the destination. 
Path does not descend into directories, it literally +// just checks the modtime of each thing you pass to it. +func Path(dst string, sources ...string) (bool, error) { + stat, err := os.Stat(dst) + if err != nil { + return false, err + } + srcTime := stat.ModTime() + dt, err := loadTargets(sources) + if err != nil { + return false, err + } + t := dt.modTime() + if t.After(srcTime) { + return true, nil + } + return false, nil +} + +// Dir reports whether any of the sources have been modified +// more recently than the destination. If a source or destination is +// a directory, modtimes of files under those directories are compared +// instead. +func Dir(dst string, sources ...string) (bool, error) { + stat, err := os.Stat(dst) + if err != nil { + return false, err + } + srcTime := stat.ModTime() + if stat.IsDir() { + srcTime, err = calDirModTimeRecursive(stat) + if err != nil { + return false, err + } + } + dt, err := loadTargets(sources) + if err != nil { + return false, err + } + t, err := dt.modTimeDir() + if err != nil { + return false, err + } + if t.After(srcTime) { + return true, nil + } + return false, nil +} + +func calDirModTimeRecursive(dir os.FileInfo) (time.Time, error) { + t := dir.ModTime() + ferr := filepath.Walk(dir.Name(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.ModTime().After(t) { + t = info.ModTime() + } + return nil + }) + if ferr != nil { + return time.Time{}, ferr + } + return t, nil +} + +type depTargets struct { + src []os.FileInfo + hasdir bool + latest time.Time +} + +func loadTargets(targets []string) (*depTargets, error) { + d := &depTargets{} + for _, v := range targets { + stat, err := os.Stat(v) + if err != nil { + return nil, err + } + if stat.IsDir() { + d.hasdir = true + } + d.src = append(d.src, stat) + if stat.ModTime().After(d.latest) { + d.latest = stat.ModTime() + } + } + return d, nil +} + +func (d *depTargets) modTime() time.Time { + return d.latest +} + +func (d 
*depTargets) modTimeDir() (time.Time, error) { + if !d.hasdir { + return d.latest, nil + } + var err error + for _, i := range d.src { + t := i.ModTime() + if i.IsDir() { + t, err = calDirModTimeRecursive(i) + if err != nil { + return time.Time{}, err + } + } + if t.After(d.latest) { + d.latest = t + } + } + return d.latest, nil +} diff --git a/vendor/github.com/magefile/mage/types/funcs.go b/vendor/github.com/magefile/mage/types/funcs.go new file mode 100644 index 00000000..9e2e1331 --- /dev/null +++ b/vendor/github.com/magefile/mage/types/funcs.go @@ -0,0 +1,58 @@ +package types + +import ( + "context" + "fmt" +) + +// FuncType indicates a prototype of build job function +type FuncType int + +// FuncTypes +const ( + InvalidType FuncType = iota + VoidType + ErrorType + ContextVoidType + ContextErrorType +) + +// FuncCheck tests if a function is one of FuncType +func FuncCheck(fn interface{}) error { + switch fn.(type) { + case func(): + return nil + case func() error: + return nil + case func(context.Context): + return nil + case func(context.Context) error: + return nil + } + return fmt.Errorf("Invalid type for dependent function: %T. 
Dependencies must be func(), func() error, func(context.Context) or func(context.Context) error", fn) +} + +// FuncTypeWrap wraps a valid FuncType to FuncContextError +func FuncTypeWrap(fn interface{}) func(context.Context) error { + if FuncCheck(fn) == nil { + switch f := fn.(type) { + case func(): + return func(context.Context) error { + f() + return nil + } + case func() error: + return func(context.Context) error { + return f() + } + case func(context.Context): + return func(ctx context.Context) error { + f(ctx) + return nil + } + case func(context.Context) error: + return f + } + } + return nil +} diff --git a/vendor/github.com/mholt/archiver/LICENSE b/vendor/github.com/mholt/archiver/LICENSE new file mode 100644 index 00000000..315d04f2 --- /dev/null +++ b/vendor/github.com/mholt/archiver/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Matthew Holt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/mholt/archiver/README.md b/vendor/github.com/mholt/archiver/README.md new file mode 100644 index 00000000..33d0a3ee --- /dev/null +++ b/vendor/github.com/mholt/archiver/README.md @@ -0,0 +1,83 @@ +archiver [![archiver GoDoc](https://img.shields.io/badge/reference-godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/mholt/archiver) [![Linux Build Status](https://img.shields.io/travis/mholt/archiver.svg?style=flat-square&label=linux+build)](https://travis-ci.org/mholt/archiver) [![Windows Build Status](https://img.shields.io/appveyor/ci/mholt/archiver.svg?style=flat-square&label=windows+build)](https://ci.appveyor.com/project/mholt/archiver) +======== + +Package archiver makes it trivially easy to make and extract common archive formats such as .zip, and .tar.gz. Simply name the input and output file(s). + +Files are put into the root of the archive; directories are recursively added, preserving structure. + +The `archiver` command runs the same cross-platform and has no external dependencies (not even libc); powered by the Go standard library, [dsnet/compress](https://github.com/dsnet/compress), [nwaples/rardecode](https://github.com/nwaples/rardecode), and [ulikunitz/xz](https://github.com/ulikunitz/xz). Enjoy! + +Supported formats/extensions: + +- .zip +- .tar +- .tar.gz & .tgz +- .tar.bz2 & .tbz2 +- .tar.xz & .txz +- .tar.lz4 & .tlz4 +- .tar.sz & .tsz +- .rar (open only) + + +## Install + +```bash +go get github.com/mholt/archiver/cmd/archiver +``` + +Or download binaries from the [releases](https://github.com/mholt/archiver/releases) page. + + +## Command Use + +Make a new archive: + +```bash +$ archiver make [archive name] [input files...] +``` + +(At least one input file is required.) + +To extract an archive: + +```bash +$ archiver open [archive name] [destination] +``` + +(The destination path is optional; default is current directory.) 
+ +The archive name must end with a supported file extension—this is how it knows what kind of archive to make. Run `archiver -h` for more help. + + +## Library Use + +```go +import "github.com/mholt/archiver" +``` + +Create a .zip file: + +```go +err := archiver.Zip.Make("output.zip", []string{"file.txt", "folder"}) +``` + +Extract a .zip file: + +```go +err := archiver.Zip.Open("input.zip", "output_folder") +``` + +Working with other file formats is exactly the same, but with [their own Archiver implementations](https://godoc.org/github.com/mholt/archiver#Archiver). + + + +## FAQ + +#### Can I list a file in one folder to go into a different folder in the archive? + +No. This works just like your OS would make an archive in the file explorer: organize your input files to mirror the structure you want in the archive. + + +#### Can it add files to an existing archive? + +Nope. This is a simple tool; it just makes new archives or extracts existing ones. diff --git a/vendor/github.com/mholt/archiver/appveyor.yml b/vendor/github.com/mholt/archiver/appveyor.yml new file mode 100644 index 00000000..f8f804f9 --- /dev/null +++ b/vendor/github.com/mholt/archiver/appveyor.yml @@ -0,0 +1,32 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\mholt\archiver + +environment: + GOPATH: c:\gopath + CGO_ENABLED: 0 + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go1.7.1.windows-amd64.zip + - 7z x go1.7.1.windows-amd64.zip -y -oC:\ > NUL + - go version + - go env + - go get -t ./... + - go get github.com/golang/lint/golint + - go get github.com/gordonklaus/ineffassign + - set PATH=%GOPATH%\bin;%PATH% + +build: off + +test_script: + - go vet ./... + - go test ./... + - ineffassign . + +after_test: + - golint ./... 
+ +deploy: off diff --git a/vendor/github.com/mholt/archiver/archiver.go b/vendor/github.com/mholt/archiver/archiver.go new file mode 100644 index 00000000..68f8ea38 --- /dev/null +++ b/vendor/github.com/mholt/archiver/archiver.go @@ -0,0 +1,107 @@ +package archiver + +import ( + "fmt" + "io" + "log" + "os" + "path/filepath" + "runtime" +) + +// Archiver represent a archive format +type Archiver interface { + // Match checks supported files + Match(filename string) bool + // Make makes an archive file on disk. + Make(destination string, sources []string) error + // Open extracts an archive file on disk. + Open(source, destination string) error + // Write writes an archive to a Writer. + Write(output io.Writer, sources []string) error + // Read reads an archive from a Reader. + Read(input io.Reader, destination string) error +} + +// SupportedFormats contains all supported archive formats +var SupportedFormats = map[string]Archiver{} + +// RegisterFormat adds a supported archive format +func RegisterFormat(name string, format Archiver) { + if _, ok := SupportedFormats[name]; ok { + log.Printf("Format %s already exists, skip!\n", name) + return + } + SupportedFormats[name] = format +} + +// MatchingFormat returns the first archive format that matches +// the given file, or nil if there is no match +func MatchingFormat(fpath string) Archiver { + for _, fmt := range SupportedFormats { + if fmt.Match(fpath) { + return fmt + } + } + return nil +} + +func writeNewFile(fpath string, in io.Reader, fm os.FileMode) error { + err := os.MkdirAll(filepath.Dir(fpath), 0755) + if err != nil { + return fmt.Errorf("%s: making directory for file: %v", fpath, err) + } + + out, err := os.Create(fpath) + if err != nil { + return fmt.Errorf("%s: creating new file: %v", fpath, err) + } + defer out.Close() + + err = out.Chmod(fm) + if err != nil && runtime.GOOS != "windows" { + return fmt.Errorf("%s: changing file mode: %v", fpath, err) + } + + _, err = io.Copy(out, in) + if err != nil { + 
return fmt.Errorf("%s: writing file: %v", fpath, err) + } + return nil +} + +func writeNewSymbolicLink(fpath string, target string) error { + err := os.MkdirAll(filepath.Dir(fpath), 0755) + if err != nil { + return fmt.Errorf("%s: making directory for file: %v", fpath, err) + } + + err = os.Symlink(target, fpath) + if err != nil { + return fmt.Errorf("%s: making symbolic link for: %v", fpath, err) + } + + return nil +} + +func writeNewHardLink(fpath string, target string) error { + err := os.MkdirAll(filepath.Dir(fpath), 0755) + if err != nil { + return fmt.Errorf("%s: making directory for file: %v", fpath, err) + } + + err = os.Link(target, fpath) + if err != nil { + return fmt.Errorf("%s: making hard link for: %v", fpath, err) + } + + return nil +} + +func mkdir(dirPath string) error { + err := os.MkdirAll(dirPath, 0755) + if err != nil { + return fmt.Errorf("%s: making directory: %v", dirPath, err) + } + return nil +} diff --git a/vendor/github.com/mholt/archiver/build.bash b/vendor/github.com/mholt/archiver/build.bash new file mode 100755 index 00000000..b1bef114 --- /dev/null +++ b/vendor/github.com/mholt/archiver/build.bash @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -ex + +# This script builds archiver for most common platforms. 
+ +export CGO_ENABLED=0 + +cd cmd/archiver +GOOS=linux GOARCH=386 go build -o ../../builds/archiver_linux_386 +GOOS=linux GOARCH=amd64 go build -o ../../builds/archiver_linux_amd64 +GOOS=linux GOARCH=arm go build -o ../../builds/archiver_linux_arm7 +GOOS=linux GOARCH=arm64 go build -o ../../builds/archiver_linux_arm64 +GOOS=darwin GOARCH=amd64 go build -o ../../builds/archiver_mac_amd64 +GOOS=windows GOARCH=386 go build -o ../../builds/archiver_windows_386.exe +GOOS=windows GOARCH=amd64 go build -o ../../builds/archiver_windows_amd64.exe +GOOS=freebsd GOARCH=386 go build -o ../../builds/archiver_freebsd_386 +GOOS=freebsd GOARCH=amd64 go build -o ../../builds/archiver_freebsd_amd64 +GOOS=freebsd GOARCH=arm go build -o ../../builds/archiver_freebsd_arm7 +GOOS=openbsd GOARCH=386 go build -o ../../builds/archiver_openbsd_386 +GOOS=openbsd GOARCH=amd64 go build -o ../../builds/archiver_openbsd_amd64 +cd ../.. \ No newline at end of file diff --git a/vendor/github.com/mholt/archiver/rar.go b/vendor/github.com/mholt/archiver/rar.go new file mode 100644 index 00000000..7744c24a --- /dev/null +++ b/vendor/github.com/mholt/archiver/rar.go @@ -0,0 +1,109 @@ +package archiver + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/nwaples/rardecode" +) + +// Rar is for RAR archive format +var Rar rarFormat + +func init() { + RegisterFormat("Rar", Rar) +} + +type rarFormat struct{} + +func (rarFormat) Match(filename string) bool { + return strings.HasSuffix(strings.ToLower(filename), ".rar") || isRar(filename) +} + +// isRar checks the file has the RAR 1.5 or 5.0 format signature by reading its +// beginning bytes and matching it +func isRar(rarPath string) bool { + f, err := os.Open(rarPath) + if err != nil { + return false + } + defer f.Close() + + buf := make([]byte, 8) + if n, err := f.Read(buf); err != nil || n < 8 { + return false + } + + return bytes.Equal(buf[:7], []byte("Rar!\x1a\x07\x00")) || // ver 1.5 + bytes.Equal(buf, 
[]byte("Rar!\x1a\x07\x01\x00")) // ver 5.0 +} + +// Write outputs a .rar archive, but this is not implemented because +// RAR is a proprietary format. It is here only for symmetry with +// the other archive formats in this package. +func (rarFormat) Write(output io.Writer, filePaths []string) error { + return fmt.Errorf("write: RAR not implemented (proprietary format)") +} + +// Make makes a .rar archive, but this is not implemented because +// RAR is a proprietary format. It is here only for symmetry with +// the other archive formats in this package. +func (rarFormat) Make(rarPath string, filePaths []string) error { + return fmt.Errorf("make %s: RAR not implemented (proprietary format)", rarPath) +} + +// Read extracts the RAR file read from input and puts the contents +// into destination. +func (rarFormat) Read(input io.Reader, destination string) error { + rr, err := rardecode.NewReader(input, "") + if err != nil { + return fmt.Errorf("read: failed to create reader: %v", err) + } + + for { + header, err := rr.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + if header.IsDir { + err = mkdir(filepath.Join(destination, header.Name)) + if err != nil { + return err + } + continue + } + + // if files come before their containing folders, then we must + // create their folders before writing the file + err = mkdir(filepath.Dir(filepath.Join(destination, header.Name))) + if err != nil { + return err + } + + err = writeNewFile(filepath.Join(destination, header.Name), rr, header.Mode()) + if err != nil { + return err + } + } + + return nil +} + +// Open extracts the RAR file at source and puts the contents +// into destination. 
+func (rarFormat) Open(source, destination string) error { + rf, err := os.Open(source) + if err != nil { + return fmt.Errorf("%s: failed to open file: %v", source, err) + } + defer rf.Close() + + return Rar.Read(rf, destination) +} diff --git a/vendor/github.com/mholt/archiver/tar.go b/vendor/github.com/mholt/archiver/tar.go new file mode 100644 index 00000000..2fbfbb27 --- /dev/null +++ b/vendor/github.com/mholt/archiver/tar.go @@ -0,0 +1,234 @@ +package archiver + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" +) + +// Tar is for Tar format +var Tar tarFormat + +func init() { + RegisterFormat("Tar", Tar) +} + +type tarFormat struct{} + +func (tarFormat) Match(filename string) bool { + return strings.HasSuffix(strings.ToLower(filename), ".tar") || isTar(filename) +} + +const tarBlockSize int = 512 + +// isTar checks the file has the Tar format header by reading its beginning +// block. +func isTar(tarPath string) bool { + f, err := os.Open(tarPath) + if err != nil { + return false + } + defer f.Close() + + buf := make([]byte, tarBlockSize) + if _, err = io.ReadFull(f, buf); err != nil { + return false + } + + return hasTarHeader(buf) +} + +// hasTarHeader checks passed bytes has a valid tar header or not. buf must +// contain at least 512 bytes and if not, it always returns false. 
+func hasTarHeader(buf []byte) bool { + if len(buf) < tarBlockSize { + return false + } + + b := buf[148:156] + b = bytes.Trim(b, " \x00") // clean up all spaces and null bytes + if len(b) == 0 { + return false // unknown format + } + hdrSum, err := strconv.ParseUint(string(b), 8, 64) + if err != nil { + return false + } + + // According to the go official archive/tar, Sun tar uses signed byte + // values so this calcs both signed and unsigned + var usum uint64 + var sum int64 + for i, c := range buf { + if 148 <= i && i < 156 { + c = ' ' // checksum field itself is counted as branks + } + usum += uint64(uint8(c)) + sum += int64(int8(c)) + } + + if hdrSum != usum && int64(hdrSum) != sum { + return false // invalid checksum + } + + return true +} + +// Write outputs a .tar file to a Writer containing the +// contents of files listed in filePaths. File paths can +// be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +func (tarFormat) Write(output io.Writer, filePaths []string) error { + return writeTar(filePaths, output, "") +} + +// Make creates a .tar file at tarPath containing the +// contents of files listed in filePaths. File paths can +// be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +func (tarFormat) Make(tarPath string, filePaths []string) error { + out, err := os.Create(tarPath) + if err != nil { + return fmt.Errorf("error creating %s: %v", tarPath, err) + } + defer out.Close() + + return writeTar(filePaths, out, tarPath) +} + +func writeTar(filePaths []string, output io.Writer, dest string) error { + tarWriter := tar.NewWriter(output) + defer tarWriter.Close() + + return tarball(filePaths, tarWriter, dest) +} + +// tarball writes all files listed in filePaths into tarWriter, which is +// writing into a file located at dest. 
+func tarball(filePaths []string, tarWriter *tar.Writer, dest string) error { + for _, fpath := range filePaths { + err := tarFile(tarWriter, fpath, dest) + if err != nil { + return err + } + } + return nil +} + +// tarFile writes the file at source into tarWriter. It does so +// recursively for directories. +func tarFile(tarWriter *tar.Writer, source, dest string) error { + sourceInfo, err := os.Stat(source) + if err != nil { + return fmt.Errorf("%s: stat: %v", source, err) + } + + var baseDir string + if sourceInfo.IsDir() { + baseDir = filepath.Base(source) + } + + return filepath.Walk(source, func(path string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error walking to %s: %v", path, err) + } + + header, err := tar.FileInfoHeader(info, path) + if err != nil { + return fmt.Errorf("%s: making header: %v", path, err) + } + + if baseDir != "" { + header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source)) + } + + if header.Name == dest { + // our new tar file is inside the directory being archived; skip it + return nil + } + + if info.IsDir() { + header.Name += "/" + } + + err = tarWriter.WriteHeader(header) + if err != nil { + return fmt.Errorf("%s: writing header: %v", path, err) + } + + if info.IsDir() { + return nil + } + + if header.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return fmt.Errorf("%s: open: %v", path, err) + } + defer file.Close() + + _, err = io.CopyN(tarWriter, file, info.Size()) + if err != nil && err != io.EOF { + return fmt.Errorf("%s: copying contents: %v", path, err) + } + } + return nil + }) +} + +// Read untars a .tar file read from a Reader and puts +// the contents into destination. +func (tarFormat) Read(input io.Reader, destination string) error { + return untar(tar.NewReader(input), destination) +} + +// Open untars source and puts the contents into destination. 
+func (tarFormat) Open(source, destination string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("%s: failed to open archive: %v", source, err) + } + defer f.Close() + + return Tar.Read(f, destination) +} + +// untar un-tarballs the contents of tr into destination. +func untar(tr *tar.Reader, destination string) error { + for { + header, err := tr.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + if err := untarFile(tr, header, destination); err != nil { + return err + } + } + return nil +} + +// untarFile untars a single file from tr with header header into destination. +func untarFile(tr *tar.Reader, header *tar.Header, destination string) error { + switch header.Typeflag { + case tar.TypeDir: + return mkdir(filepath.Join(destination, header.Name)) + case tar.TypeReg, tar.TypeRegA, tar.TypeChar, tar.TypeBlock, tar.TypeFifo: + return writeNewFile(filepath.Join(destination, header.Name), tr, header.FileInfo().Mode()) + case tar.TypeSymlink: + return writeNewSymbolicLink(filepath.Join(destination, header.Name), header.Linkname) + case tar.TypeLink: + return writeNewHardLink(filepath.Join(destination, header.Name), filepath.Join(destination, header.Linkname)) + default: + return fmt.Errorf("%s: unknown type flag: %c", header.Name, header.Typeflag) + } +} diff --git a/vendor/github.com/mholt/archiver/tarbz2.go b/vendor/github.com/mholt/archiver/tarbz2.go new file mode 100644 index 00000000..e0051d3c --- /dev/null +++ b/vendor/github.com/mholt/archiver/tarbz2.go @@ -0,0 +1,106 @@ +package archiver + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/dsnet/compress/bzip2" +) + +// TarBz2 is for TarBz2 format +var TarBz2 tarBz2Format + +func init() { + RegisterFormat("TarBz2", TarBz2) +} + +type tarBz2Format struct{} + +func (tarBz2Format) Match(filename string) bool { + return strings.HasSuffix(strings.ToLower(filename), ".tar.bz2") || + strings.HasSuffix(strings.ToLower(filename), ".tbz2") || + 
isTarBz2(filename) +} + +// isTarBz2 checks the file has the bzip2 compressed Tar format header by +// reading its beginning block. +func isTarBz2(tarbz2Path string) bool { + f, err := os.Open(tarbz2Path) + if err != nil { + return false + } + defer f.Close() + + bz2r, err := bzip2.NewReader(f, nil) + if err != nil { + return false + } + defer bz2r.Close() + + buf := make([]byte, tarBlockSize) + n, err := bz2r.Read(buf) + if err != nil || n < tarBlockSize { + return false + } + + return hasTarHeader(buf) +} + +// Write outputs a .tar.bz2 file to a Writer containing +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +func (tarBz2Format) Write(output io.Writer, filePaths []string) error { + return writeTarBz2(filePaths, output, "") +} + +// Make creates a .tar.bz2 file at tarbz2Path containing +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +func (tarBz2Format) Make(tarbz2Path string, filePaths []string) error { + out, err := os.Create(tarbz2Path) + if err != nil { + return fmt.Errorf("error creating %s: %v", tarbz2Path, err) + } + defer out.Close() + + return writeTarBz2(filePaths, out, tarbz2Path) +} + +func writeTarBz2(filePaths []string, output io.Writer, dest string) error { + bz2w, err := bzip2.NewWriter(output, nil) + if err != nil { + return fmt.Errorf("error compressing bzip2: %v", err) + } + defer bz2w.Close() + + return writeTar(filePaths, bz2w, dest) +} + +// Read untars a .tar.bz2 file read from a Reader and decompresses +// the contents into destination. 
+func (tarBz2Format) Read(input io.Reader, destination string) error { + bz2r, err := bzip2.NewReader(input, nil) + if err != nil { + return fmt.Errorf("error decompressing bzip2: %v", err) + } + defer bz2r.Close() + + return Tar.Read(bz2r, destination) +} + +// Open untars source and decompresses the contents into destination. +func (tarBz2Format) Open(source, destination string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("%s: failed to open archive: %v", source, err) + } + defer f.Close() + + return TarBz2.Read(f, destination) +} diff --git a/vendor/github.com/mholt/archiver/targz.go b/vendor/github.com/mholt/archiver/targz.go new file mode 100644 index 00000000..6751d49d --- /dev/null +++ b/vendor/github.com/mholt/archiver/targz.go @@ -0,0 +1,98 @@ +package archiver + +import ( + "compress/gzip" + "fmt" + "io" + "os" + "strings" +) + +// TarGz is for TarGz format +var TarGz tarGzFormat + +func init() { + RegisterFormat("TarGz", TarGz) +} + +type tarGzFormat struct{} + +func (tarGzFormat) Match(filename string) bool { + return strings.HasSuffix(strings.ToLower(filename), ".tar.gz") || + strings.HasSuffix(strings.ToLower(filename), ".tgz") || + isTarGz(filename) +} + +// isTarGz checks the file has the gzip compressed Tar format header by reading +// its beginning block. +func isTarGz(targzPath string) bool { + f, err := os.Open(targzPath) + if err != nil { + return false + } + defer f.Close() + + gzr, err := gzip.NewReader(f) + if err != nil { + return false + } + defer gzr.Close() + + buf := make([]byte, tarBlockSize) + n, err := gzr.Read(buf) + if err != nil || n < tarBlockSize { + return false + } + + return hasTarHeader(buf) +} + +// Write outputs a .tar.gz file to a Writer containing +// the contents of files listed in filePaths. It works +// the same way Tar does, but with gzip compression. 
+func (tarGzFormat) Write(output io.Writer, filePaths []string) error { + return writeTarGz(filePaths, output, "") +} + +// Make creates a .tar.gz file at targzPath containing +// the contents of files listed in filePaths. It works +// the same way Tar does, but with gzip compression. +func (tarGzFormat) Make(targzPath string, filePaths []string) error { + out, err := os.Create(targzPath) + if err != nil { + return fmt.Errorf("error creating %s: %v", targzPath, err) + } + defer out.Close() + + return writeTarGz(filePaths, out, targzPath) +} + +func writeTarGz(filePaths []string, output io.Writer, dest string) error { + gzw := gzip.NewWriter(output) + defer gzw.Close() + + return writeTar(filePaths, gzw, dest) +} + +// Read untars a .tar.gz file read from a Reader and decompresses +// the contents into destination. +func (tarGzFormat) Read(input io.Reader, destination string) error { + gzr, err := gzip.NewReader(input) + if err != nil { + return fmt.Errorf("error decompressing: %v", err) + } + defer gzr.Close() + + return Tar.Read(gzr, destination) +} + +// Open untars source and decompresses the contents into destination. 
+func (tarGzFormat) Open(source, destination string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("%s: failed to open archive: %v", source, err) + } + defer f.Close() + + return TarGz.Read(f, destination) +} diff --git a/vendor/github.com/mholt/archiver/tarlz4.go b/vendor/github.com/mholt/archiver/tarlz4.go new file mode 100644 index 00000000..1ddc881f --- /dev/null +++ b/vendor/github.com/mholt/archiver/tarlz4.go @@ -0,0 +1,92 @@ +package archiver + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/pierrec/lz4" +) + +// TarLz4 is for TarLz4 format +var TarLz4 tarLz4Format + +func init() { + RegisterFormat("TarLz4", TarLz4) +} + +type tarLz4Format struct{} + +func (tarLz4Format) Match(filename string) bool { + return strings.HasSuffix(strings.ToLower(filename), ".tar.lz4") || strings.HasSuffix(strings.ToLower(filename), ".tlz4") || isTarLz4(filename) +} + +// isTarLz4 checks the file has the lz4 compressed Tar format header by +// reading its beginning block. +func isTarLz4(tarlz4Path string) bool { + f, err := os.Open(tarlz4Path) + if err != nil { + return false + } + defer f.Close() + + lz4r := lz4.NewReader(f) + buf := make([]byte, tarBlockSize) + n, err := lz4r.Read(buf) + if err != nil || n < tarBlockSize { + return false + } + + return hasTarHeader(buf) +} + +// Write outputs a .tar.lz4 file to a Writer containing +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +func (tarLz4Format) Write(output io.Writer, filePaths []string) error { + return writeTarLz4(filePaths, output, "") +} + +// Make creates a .tar.lz4 file at tarlz4Path containing +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. 
+func (tarLz4Format) Make(tarlz4Path string, filePaths []string) error { + out, err := os.Create(tarlz4Path) + if err != nil { + return fmt.Errorf("error creating %s: %v", tarlz4Path, err) + } + defer out.Close() + + return writeTarLz4(filePaths, out, tarlz4Path) +} + +func writeTarLz4(filePaths []string, output io.Writer, dest string) error { + lz4w := lz4.NewWriter(output) + defer lz4w.Close() + + return writeTar(filePaths, lz4w, dest) +} + +// Read untars a .tar.xz file read from a Reader and decompresses +// the contents into destination. +func (tarLz4Format) Read(input io.Reader, destination string) error { + lz4r := lz4.NewReader(input) + + return Tar.Read(lz4r, destination) +} + +// Open untars source and decompresses the contents into destination. +func (tarLz4Format) Open(source, destination string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("%s: failed to open archive: %v", source, err) + } + defer f.Close() + + return TarLz4.Read(f, destination) +} diff --git a/vendor/github.com/mholt/archiver/tarsz.go b/vendor/github.com/mholt/archiver/tarsz.go new file mode 100644 index 00000000..2e290190 --- /dev/null +++ b/vendor/github.com/mholt/archiver/tarsz.go @@ -0,0 +1,92 @@ +package archiver + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/golang/snappy" +) + +// TarSz is for TarSz format +var TarSz tarSzFormat + +func init() { + RegisterFormat("TarSz", TarSz) +} + +type tarSzFormat struct{} + +func (tarSzFormat) Match(filename string) bool { + return strings.HasSuffix(strings.ToLower(filename), ".tar.sz") || strings.HasSuffix(strings.ToLower(filename), ".tsz") || isTarSz(filename) +} + +// isTarSz checks the file has the sz compressed Tar format header by +// reading its beginning block. 
+func isTarSz(tarszPath string) bool { + f, err := os.Open(tarszPath) + if err != nil { + return false + } + defer f.Close() + + szr := snappy.NewReader(f) + buf := make([]byte, tarBlockSize) + n, err := szr.Read(buf) + if err != nil || n < tarBlockSize { + return false + } + + return hasTarHeader(buf) +} + +// Write outputs a .tar.sz file to a Writer containing +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +func (tarSzFormat) Write(output io.Writer, filePaths []string) error { + return writeTarSz(filePaths, output, "") +} + +// Make creates a .tar.sz file at tarszPath containing +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +func (tarSzFormat) Make(tarszPath string, filePaths []string) error { + out, err := os.Create(tarszPath) + if err != nil { + return fmt.Errorf("error creating %s: %v", tarszPath, err) + } + defer out.Close() + + return writeTarSz(filePaths, out, tarszPath) +} + +func writeTarSz(filePaths []string, output io.Writer, dest string) error { + szw := snappy.NewBufferedWriter(output) + defer szw.Close() + + return writeTar(filePaths, szw, dest) +} + +// Read untars a .tar.sz file read from a Reader and decompresses +// the contents into destination. +func (tarSzFormat) Read(input io.Reader, destination string) error { + szr := snappy.NewReader(input) + + return Tar.Read(szr, destination) +} + +// Open untars source and decompresses the contents into destination. 
+func (tarSzFormat) Open(source, destination string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("%s: failed to open archive: %v", source, err) + } + defer f.Close() + + return TarSz.Read(f, destination) +} diff --git a/vendor/github.com/mholt/archiver/tarxz.go b/vendor/github.com/mholt/archiver/tarxz.go new file mode 100644 index 00000000..e222fb4a --- /dev/null +++ b/vendor/github.com/mholt/archiver/tarxz.go @@ -0,0 +1,105 @@ +package archiver + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/ulikunitz/xz" +) + +// TarXZ is for TarXZ format +var TarXZ xzFormat + +func init() { + RegisterFormat("TarXZ", TarXZ) +} + +type xzFormat struct{} + +// Match returns whether filename matches this format. +func (xzFormat) Match(filename string) bool { + return strings.HasSuffix(strings.ToLower(filename), ".tar.xz") || + strings.HasSuffix(strings.ToLower(filename), ".txz") || + isTarXz(filename) +} + +// isTarXz checks the file has the xz compressed Tar format header by reading +// its beginning block. +func isTarXz(tarxzPath string) bool { + f, err := os.Open(tarxzPath) + if err != nil { + return false + } + defer f.Close() + + xzr, err := xz.NewReader(f) + if err != nil { + return false + } + + buf := make([]byte, tarBlockSize) + n, err := xzr.Read(buf) + if err != nil || n < tarBlockSize { + return false + } + + return hasTarHeader(buf) +} + +// Write outputs a .tar.xz file to a Writer containing +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +func (xzFormat) Write(output io.Writer, filePaths []string) error { + return writeTarXZ(filePaths, output, "") +} + +// Make creates a .tar.xz file at xzPath containing +// the contents of files listed in filePaths. File +// paths can be those of regular files or directories. 
+// Regular files are stored at the 'root' of the +// archive, and directories are recursively added. +func (xzFormat) Make(xzPath string, filePaths []string) error { + out, err := os.Create(xzPath) + if err != nil { + return fmt.Errorf("error creating %s: %v", xzPath, err) + } + defer out.Close() + + return writeTarXZ(filePaths, out, xzPath) +} + +func writeTarXZ(filePaths []string, output io.Writer, dest string) error { + xzw, err := xz.NewWriter(output) + if err != nil { + return fmt.Errorf("error compressing xz: %v", err) + } + defer xzw.Close() + + return writeTar(filePaths, xzw, dest) +} + +// Read untars a .tar.xz file read from a Reader and decompresses +// the contents into destination. +func (xzFormat) Read(input io.Reader, destination string) error { + xzr, err := xz.NewReader(input) + if err != nil { + return fmt.Errorf("error decompressing xz: %v", err) + } + + return Tar.Read(xzr, destination) +} + +// Open untars source and decompresses the contents into destination. +func (xzFormat) Open(source, destination string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("%s: failed to open archive: %v", source, err) + } + defer f.Close() + + return TarXZ.Read(f, destination) +} diff --git a/vendor/github.com/mholt/archiver/zip.go b/vendor/github.com/mholt/archiver/zip.go new file mode 100644 index 00000000..5f458ada --- /dev/null +++ b/vendor/github.com/mholt/archiver/zip.go @@ -0,0 +1,233 @@ +// Package archiver makes it super easy to create and open .zip, +// .tar.gz, and .tar.bz2 files. 
+package archiver + +import ( + "archive/zip" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" +) + +// Zip is for Zip format +var Zip zipFormat + +func init() { + RegisterFormat("Zip", Zip) +} + +type zipFormat struct{} + +func (zipFormat) Match(filename string) bool { + return strings.HasSuffix(strings.ToLower(filename), ".zip") || isZip(filename) +} + +// isZip checks the file has the Zip format signature by reading its beginning +// bytes and matching it against "PK\x03\x04" +func isZip(zipPath string) bool { + f, err := os.Open(zipPath) + if err != nil { + return false + } + defer f.Close() + + buf := make([]byte, 4) + if n, err := f.Read(buf); err != nil || n < 4 { + return false + } + + return bytes.Equal(buf, []byte("PK\x03\x04")) +} + +// Write outputs a .zip file to the given writer with +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +// +// Files with an extension for formats that are already +// compressed will be stored only, not compressed. +func (zipFormat) Write(output io.Writer, filePaths []string) error { + w := zip.NewWriter(output) + for _, fpath := range filePaths { + if err := zipFile(w, fpath); err != nil { + w.Close() + return err + } + } + + return w.Close() +} + +// Make creates a .zip file in the location zipPath containing +// the contents of files listed in filePaths. File paths +// can be those of regular files or directories. Regular +// files are stored at the 'root' of the archive, and +// directories are recursively added. +// +// Files with an extension for formats that are already +// compressed will be stored only, not compressed. 
+func (zipFormat) Make(zipPath string, filePaths []string) error { + out, err := os.Create(zipPath) + if err != nil { + return fmt.Errorf("error creating %s: %v", zipPath, err) + } + defer out.Close() + + return Zip.Write(out, filePaths) +} + +func zipFile(w *zip.Writer, source string) error { + sourceInfo, err := os.Stat(source) + if err != nil { + return fmt.Errorf("%s: stat: %v", source, err) + } + + var baseDir string + if sourceInfo.IsDir() { + baseDir = filepath.Base(source) + } + + return filepath.Walk(source, func(fpath string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("walking to %s: %v", fpath, err) + } + + header, err := zip.FileInfoHeader(info) + if err != nil { + return fmt.Errorf("%s: getting header: %v", fpath, err) + } + + if baseDir != "" { + name, err := filepath.Rel(source, fpath) + if err != nil { + return err + } + header.Name = path.Join(baseDir, filepath.ToSlash(name)) + } + + if info.IsDir() { + header.Name += "/" + header.Method = zip.Store + } else { + ext := strings.ToLower(path.Ext(header.Name)) + if _, ok := compressedFormats[ext]; ok { + header.Method = zip.Store + } else { + header.Method = zip.Deflate + } + } + + writer, err := w.CreateHeader(header) + if err != nil { + return fmt.Errorf("%s: making header: %v", fpath, err) + } + + if info.IsDir() { + return nil + } + + if header.Mode().IsRegular() { + file, err := os.Open(fpath) + if err != nil { + return fmt.Errorf("%s: opening: %v", fpath, err) + } + defer file.Close() + + _, err = io.CopyN(writer, file, info.Size()) + if err != nil && err != io.EOF { + return fmt.Errorf("%s: copying contents: %v", fpath, err) + } + } + + return nil + }) +} + +// Read unzips the .zip file read from the input Reader into destination. 
+func (zipFormat) Read(input io.Reader, destination string) error { + buf, err := ioutil.ReadAll(input) + if err != nil { + return err + } + + rdr := bytes.NewReader(buf) + r, err := zip.NewReader(rdr, rdr.Size()) + if err != nil { + return err + } + + return unzipAll(r, destination) +} + +// Open unzips the .zip file at source into destination. +func (zipFormat) Open(source, destination string) error { + r, err := zip.OpenReader(source) + if err != nil { + return err + } + defer r.Close() + + return unzipAll(&r.Reader, destination) +} + +func unzipAll(r *zip.Reader, destination string) error { + for _, zf := range r.File { + if err := unzipFile(zf, destination); err != nil { + return err + } + } + + return nil +} + +func unzipFile(zf *zip.File, destination string) error { + if strings.HasSuffix(zf.Name, "/") { + return mkdir(filepath.Join(destination, zf.Name)) + } + + rc, err := zf.Open() + if err != nil { + return fmt.Errorf("%s: open compressed file: %v", zf.Name, err) + } + defer rc.Close() + + return writeNewFile(filepath.Join(destination, zf.Name), rc, zf.FileInfo().Mode()) +} + +// compressedFormats is a (non-exhaustive) set of lowercased +// file extensions for formats that are typically already +// compressed. Compressing already-compressed files often +// results in a larger file, so when possible, we check this +// set to avoid that. 
+var compressedFormats = map[string]struct{}{ + ".7z": {}, + ".avi": {}, + ".bz2": {}, + ".cab": {}, + ".gif": {}, + ".gz": {}, + ".jar": {}, + ".jpeg": {}, + ".jpg": {}, + ".lz": {}, + ".lzma": {}, + ".mov": {}, + ".mp3": {}, + ".mp4": {}, + ".mpeg": {}, + ".mpg": {}, + ".png": {}, + ".rar": {}, + ".tbz2": {}, + ".tgz": {}, + ".txz": {}, + ".xz": {}, + ".zip": {}, + ".zipx": {}, +} diff --git a/vendor/github.com/nwaples/rardecode/LICENSE b/vendor/github.com/nwaples/rardecode/LICENSE new file mode 100644 index 00000000..0050f92d --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Nicholas Waples +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/nwaples/rardecode/README.md b/vendor/github.com/nwaples/rardecode/README.md new file mode 100644 index 00000000..513464c2 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/README.md @@ -0,0 +1,4 @@ +# rardecode +[![GoDoc](https://godoc.org/github.com/nwaples/rardecode?status.svg)](https://godoc.org/github.com/nwaples/rardecode) + +A go package for reading RAR archives. diff --git a/vendor/github.com/nwaples/rardecode/archive.go b/vendor/github.com/nwaples/rardecode/archive.go new file mode 100644 index 00000000..8929f126 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/archive.go @@ -0,0 +1,306 @@ +package rardecode + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +const ( + maxSfxSize = 0x100000 // maximum number of bytes to read when searching for RAR signature + sigPrefix = "Rar!\x1A\x07" + + fileFmt15 = iota + 1 // Version 1.5 archive file format + fileFmt50 // Version 5.0 archive file format +) + +var ( + errNoSig = errors.New("rardecode: RAR signature not found") + errVerMismatch = errors.New("rardecode: volume version mistmatch") + errCorruptHeader = errors.New("rardecode: corrupt block header") + errCorruptFileHeader = errors.New("rardecode: corrupt file header") + errBadHeaderCrc = errors.New("rardecode: bad header crc") + errUnknownArc = errors.New("rardecode: unknown archive version") + errUnknownDecoder = errors.New("rardecode: unknown decoder version") + errUnsupportedDecoder = errors.New("rardecode: unsupported decoder version") + errArchiveContinues = errors.New("rardecode: archive continues in next volume") + errArchiveEnd = errors.New("rardecode: archive end reached") + errDecoderOutOfData = errors.New("rardecode: decoder expected more data than is in packed file") + + reDigits = regexp.MustCompile(`\d+`) +) + +type readBuf []byte + +func (b *readBuf) byte() byte { + v := (*b)[0] + *b = (*b)[1:] + return v +} + +func (b *readBuf) 
uint16() uint16 { + v := uint16((*b)[0]) | uint16((*b)[1])<<8 + *b = (*b)[2:] + return v +} + +func (b *readBuf) uint32() uint32 { + v := uint32((*b)[0]) | uint32((*b)[1])<<8 | uint32((*b)[2])<<16 | uint32((*b)[3])<<24 + *b = (*b)[4:] + return v +} + +func (b *readBuf) bytes(n int) []byte { + v := (*b)[:n] + *b = (*b)[n:] + return v +} + +func (b *readBuf) uvarint() uint64 { + var x uint64 + var s uint + for i, n := range *b { + if n < 0x80 { + *b = (*b)[i+1:] + return x | uint64(n)< '9' || v.file[i+3] < '0' || v.file[i+3] > '9') { + v.file = v.file[:i+2] + "00" + return + } + } + // new style volume naming + if !v.old { + // find all numbers in volume name + m := reDigits.FindAllStringIndex(v.file, -1) + if l := len(m); l > 1 { + // More than 1 match so assume name.part###of###.rar style. + // Take the last 2 matches where the first is the volume number. + m = m[l-2 : l] + if strings.Contains(v.file[m[0][1]:m[1][0]], ".") || !strings.Contains(v.file[:m[0][0]], ".") { + // Didn't match above style as volume had '.' between the two numbers or didnt have a '.' + // before the first match. Use the second number as volume number. + m = m[1:] + } + } + // extract and increment volume number + lo, hi := m[0][0], m[0][1] + n, err := strconv.Atoi(v.file[lo:hi]) + if err != nil { + n = 0 + } else { + n++ + } + // volume number must use at least the same number of characters as previous volume + vol := fmt.Sprintf("%0"+fmt.Sprint(hi-lo)+"d", n) + v.file = v.file[:lo] + vol + v.file[hi:] + return + } + // old style volume naming + i := strings.LastIndex(v.file, ".") + // get file extension + b := []byte(v.file[i+1:]) + // start incrementing volume number digits from rightmost + for j := 2; j >= 0; j-- { + if b[j] != '9' { + b[j]++ + break + } + // digit overflow + if j == 0 { + // last character before '.' 
+ b[j] = 'A' + } else { + // set to '0' and loop to next character + b[j] = '0' + } + } + v.file = v.file[:i+1] + string(b) +} + +func (v *volume) next() (*fileBlockHeader, error) { + for { + var atEOF bool + + h, err := v.fileBlockReader.next() + switch err { + case errArchiveContinues: + case io.EOF: + // Read all of volume without finding an end block. The only way + // to tell if the archive continues is to try to open the next volume. + atEOF = true + default: + return h, err + } + + v.f.Close() + v.nextVolName() + v.f, err = os.Open(v.dir + v.file) // Open next volume file + if err != nil { + if atEOF && os.IsNotExist(err) { + // volume not found so assume that the archive has ended + return nil, io.EOF + } + return nil, err + } + v.num++ + v.br.Reset(v.f) + ver, err := findSig(v.br) + if err != nil { + return nil, err + } + if v.version() != ver { + return nil, errVerMismatch + } + v.reset() // reset encryption + } +} + +func (v *volume) Close() error { + // may be nil if os.Open fails in next() + if v.f == nil { + return nil + } + return v.f.Close() +} + +func openVolume(name, password string) (*volume, error) { + var err error + v := new(volume) + v.dir, v.file = filepath.Split(name) + v.f, err = os.Open(name) + if err != nil { + return nil, err + } + v.br = bufio.NewReader(v.f) + v.fileBlockReader, err = newFileBlockReader(v.br, password) + if err != nil { + v.f.Close() + return nil, err + } + return v, nil +} + +func newFileBlockReader(br *bufio.Reader, pass string) (fileBlockReader, error) { + runes := []rune(pass) + if len(runes) > maxPassword { + pass = string(runes[:maxPassword]) + } + ver, err := findSig(br) + if err != nil { + return nil, err + } + switch ver { + case fileFmt15: + return newArchive15(br, pass), nil + case fileFmt50: + return newArchive50(br, pass), nil + } + return nil, errUnknownArc +} diff --git a/vendor/github.com/nwaples/rardecode/archive15.go b/vendor/github.com/nwaples/rardecode/archive15.go new file mode 100644 index 
00000000..260176c0 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/archive15.go @@ -0,0 +1,468 @@ +package rardecode + +import ( + "bufio" + "bytes" + "crypto/sha1" + "errors" + "hash" + "hash/crc32" + "io" + "io/ioutil" + "strconv" + "strings" + "time" + "unicode/utf16" +) + +const ( + // block types + blockArc = 0x73 + blockFile = 0x74 + blockService = 0x7a + blockEnd = 0x7b + + // block flags + blockHasData = 0x8000 + + // archive block flags + arcVolume = 0x0001 + arcSolid = 0x0008 + arcNewNaming = 0x0010 + arcEncrypted = 0x0080 + + // file block flags + fileSplitBefore = 0x0001 + fileSplitAfter = 0x0002 + fileEncrypted = 0x0004 + fileSolid = 0x0010 + fileWindowMask = 0x00e0 + fileLargeData = 0x0100 + fileUnicode = 0x0200 + fileSalt = 0x0400 + fileVersion = 0x0800 + fileExtTime = 0x1000 + + // end block flags + endArcNotLast = 0x0001 + + saltSize = 8 // size of salt for calculating AES keys + cacheSize30 = 4 // number of AES keys to cache + hashRounds = 0x40000 +) + +var ( + errMultipleDecoders = errors.New("rardecode: multiple decoders in a single archive not supported") +) + +type blockHeader15 struct { + htype byte // block header type + flags uint16 + data readBuf // header data + dataSize int64 // size of extra block data +} + +// fileHash32 implements fileChecksum for 32-bit hashes +type fileHash32 struct { + hash.Hash32 // hash to write file contents to + sum uint32 // 32bit checksum for file +} + +func (h *fileHash32) valid() bool { + return h.sum == h.Sum32() +} + +// archive15 implements fileBlockReader for RAR 1.5 file format archives +type archive15 struct { + byteReader // reader for current block data + v *bufio.Reader // reader for current archive volume + dec decoder // current decoder + decVer byte // current decoder version + multi bool // archive is multi-volume + old bool // archive uses old naming scheme + solid bool // archive is a solid archive + encrypted bool + pass []uint16 // password in UTF-16 + checksum fileHash32 // file 
checksum + buf readBuf // temporary buffer + keyCache [cacheSize30]struct { // cache of previously calculated decryption keys + salt []byte + key []byte + iv []byte + } +} + +// Calculates the key and iv for AES decryption given a password and salt. +func calcAes30Params(pass []uint16, salt []byte) (key, iv []byte) { + p := make([]byte, 0, len(pass)*2+len(salt)) + for _, v := range pass { + p = append(p, byte(v), byte(v>>8)) + } + p = append(p, salt...) + + hash := sha1.New() + iv = make([]byte, 16) + s := make([]byte, 0, hash.Size()) + for i := 0; i < hashRounds; i++ { + hash.Write(p) + hash.Write([]byte{byte(i), byte(i >> 8), byte(i >> 16)}) + if i%(hashRounds/16) == 0 { + s = hash.Sum(s[:0]) + iv[i/(hashRounds/16)] = s[4*4+3] + } + } + key = hash.Sum(s[:0]) + key = key[:16] + + for k := key; len(k) >= 4; k = k[4:] { + k[0], k[1], k[2], k[3] = k[3], k[2], k[1], k[0] + } + return key, iv +} + +// parseDosTime converts a 32bit DOS time value to time.Time +func parseDosTime(t uint32) time.Time { + n := int(t) + sec := n & 0x1f << 1 + min := n >> 5 & 0x3f + hr := n >> 11 & 0x1f + day := n >> 16 & 0x1f + mon := time.Month(n >> 21 & 0x0f) + yr := n>>25&0x7f + 1980 + return time.Date(yr, mon, day, hr, min, sec, 0, time.Local) +} + +// decodeName decodes a non-unicode filename from a file header. 
+func decodeName(buf []byte) string { + i := bytes.IndexByte(buf, 0) + if i < 0 { + return string(buf) // filename is UTF-8 + } + + name := buf[:i] + encName := readBuf(buf[i+1:]) + if len(encName) < 2 { + return "" // invalid encoding + } + highByte := uint16(encName.byte()) << 8 + flags := encName.byte() + flagBits := 8 + var wchars []uint16 // decoded characters are UTF-16 + for len(wchars) < len(name) && len(encName) > 0 { + if flagBits == 0 { + flags = encName.byte() + flagBits = 8 + if len(encName) == 0 { + break + } + } + switch flags >> 6 { + case 0: + wchars = append(wchars, uint16(encName.byte())) + case 1: + wchars = append(wchars, uint16(encName.byte())|highByte) + case 2: + if len(encName) < 2 { + break + } + wchars = append(wchars, encName.uint16()) + case 3: + n := encName.byte() + b := name[len(wchars):] + if l := int(n&0x7f) + 2; l < len(b) { + b = b[:l] + } + if n&0x80 > 0 { + if len(encName) < 1 { + break + } + ec := encName.byte() + for _, c := range b { + wchars = append(wchars, uint16(c+ec)|highByte) + } + } else { + for _, c := range b { + wchars = append(wchars, uint16(c)) + } + } + } + flags <<= 2 + flagBits -= 2 + } + return string(utf16.Decode(wchars)) +} + +// readExtTimes reads and parses the optional extra time field from the file header. 
+func readExtTimes(f *fileBlockHeader, b *readBuf) { + if len(*b) < 2 { + return // invalid, not enough data + } + flags := b.uint16() + + ts := []*time.Time{&f.ModificationTime, &f.CreationTime, &f.AccessTime} + + for i, t := range ts { + n := flags >> uint((3-i)*4) + if n&0x8 == 0 { + continue + } + if i != 0 { // ModificationTime already read so skip + if len(*b) < 4 { + return // invalid, not enough data + } + *t = parseDosTime(b.uint32()) + } + if n&0x4 > 0 { + *t = t.Add(time.Second) + } + n &= 0x3 + if n == 0 { + continue + } + if len(*b) < int(n) { + return // invalid, not enough data + } + // add extra time data in 100's of nanoseconds + d := time.Duration(0) + for j := 3 - n; j < n; j++ { + d |= time.Duration(b.byte()) << (j * 8) + } + d *= 100 + *t = t.Add(d) + } +} + +func (a *archive15) getKeys(salt []byte) (key, iv []byte) { + // check cache of keys + for _, v := range a.keyCache { + if bytes.Equal(v.salt[:], salt) { + return v.key, v.iv + } + } + key, iv = calcAes30Params(a.pass, salt) + + // save a copy in the cache + copy(a.keyCache[1:], a.keyCache[:]) + a.keyCache[0].salt = append([]byte(nil), salt...) 
// copy so byte slice can be reused + a.keyCache[0].key = key + a.keyCache[0].iv = iv + + return key, iv +} + +func (a *archive15) parseFileHeader(h *blockHeader15) (*fileBlockHeader, error) { + f := new(fileBlockHeader) + + f.first = h.flags&fileSplitBefore == 0 + f.last = h.flags&fileSplitAfter == 0 + + f.solid = h.flags&fileSolid > 0 + f.IsDir = h.flags&fileWindowMask == fileWindowMask + if !f.IsDir { + f.winSize = uint(h.flags&fileWindowMask)>>5 + 16 + } + + b := h.data + if len(b) < 21 { + return nil, errCorruptFileHeader + } + + f.PackedSize = h.dataSize + f.UnPackedSize = int64(b.uint32()) + f.HostOS = b.byte() + 1 + if f.HostOS > HostOSBeOS { + f.HostOS = HostOSUnknown + } + a.checksum.sum = b.uint32() + + f.ModificationTime = parseDosTime(b.uint32()) + unpackver := b.byte() // decoder version + method := b.byte() - 0x30 // decryption method + namesize := int(b.uint16()) + f.Attributes = int64(b.uint32()) + if h.flags&fileLargeData > 0 { + if len(b) < 8 { + return nil, errCorruptFileHeader + } + _ = b.uint32() // already read large PackedSize in readBlockHeader + f.UnPackedSize |= int64(b.uint32()) << 32 + f.UnKnownSize = f.UnPackedSize == -1 + } else if int32(f.UnPackedSize) == -1 { + f.UnKnownSize = true + f.UnPackedSize = -1 + } + if len(b) < namesize { + return nil, errCorruptFileHeader + } + name := b.bytes(namesize) + if h.flags&fileUnicode == 0 { + f.Name = string(name) + } else { + f.Name = decodeName(name) + } + // Rar 4.x uses '\' as file separator + f.Name = strings.Replace(f.Name, "\\", "/", -1) + + if h.flags&fileVersion > 0 { + // file version is stored as ';n' appended to file name + i := strings.LastIndex(f.Name, ";") + if i > 0 { + j, err := strconv.Atoi(f.Name[i+1:]) + if err == nil && j >= 0 { + f.Version = j + f.Name = f.Name[:i] + } + } + } + + var salt []byte + if h.flags&fileSalt > 0 { + if len(b) < saltSize { + return nil, errCorruptFileHeader + } + salt = b.bytes(saltSize) + } + if h.flags&fileExtTime > 0 { + readExtTimes(f, &b) + } 
+ + if !f.first { + return f, nil + } + // fields only needed for first block in a file + if h.flags&fileEncrypted > 0 && len(salt) == saltSize { + f.key, f.iv = a.getKeys(salt) + } + a.checksum.Reset() + f.cksum = &a.checksum + if method == 0 { + return f, nil + } + if a.dec == nil { + switch unpackver { + case 15, 20, 26: + return nil, errUnsupportedDecoder + case 29: + a.dec = new(decoder29) + default: + return nil, errUnknownDecoder + } + a.decVer = unpackver + } else if a.decVer != unpackver { + return nil, errMultipleDecoders + } + f.decoder = a.dec + return f, nil +} + +// readBlockHeader returns the next block header in the archive. +// It will return io.EOF if there were no bytes read. +func (a *archive15) readBlockHeader() (*blockHeader15, error) { + var err error + b := a.buf[:7] + r := io.Reader(a.v) + if a.encrypted { + salt := a.buf[:saltSize] + _, err = io.ReadFull(r, salt) + if err != nil { + return nil, err + } + key, iv := a.getKeys(salt) + r = newAesDecryptReader(r, key, iv) + err = readFull(r, b) + } else { + _, err = io.ReadFull(r, b) + } + if err != nil { + return nil, err + } + + crc := b.uint16() + hash := crc32.NewIEEE() + hash.Write(b) + h := new(blockHeader15) + h.htype = b.byte() + h.flags = b.uint16() + size := b.uint16() + if size < 7 { + return nil, errCorruptHeader + } + size -= 7 + if int(size) > cap(a.buf) { + a.buf = readBuf(make([]byte, size)) + } + h.data = a.buf[:size] + if err := readFull(r, h.data); err != nil { + return nil, err + } + hash.Write(h.data) + if crc != uint16(hash.Sum32()) { + return nil, errBadHeaderCrc + } + if h.flags&blockHasData > 0 { + if len(h.data) < 4 { + return nil, errCorruptHeader + } + h.dataSize = int64(h.data.uint32()) + } + if (h.htype == blockService || h.htype == blockFile) && h.flags&fileLargeData > 0 { + if len(h.data) < 25 { + return nil, errCorruptHeader + } + b := h.data[21:25] + h.dataSize |= int64(b.uint32()) << 32 + } + return h, nil +} + +// next advances to the next file block in the 
archive +func (a *archive15) next() (*fileBlockHeader, error) { + for { + // could return an io.EOF here as 1.5 archives may not have an end block. + h, err := a.readBlockHeader() + if err != nil { + return nil, err + } + a.byteReader = limitByteReader(a.v, h.dataSize) // reader for block data + + switch h.htype { + case blockFile: + return a.parseFileHeader(h) + case blockArc: + a.encrypted = h.flags&arcEncrypted > 0 + a.multi = h.flags&arcVolume > 0 + a.old = h.flags&arcNewNaming == 0 + a.solid = h.flags&arcSolid > 0 + case blockEnd: + if h.flags&endArcNotLast == 0 || !a.multi { + return nil, errArchiveEnd + } + return nil, errArchiveContinues + default: + _, err = io.Copy(ioutil.Discard, a.byteReader) + } + if err != nil { + return nil, err + } + } +} + +func (a *archive15) version() int { return fileFmt15 } + +func (a *archive15) reset() { + a.encrypted = false // reset encryption when opening new volume file +} + +func (a *archive15) isSolid() bool { + return a.solid +} + +// newArchive15 creates a new fileBlockReader for a Version 1.5 archive +func newArchive15(r *bufio.Reader, password string) fileBlockReader { + a := new(archive15) + a.v = r + a.pass = utf16.Encode([]rune(password)) // convert to UTF-16 + a.checksum.Hash32 = crc32.NewIEEE() + a.buf = readBuf(make([]byte, 100)) + return a +} diff --git a/vendor/github.com/nwaples/rardecode/archive50.go b/vendor/github.com/nwaples/rardecode/archive50.go new file mode 100644 index 00000000..1d8f850d --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/archive50.go @@ -0,0 +1,475 @@ +package rardecode + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/sha256" + "errors" + "hash" + "hash/crc32" + "io" + "io/ioutil" + "time" +) + +const ( + // block types + block5Arc = 1 + block5File = 2 + block5Service = 3 + block5Encrypt = 4 + block5End = 5 + + // block flags + block5HasExtra = 0x0001 + block5HasData = 0x0002 + block5DataNotFirst = 0x0008 + block5DataNotLast = 0x0010 + + // end block flags + 
endArc5NotLast = 0x0001 + + // archive encryption block flags + enc5CheckPresent = 0x0001 // password check data is present + + // main archive block flags + arc5MultiVol = 0x0001 + arc5Solid = 0x0004 + + // file block flags + file5IsDir = 0x0001 + file5HasUnixMtime = 0x0002 + file5HasCRC32 = 0x0004 + file5UnpSizeUnknown = 0x0008 + + // file encryption record flags + file5EncCheckPresent = 0x0001 // password check data is present + file5EncUseMac = 0x0002 // use MAC instead of plain checksum + + cacheSize50 = 4 + maxPbkdf2Salt = 64 + pwCheckSize = 8 + maxKdfCount = 24 + + minHeaderSize = 7 +) + +var ( + errBadPassword = errors.New("rardecode: incorrect password") + errCorruptEncrypt = errors.New("rardecode: corrupt encryption data") + errUnknownEncMethod = errors.New("rardecode: unknown encryption method") +) + +type extra struct { + ftype uint64 // field type + data readBuf // field data +} + +type blockHeader50 struct { + htype uint64 // block type + flags uint64 + data readBuf // block header data + extra []extra // extra fields + dataSize int64 // size of block data +} + +// leHash32 wraps a hash.Hash32 to return the result of Sum in little +// endian format. 
+type leHash32 struct { + hash.Hash32 +} + +func (h leHash32) Sum(b []byte) []byte { + s := h.Sum32() + return append(b, byte(s), byte(s>>8), byte(s>>16), byte(s>>24)) +} + +func newLittleEndianCRC32() hash.Hash32 { + return leHash32{crc32.NewIEEE()} +} + +// hash50 implements fileChecksum for RAR 5 archives +type hash50 struct { + hash.Hash // hash file data is written to + sum []byte // file checksum + key []byte // if present used with hmac in calculating checksum from hash +} + +func (h *hash50) valid() bool { + sum := h.Sum(nil) + if len(h.key) > 0 { + mac := hmac.New(sha256.New, h.key) + mac.Write(sum) + sum = mac.Sum(sum[:0]) + if len(h.sum) == 4 { + // CRC32 + for i, v := range sum[4:] { + sum[i&3] ^= v + } + sum = sum[:4] + } + } + return bytes.Equal(sum, h.sum) +} + +// archive50 implements fileBlockReader for RAR 5 file format archives +type archive50 struct { + byteReader // reader for current block data + v *bufio.Reader // reader for current archive volume + pass []byte + blockKey []byte // key used to encrypt blocks + multi bool // archive is multi-volume + solid bool // is a solid archive + checksum hash50 // file checksum + dec decoder // optional decoder used to unpack file + buf readBuf // temporary buffer + keyCache [cacheSize50]struct { // encryption key cache + kdfCount int + salt []byte + keys [][]byte + } +} + +// calcKeys50 calculates the keys used in RAR 5 archive processing. +// The returned slice of byte slices contains 3 keys. +// Key 0 is used for block or file decryption. +// Key 1 is optionally used for file checksum calculation. +// Key 2 is optionally used for password checking. +func calcKeys50(pass, salt []byte, kdfCount int) [][]byte { + if len(salt) > maxPbkdf2Salt { + salt = salt[:maxPbkdf2Salt] + } + keys := make([][]byte, 3) + if len(keys) == 0 { + return keys + } + + prf := hmac.New(sha256.New, pass) + prf.Write(salt) + prf.Write([]byte{0, 0, 0, 1}) + + t := prf.Sum(nil) + u := append([]byte(nil), t...) 
+ + kdfCount-- + + for i, iter := range []int{kdfCount, 16, 16} { + for iter > 0 { + prf.Reset() + prf.Write(u) + u = prf.Sum(u[:0]) + for j := range u { + t[j] ^= u[j] + } + iter-- + } + keys[i] = append([]byte(nil), t...) + } + + pwcheck := keys[2] + for i, v := range pwcheck[pwCheckSize:] { + pwcheck[i&(pwCheckSize-1)] ^= v + } + keys[2] = pwcheck[:pwCheckSize] + + return keys +} + +// getKeys reads kdfcount and salt from b and returns the corresponding encryption keys. +func (a *archive50) getKeys(b *readBuf) (keys [][]byte, err error) { + if len(*b) < 17 { + return nil, errCorruptEncrypt + } + // read kdf count and salt + kdfCount := int(b.byte()) + if kdfCount > maxKdfCount { + return nil, errCorruptEncrypt + } + kdfCount = 1 << uint(kdfCount) + salt := b.bytes(16) + + // check cache of keys for match + for _, v := range a.keyCache { + if kdfCount == v.kdfCount && bytes.Equal(salt, v.salt) { + return v.keys, nil + } + } + // not found, calculate keys + keys = calcKeys50(a.pass, salt, kdfCount) + + // store in cache + copy(a.keyCache[1:], a.keyCache[:]) + a.keyCache[0].kdfCount = kdfCount + a.keyCache[0].salt = append([]byte(nil), salt...) + a.keyCache[0].keys = keys + + return keys, nil +} + +// checkPassword calculates if a password is correct given password check data and keys. +func checkPassword(b *readBuf, keys [][]byte) error { + if len(*b) < 12 { + return nil // not enough bytes, ignore for the moment + } + pwcheck := b.bytes(8) + sum := b.bytes(4) + csum := sha256.Sum256(pwcheck) + if bytes.Equal(sum, csum[:len(sum)]) && !bytes.Equal(pwcheck, keys[2]) { + return errBadPassword + } + return nil +} + +// parseFileEncryptionRecord processes the optional file encryption record from a file header. 
+func (a *archive50) parseFileEncryptionRecord(b readBuf, f *fileBlockHeader) error { + if ver := b.uvarint(); ver != 0 { + return errUnknownEncMethod + } + flags := b.uvarint() + + keys, err := a.getKeys(&b) + if err != nil { + return err + } + + f.key = keys[0] + if len(b) < 16 { + return errCorruptEncrypt + } + f.iv = b.bytes(16) + + if flags&file5EncCheckPresent > 0 { + if err := checkPassword(&b, keys); err != nil { + return err + } + } + if flags&file5EncUseMac > 0 { + a.checksum.key = keys[1] + } + return nil +} + +func (a *archive50) parseFileHeader(h *blockHeader50) (*fileBlockHeader, error) { + a.checksum.sum = nil + a.checksum.key = nil + + f := new(fileBlockHeader) + + f.first = h.flags&block5DataNotFirst == 0 + f.last = h.flags&block5DataNotLast == 0 + + flags := h.data.uvarint() // file flags + f.IsDir = flags&file5IsDir > 0 + f.UnKnownSize = flags&file5UnpSizeUnknown > 0 + f.UnPackedSize = int64(h.data.uvarint()) + f.PackedSize = h.dataSize + f.Attributes = int64(h.data.uvarint()) + if flags&file5HasUnixMtime > 0 { + if len(h.data) < 4 { + return nil, errCorruptFileHeader + } + f.ModificationTime = time.Unix(int64(h.data.uint32()), 0) + } + if flags&file5HasCRC32 > 0 { + if len(h.data) < 4 { + return nil, errCorruptFileHeader + } + a.checksum.sum = append([]byte(nil), h.data.bytes(4)...) 
+ if f.first { + a.checksum.Hash = newLittleEndianCRC32() + f.cksum = &a.checksum + } + } + + flags = h.data.uvarint() // compression flags + f.solid = flags&0x0040 > 0 + f.winSize = uint(flags&0x3C00)>>10 + 17 + method := (flags >> 7) & 7 // compression method (0 == none) + if f.first && method != 0 { + unpackver := flags & 0x003f + if unpackver != 0 { + return nil, errUnknownDecoder + } + if a.dec == nil { + a.dec = new(decoder50) + } + f.decoder = a.dec + } + switch h.data.uvarint() { + case 0: + f.HostOS = HostOSWindows + case 1: + f.HostOS = HostOSUnix + default: + f.HostOS = HostOSUnknown + } + nlen := int(h.data.uvarint()) + if len(h.data) < nlen { + return nil, errCorruptFileHeader + } + f.Name = string(h.data.bytes(nlen)) + + // parse optional extra records + for _, e := range h.extra { + var err error + switch e.ftype { + case 1: // encryption + err = a.parseFileEncryptionRecord(e.data, f) + case 2: + // TODO: hash + case 3: + // TODO: time + case 4: // version + _ = e.data.uvarint() // ignore flags field + f.Version = int(e.data.uvarint()) + case 5: + // TODO: redirection + case 6: + // TODO: owner + } + if err != nil { + return nil, err + } + } + return f, nil +} + +// parseEncryptionBlock calculates the key for block encryption. 
+func (a *archive50) parseEncryptionBlock(b readBuf) error { + if ver := b.uvarint(); ver != 0 { + return errUnknownEncMethod + } + flags := b.uvarint() + keys, err := a.getKeys(&b) + if err != nil { + return err + } + if flags&enc5CheckPresent > 0 { + if err := checkPassword(&b, keys); err != nil { + return err + } + } + a.blockKey = keys[0] + return nil +} + +func (a *archive50) readBlockHeader() (*blockHeader50, error) { + r := io.Reader(a.v) + if a.blockKey != nil { + // block is encrypted + iv := a.buf[:16] + if err := readFull(r, iv); err != nil { + return nil, err + } + r = newAesDecryptReader(r, a.blockKey, iv) + } + + b := a.buf[:minHeaderSize] + if err := readFull(r, b); err != nil { + return nil, err + } + crc := b.uint32() + + hash := crc32.NewIEEE() + hash.Write(b) + + size := int(b.uvarint()) // header size + if size > cap(a.buf) { + a.buf = readBuf(make([]byte, size)) + } else { + a.buf = a.buf[:size] + } + n := copy(a.buf, b) // copy left over bytes + if err := readFull(r, a.buf[n:]); err != nil { // read rest of header + return nil, err + } + + // check header crc + hash.Write(a.buf[n:]) + if crc != hash.Sum32() { + return nil, errBadHeaderCrc + } + + b = a.buf + h := new(blockHeader50) + h.htype = b.uvarint() + h.flags = b.uvarint() + + var extraSize int + if h.flags&block5HasExtra > 0 { + extraSize = int(b.uvarint()) + } + if h.flags&block5HasData > 0 { + h.dataSize = int64(b.uvarint()) + } + if len(b) < extraSize { + return nil, errCorruptHeader + } + h.data = b.bytes(len(b) - extraSize) + + // read header extra records + for len(b) > 0 { + size = int(b.uvarint()) + if len(b) < size { + return nil, errCorruptHeader + } + data := readBuf(b.bytes(size)) + ftype := data.uvarint() + h.extra = append(h.extra, extra{ftype, data}) + } + + return h, nil +} + +// next advances to the next file block in the archive +func (a *archive50) next() (*fileBlockHeader, error) { + for { + h, err := a.readBlockHeader() + if err != nil { + return nil, err + } + 
a.byteReader = limitByteReader(a.v, h.dataSize) + switch h.htype { + case block5File: + return a.parseFileHeader(h) + case block5Arc: + flags := h.data.uvarint() + a.multi = flags&arc5MultiVol > 0 + a.solid = flags&arc5Solid > 0 + case block5Encrypt: + err = a.parseEncryptionBlock(h.data) + case block5End: + flags := h.data.uvarint() + if flags&endArc5NotLast == 0 || !a.multi { + return nil, errArchiveEnd + } + return nil, errArchiveContinues + default: + // discard block data + _, err = io.Copy(ioutil.Discard, a.byteReader) + } + if err != nil { + return nil, err + } + } +} + +func (a *archive50) version() int { return fileFmt50 } + +func (a *archive50) reset() { + a.blockKey = nil // reset encryption when opening new volume file +} + +func (a *archive50) isSolid() bool { + return a.solid +} + +// newArchive50 creates a new fileBlockReader for a Version 5 archive. +func newArchive50(r *bufio.Reader, password string) fileBlockReader { + a := new(archive50) + a.v = r + a.pass = []byte(password) + a.buf = make([]byte, 100) + return a +} diff --git a/vendor/github.com/nwaples/rardecode/bit_reader.go b/vendor/github.com/nwaples/rardecode/bit_reader.go new file mode 100644 index 00000000..9b284efa --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/bit_reader.go @@ -0,0 +1,119 @@ +package rardecode + +import "io" + +type bitReader interface { + readBits(n uint) (int, error) // read n bits of data + unreadBits(n uint) // revert the reading of the last n bits read +} + +type limitedBitReader struct { + br bitReader + n int + err error // error to return if br returns EOF before all n bits have been read +} + +// limitBitReader returns a bitReader that reads from br and stops with io.EOF after n bits. +// If br returns an io.EOF before reading n bits, err is returned. 
+func limitBitReader(br bitReader, n int, err error) bitReader { + return &limitedBitReader{br, n, err} +} + +func (l *limitedBitReader) readBits(n uint) (int, error) { + if int(n) > l.n { + return 0, io.EOF + } + v, err := l.br.readBits(n) + if err == nil { + l.n -= int(n) + } else if err == io.EOF { + err = l.err + } + return v, err +} + +func (l *limitedBitReader) unreadBits(n uint) { + l.n += int(n) + l.br.unreadBits(n) +} + +// rarBitReader wraps an io.ByteReader to perform various bit and byte +// reading utility functions used in RAR file processing. +type rarBitReader struct { + r io.ByteReader + v int + n uint +} + +func (r *rarBitReader) reset(br io.ByteReader) { + r.r = br + r.n = 0 + r.v = 0 +} + +func (r *rarBitReader) readBits(n uint) (int, error) { + for n > r.n { + c, err := r.r.ReadByte() + if err != nil { + return 0, err + } + r.v = r.v<<8 | int(c) + r.n += 8 + } + r.n -= n + return (r.v >> r.n) & ((1 << n) - 1), nil +} + +func (r *rarBitReader) unreadBits(n uint) { + r.n += n +} + +// alignByte aligns the current bit reading input to the next byte boundary. +func (r *rarBitReader) alignByte() { + r.n -= r.n % 8 +} + +// readUint32 reads a RAR V3 encoded uint32 +func (r *rarBitReader) readUint32() (uint32, error) { + n, err := r.readBits(2) + if err != nil { + return 0, err + } + if n != 1 { + n, err = r.readBits(4 << uint(n)) + return uint32(n), err + } + n, err = r.readBits(4) + if err != nil { + return 0, err + } + if n == 0 { + n, err = r.readBits(8) + n |= -1 << 8 + return uint32(n), err + } + nlow, err := r.readBits(4) + n = n<<4 | nlow + return uint32(n), err +} + +func (r *rarBitReader) ReadByte() (byte, error) { + n, err := r.readBits(8) + return byte(n), err +} + +// readFull reads len(p) bytes into p. If fewer bytes are read an error is returned. 
+func (r *rarBitReader) readFull(p []byte) error { + for i := range p { + c, err := r.ReadByte() + if err != nil { + return err + } + p[i] = c + } + return nil +} + +func newRarBitReader(r io.ByteReader) *rarBitReader { + return &rarBitReader{r: r} +} diff --git a/vendor/github.com/nwaples/rardecode/decode29.go b/vendor/github.com/nwaples/rardecode/decode29.go new file mode 100644 index 00000000..638645e7 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode29.go @@ -0,0 +1,264 @@ +package rardecode + +import ( + "bytes" + "errors" + "io" +) + +const ( + maxCodeSize = 0x10000 + maxUniqueFilters = 1024 +) + +var ( + // Errors marking the end of the decoding block and/or file + endOfFile = errors.New("rardecode: end of file") + endOfBlock = errors.New("rardecode: end of block") + endOfBlockAndFile = errors.New("rardecode: end of block and file") +) + +// decoder29 implements the decoder interface for RAR 3.0 compression (unpack version 29) +// Decode input is broken up into 1 or more blocks. The start of each block specifies +// the decoding algorithm (ppm or lz) and optional data to initialize with. +// Block length is not stored, it is determined only after decoding an end of file and/or +// block marker in the data. +type decoder29 struct { + br *rarBitReader + eof bool // at file eof + fnum int // current filter number (index into filters) + flen []int // filter block length history + filters []v3Filter // list of current filters used by archive encoding + + // current decode function (lz or ppm). + // When called it should perform a single decode operation, and either apply the + // data to the window or return they raw bytes for a filter. + decode func(w *window) ([]byte, error) + + lz lz29Decoder // lz decoder + ppm ppm29Decoder // ppm decoder +} + +// init intializes the decoder for decoding a new file. 
+func (d *decoder29) init(r io.ByteReader, reset bool) error { + if d.br == nil { + d.br = newRarBitReader(r) + } else { + d.br.reset(r) + } + d.eof = false + if reset { + d.initFilters() + d.lz.reset() + d.ppm.reset() + d.decode = nil + } + if d.decode == nil { + return d.readBlockHeader() + } + return nil +} + +func (d *decoder29) initFilters() { + d.fnum = 0 + d.flen = nil + d.filters = nil +} + +// readVMCode reads the raw bytes for the code/commands used in a vm filter +func readVMCode(br *rarBitReader) ([]byte, error) { + n, err := br.readUint32() + if err != nil { + return nil, err + } + if n > maxCodeSize || n == 0 { + return nil, errInvalidFilter + } + buf := make([]byte, n) + err = br.readFull(buf) + if err != nil { + return nil, err + } + var x byte + for _, c := range buf[1:] { + x ^= c + } + // simple xor checksum on data + if x != buf[0] { + return nil, errInvalidFilter + } + return buf, nil +} + +func (d *decoder29) parseVMFilter(buf []byte) (*filterBlock, error) { + flags := buf[0] + br := newRarBitReader(bytes.NewReader(buf[1:])) + fb := new(filterBlock) + + // Find the filter number which is an index into d.filters. + // If filter number == len(d.filters) it is a new filter to be added. 
+ if flags&0x80 > 0 { + n, err := br.readUint32() + if err != nil { + return nil, err + } + if n == 0 { + d.initFilters() + fb.reset = true + } else { + n-- + if n > maxUniqueFilters { + return nil, errInvalidFilter + } + if int(n) > len(d.filters) { + return nil, errInvalidFilter + } + } + d.fnum = int(n) + } + + // filter offset + n, err := br.readUint32() + if err != nil { + return nil, err + } + if flags&0x40 > 0 { + n += 258 + } + fb.offset = int(n) + + // filter length + if d.fnum == len(d.flen) { + d.flen = append(d.flen, 0) + } + if flags&0x20 > 0 { + n, err = br.readUint32() + if err != nil { + return nil, err + } + //fb.length = int(n) + d.flen[d.fnum] = int(n) + } + fb.length = d.flen[d.fnum] + + // initial register values + r := make(map[int]uint32) + if flags&0x10 > 0 { + bits, err := br.readBits(vmRegs - 1) + if err != nil { + return nil, err + } + for i := 0; i < vmRegs-1; i++ { + if bits&1 > 0 { + r[i], err = br.readUint32() + if err != nil { + return nil, err + } + } + bits >>= 1 + } + } + + // filter is new so read the code for it + if d.fnum == len(d.filters) { + code, err := readVMCode(br) + if err != nil { + return nil, err + } + f, err := getV3Filter(code) + if err != nil { + return nil, err + } + d.filters = append(d.filters, f) + d.flen = append(d.flen, fb.length) + } + + // read global data + var g []byte + if flags&0x08 > 0 { + n, err := br.readUint32() + if err != nil { + return nil, err + } + if n > vmGlobalSize-vmFixedGlobalSize { + return nil, errInvalidFilter + } + g = make([]byte, n) + err = br.readFull(g) + if err != nil { + return nil, err + } + } + + // create filter function + f := d.filters[d.fnum] + fb.filter = func(buf []byte, offset int64) ([]byte, error) { + return f(r, g, buf, offset) + } + + return fb, nil +} + +// readBlockHeader determines and initializes the current decoder for a new decode block. 
+func (d *decoder29) readBlockHeader() error { + d.br.alignByte() + n, err := d.br.readBits(1) + if err == nil { + if n > 0 { + d.decode = d.ppm.decode + err = d.ppm.init(d.br) + } else { + d.decode = d.lz.decode + err = d.lz.init(d.br) + } + } + if err == io.EOF { + err = errDecoderOutOfData + } + return err + +} + +func (d *decoder29) fill(w *window) ([]*filterBlock, error) { + if d.eof { + return nil, io.EOF + } + + var fl []*filterBlock + + for w.available() > 0 { + b, err := d.decode(w) // perform a single decode operation + if len(b) > 0 && err == nil { + // parse raw data for filter and add to list of filters + var f *filterBlock + f, err = d.parseVMFilter(b) + if f != nil { + // make offset relative to read index (from write index) + f.offset += w.buffered() + fl = append(fl, f) + } + } + + switch err { + case nil: + continue + case endOfBlock: + err = d.readBlockHeader() + if err == nil { + continue + } + case endOfFile: + d.eof = true + err = io.EOF + case endOfBlockAndFile: + d.eof = true + d.decode = nil // clear decoder, it will be setup by next init() + err = io.EOF + case io.EOF: + err = errDecoderOutOfData + } + return fl, err + } + // return filters + return fl, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decode29_lz.go b/vendor/github.com/nwaples/rardecode/decode29_lz.go new file mode 100644 index 00000000..94470853 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode29_lz.go @@ -0,0 +1,247 @@ +package rardecode + +const ( + mainSize = 299 + offsetSize = 60 + lowOffsetSize = 17 + lengthSize = 28 + tableSize = mainSize + offsetSize + lowOffsetSize + lengthSize +) + +var ( + lengthBase = [28]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, + 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224} + lengthExtraBits = [28]uint{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, + 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5} + + offsetBase = [60]int{0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, + 128, 192, 256, 384, 512, 768, 1024, 
1536, 2048, 3072, 4096, + 6144, 8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304, + 131072, 196608, 262144, 327680, 393216, 458752, 524288, + 589824, 655360, 720896, 786432, 851968, 917504, 983040, + 1048576, 1310720, 1572864, 1835008, 2097152, 2359296, 2621440, + 2883584, 3145728, 3407872, 3670016, 3932160} + offsetExtraBits = [60]uint{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, + 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, + 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18} + + shortOffsetBase = [8]int{0, 4, 8, 16, 32, 64, 128, 192} + shortOffsetExtraBits = [8]uint{2, 2, 3, 4, 5, 6, 6, 6} +) + +type lz29Decoder struct { + codeLength [tableSize]byte + + mainDecoder huffmanDecoder + offsetDecoder huffmanDecoder + lowOffsetDecoder huffmanDecoder + lengthDecoder huffmanDecoder + + offset [4]int // history of previous offsets + length int // previous length + lowOffset int + lowOffsetRepeats int + + br *rarBitReader +} + +func (d *lz29Decoder) reset() { + for i := range d.offset { + d.offset[i] = 0 + } + d.length = 0 + for i := range d.codeLength { + d.codeLength[i] = 0 + } +} + +func (d *lz29Decoder) init(br *rarBitReader) error { + d.br = br + d.lowOffset = 0 + d.lowOffsetRepeats = 0 + + n, err := d.br.readBits(1) + if err != nil { + return err + } + addOld := n > 0 + + cl := d.codeLength[:] + if err = readCodeLengthTable(d.br, cl, addOld); err != nil { + return err + } + + d.mainDecoder.init(cl[:mainSize]) + cl = cl[mainSize:] + d.offsetDecoder.init(cl[:offsetSize]) + cl = cl[offsetSize:] + d.lowOffsetDecoder.init(cl[:lowOffsetSize]) + cl = cl[lowOffsetSize:] + d.lengthDecoder.init(cl) + + return nil +} + +func (d *lz29Decoder) readFilterData() (b []byte, err error) { + flags, err := d.br.ReadByte() + if err != nil { + return nil, err + } + + n := (int(flags) & 7) + 1 + switch n { + case 7: + n, err = d.br.readBits(8) + n += 7 + if err != nil { + return nil, err + } + case 8: + 
n, err = d.br.readBits(16) + if err != nil { + return nil, err + } + } + + buf := make([]byte, n+1) + buf[0] = flags + err = d.br.readFull(buf[1:]) + + return buf, err +} + +func (d *lz29Decoder) readEndOfBlock() error { + n, err := d.br.readBits(1) + if err != nil { + return err + } + if n > 0 { + return endOfBlock + } + n, err = d.br.readBits(1) + if err != nil { + return err + } + if n > 0 { + return endOfBlockAndFile + } + return endOfFile +} + +func (d *lz29Decoder) decode(win *window) ([]byte, error) { + sym, err := d.mainDecoder.readSym(d.br) + if err != nil { + return nil, err + } + + switch { + case sym < 256: + // literal + win.writeByte(byte(sym)) + return nil, nil + case sym == 256: + return nil, d.readEndOfBlock() + case sym == 257: + return d.readFilterData() + case sym == 258: + // use previous offset and length + case sym < 263: + i := sym - 259 + offset := d.offset[i] + copy(d.offset[1:i+1], d.offset[:i]) + d.offset[0] = offset + + i, err := d.lengthDecoder.readSym(d.br) + if err != nil { + return nil, err + } + d.length = lengthBase[i] + 2 + bits := lengthExtraBits[i] + if bits > 0 { + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + d.length += n + } + case sym < 271: + i := sym - 263 + copy(d.offset[1:], d.offset[:]) + offset := shortOffsetBase[i] + 1 + bits := shortOffsetExtraBits[i] + if bits > 0 { + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + offset += n + } + d.offset[0] = offset + + d.length = 2 + default: + i := sym - 271 + d.length = lengthBase[i] + 3 + bits := lengthExtraBits[i] + if bits > 0 { + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + d.length += n + } + + i, err = d.offsetDecoder.readSym(d.br) + if err != nil { + return nil, err + } + offset := offsetBase[i] + 1 + bits = offsetExtraBits[i] + + switch { + case bits >= 4: + if bits > 4 { + n, err := d.br.readBits(bits - 4) + if err != nil { + return nil, err + } + offset += n << 4 + } + + if 
d.lowOffsetRepeats > 0 { + d.lowOffsetRepeats-- + offset += d.lowOffset + } else { + n, err := d.lowOffsetDecoder.readSym(d.br) + if err != nil { + return nil, err + } + if n == 16 { + d.lowOffsetRepeats = 15 + offset += d.lowOffset + } else { + offset += n + d.lowOffset = n + } + } + case bits > 0: + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + offset += n + } + + if offset >= 0x2000 { + d.length++ + if offset >= 0x40000 { + d.length++ + } + } + copy(d.offset[1:], d.offset[:]) + d.offset[0] = offset + } + win.copyBytes(d.length, d.offset[0]) + return nil, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decode29_ppm.go b/vendor/github.com/nwaples/rardecode/decode29_ppm.go new file mode 100644 index 00000000..39c31995 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode29_ppm.go @@ -0,0 +1,132 @@ +package rardecode + +import "io" + +type ppm29Decoder struct { + m model // ppm model + esc byte // escape character + br io.ByteReader +} + +func (d *ppm29Decoder) init(br *rarBitReader) error { + maxOrder, err := br.readBits(7) + if err != nil { + return err + } + reset := maxOrder&0x20 > 0 + + // Should have flushed all unread bits from bitReader by now, + // use underlying ByteReader + d.br = br.r + + var maxMB int + if reset { + c, err := d.br.ReadByte() + if err != nil { + return err + } + maxMB = int(c) + 1 + } + + if maxOrder&0x40 > 0 { + d.esc, err = d.br.ReadByte() + if err != nil { + return err + } + } + + maxOrder = (maxOrder & 0x1f) + 1 + if maxOrder > 16 { + maxOrder = 16 + (maxOrder-16)*3 + } + + return d.m.init(d.br, reset, maxOrder, maxMB) +} + +func (d *ppm29Decoder) reset() { + d.esc = 2 +} + +func (d *ppm29Decoder) readFilterData() ([]byte, error) { + c, err := d.m.ReadByte() + if err != nil { + return nil, err + } + n := int(c&7) + 1 + if n == 7 { + b, err := d.m.ReadByte() + if err != nil { + return nil, err + } + n += int(b) + } else if n == 8 { + b, err := d.m.ReadByte() + if err != nil { + return nil, 
err + } + n = int(b) << 8 + b, err = d.m.ReadByte() + if err != nil { + return nil, err + } + n |= int(b) + } + + n++ + buf := make([]byte, n) + buf[0] = byte(c) + for i := 1; i < n; i++ { + buf[i], err = d.m.ReadByte() + if err != nil { + return nil, err + } + } + return buf, nil +} + +func (d *ppm29Decoder) decode(w *window) ([]byte, error) { + c, err := d.m.ReadByte() + if err != nil { + return nil, err + } + if c != d.esc { + w.writeByte(c) + return nil, nil + } + c, err = d.m.ReadByte() + if err != nil { + return nil, err + } + + switch c { + case 0: + return nil, endOfBlock + case 2: + return nil, endOfBlockAndFile + case 3: + return d.readFilterData() + case 4: + offset := 0 + for i := 0; i < 3; i++ { + c, err = d.m.ReadByte() + if err != nil { + return nil, err + } + offset = offset<<8 | int(c) + } + len, err := d.m.ReadByte() + if err != nil { + return nil, err + } + w.copyBytes(int(len)+32, offset+2) + case 5: + len, err := d.m.ReadByte() + if err != nil { + return nil, err + } + w.copyBytes(int(len)+4, 1) + default: + w.writeByte(d.esc) + } + return nil, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decode50.go b/vendor/github.com/nwaples/rardecode/decode50.go new file mode 100644 index 00000000..1939a444 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode50.go @@ -0,0 +1,294 @@ +package rardecode + +import ( + "errors" + "io" +) + +const ( + mainSize5 = 306 + offsetSize5 = 64 + lowoffsetSize5 = 16 + lengthSize5 = 44 + tableSize5 = mainSize5 + offsetSize5 + lowoffsetSize5 + lengthSize5 +) + +var ( + errUnknownFilter = errors.New("rardecode: unknown V5 filter") + errCorruptDecodeHeader = errors.New("rardecode: corrupt decode header") +) + +// decoder50 implements the decoder interface for RAR 5 compression. +// Decode input it broken up into 1 or more blocks. Each block starts with +// a header containing block length and optional code length tables to initialize +// the huffman decoders with. 
+type decoder50 struct { + r io.ByteReader + br bitReader // bit reader for current data block + codeLength [tableSize5]byte + + lastBlock bool // current block is last block in compressed file + + mainDecoder huffmanDecoder + offsetDecoder huffmanDecoder + lowoffsetDecoder huffmanDecoder + lengthDecoder huffmanDecoder + + offset [4]int + length int +} + +func (d *decoder50) init(r io.ByteReader, reset bool) error { + d.r = r + d.lastBlock = false + + if reset { + for i := range d.offset { + d.offset[i] = 0 + } + d.length = 0 + for i := range d.codeLength { + d.codeLength[i] = 0 + } + } + err := d.readBlockHeader() + if err == io.EOF { + return errDecoderOutOfData + } + return err +} + +func (d *decoder50) readBlockHeader() error { + flags, err := d.r.ReadByte() + if err != nil { + return err + } + + bytecount := (flags>>3)&3 + 1 + if bytecount == 4 { + return errCorruptDecodeHeader + } + + hsum, err := d.r.ReadByte() + if err != nil { + return err + } + + blockBits := int(flags)&0x07 + 1 + blockBytes := 0 + sum := 0x5a ^ flags + for i := byte(0); i < bytecount; i++ { + n, err := d.r.ReadByte() + if err != nil { + return err + } + sum ^= n + blockBytes |= int(n) << (i * 8) + } + if sum != hsum { // bad header checksum + return errCorruptDecodeHeader + } + blockBits += (blockBytes - 1) * 8 + + // create bit reader for block + d.br = limitBitReader(newRarBitReader(d.r), blockBits, errDecoderOutOfData) + d.lastBlock = flags&0x40 > 0 + + if flags&0x80 > 0 { + // read new code length tables and reinitialize huffman decoders + cl := d.codeLength[:] + err = readCodeLengthTable(d.br, cl, false) + if err != nil { + return err + } + d.mainDecoder.init(cl[:mainSize5]) + cl = cl[mainSize5:] + d.offsetDecoder.init(cl[:offsetSize5]) + cl = cl[offsetSize5:] + d.lowoffsetDecoder.init(cl[:lowoffsetSize5]) + cl = cl[lowoffsetSize5:] + d.lengthDecoder.init(cl) + } + return nil +} + +func slotToLength(br bitReader, n int) (int, error) { + if n >= 8 { + bits := uint(n/4 - 1) + n = (4 | 
(n & 3)) << bits + if bits > 0 { + b, err := br.readBits(bits) + if err != nil { + return 0, err + } + n |= b + } + } + n += 2 + return n, nil +} + +// readFilter5Data reads an encoded integer used in V5 filters. +func readFilter5Data(br bitReader) (int, error) { + // TODO: should data really be uint? (for 32bit ints). + // It will be masked later anyway by decode window mask. + bytes, err := br.readBits(2) + if err != nil { + return 0, err + } + bytes++ + + var data int + for i := 0; i < bytes; i++ { + n, err := br.readBits(8) + if err != nil { + return 0, err + } + data |= n << (uint(i) * 8) + } + return data, nil +} + +func readFilter(br bitReader) (*filterBlock, error) { + fb := new(filterBlock) + var err error + + fb.offset, err = readFilter5Data(br) + if err != nil { + return nil, err + } + fb.length, err = readFilter5Data(br) + if err != nil { + return nil, err + } + ftype, err := br.readBits(3) + if err != nil { + return nil, err + } + switch ftype { + case 0: + n, err := br.readBits(5) + if err != nil { + return nil, err + } + fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterDelta(n+1, buf) } + case 1: + fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterE8(0xe8, true, buf, offset) } + case 2: + fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterE8(0xe9, true, buf, offset) } + case 3: + fb.filter = filterArm + default: + return nil, errUnknownFilter + } + return fb, nil +} + +func (d *decoder50) decodeSym(win *window, sym int) (*filterBlock, error) { + switch { + case sym < 256: + // literal + win.writeByte(byte(sym)) + return nil, nil + case sym == 256: + f, err := readFilter(d.br) + f.offset += win.buffered() + return f, err + case sym == 257: + // use previous offset and length + case sym < 262: + i := sym - 258 + offset := d.offset[i] + copy(d.offset[1:i+1], d.offset[:i]) + d.offset[0] = offset + + sl, err := d.lengthDecoder.readSym(d.br) + if err != nil { + return nil, err + } + 
d.length, err = slotToLength(d.br, sl) + if err != nil { + return nil, err + } + default: + length, err := slotToLength(d.br, sym-262) + if err != nil { + return nil, err + } + + offset := 1 + slot, err := d.offsetDecoder.readSym(d.br) + if err != nil { + return nil, err + } + if slot < 4 { + offset += slot + } else { + bits := uint(slot/2 - 1) + offset += (2 | (slot & 1)) << bits + + if bits >= 4 { + if bits > 4 { + n, err := d.br.readBits(bits - 4) + if err != nil { + return nil, err + } + offset += n << 4 + } + n, err := d.lowoffsetDecoder.readSym(d.br) + if err != nil { + return nil, err + } + offset += n + } else { + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + offset += n + } + } + if offset > 0x100 { + length++ + if offset > 0x2000 { + length++ + if offset > 0x40000 { + length++ + } + } + } + copy(d.offset[1:], d.offset[:]) + d.offset[0] = offset + d.length = length + } + win.copyBytes(d.length, d.offset[0]) + return nil, nil +} + +func (d *decoder50) fill(w *window) ([]*filterBlock, error) { + var fl []*filterBlock + + for w.available() > 0 { + sym, err := d.mainDecoder.readSym(d.br) + if err == nil { + var f *filterBlock + f, err = d.decodeSym(w, sym) + if f != nil { + fl = append(fl, f) + } + } else if err == io.EOF { + // reached end of the block + if d.lastBlock { + return fl, io.EOF + } + err = d.readBlockHeader() + } + if err != nil { + if err == io.EOF { + return fl, errDecoderOutOfData + } + return fl, err + } + } + return fl, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decode_reader.go b/vendor/github.com/nwaples/rardecode/decode_reader.go new file mode 100644 index 00000000..b346936c --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode_reader.go @@ -0,0 +1,290 @@ +package rardecode + +import ( + "errors" + "io" +) + +const ( + minWindowSize = 0x40000 + maxQueuedFilters = 8192 +) + +var ( + errTooManyFilters = errors.New("rardecode: too many filters") + errInvalidFilter = errors.New("rardecode: 
invalid filter") +) + +// filter functions take a byte slice, the current output offset and +// returns transformed data. +type filter func(b []byte, offset int64) ([]byte, error) + +// filterBlock is a block of data to be processed by a filter. +type filterBlock struct { + length int // length of block + offset int // bytes to be read before start of block + reset bool // drop all existing queued filters + filter filter // filter function +} + +// decoder is the interface for decoding compressed data +type decoder interface { + init(r io.ByteReader, reset bool) error // initialize decoder for current file + fill(w *window) ([]*filterBlock, error) // fill window with decoded data, returning any filters +} + +// window is a sliding window buffer. +type window struct { + buf []byte + mask int // buf length mask + r int // index in buf for reads (beginning) + w int // index in buf for writes (end) + l int // length of bytes to be processed by copyBytes + o int // offset of bytes to be processed by copyBytes +} + +// buffered returns the number of bytes yet to be read from window +func (w *window) buffered() int { return (w.w - w.r) & w.mask } + +// available returns the number of bytes that can be written before the window is full +func (w *window) available() int { return (w.r - w.w - 1) & w.mask } + +func (w *window) reset(log2size uint, clear bool) { + size := 1 << log2size + if size < minWindowSize { + size = minWindowSize + } + if size > len(w.buf) { + b := make([]byte, size) + if clear { + w.w = 0 + } else if len(w.buf) > 0 { + n := copy(b, w.buf[w.w:]) + n += copy(b[n:], w.buf[:w.w]) + w.w = n + } + w.buf = b + w.mask = size - 1 + } else if clear { + for i := range w.buf { + w.buf[i] = 0 + } + w.w = 0 + } + w.r = w.w +} + +// writeByte writes c to the end of the window +func (w *window) writeByte(c byte) { + w.buf[w.w] = c + w.w = (w.w + 1) & w.mask +} + +// copyBytes copies len bytes at off distance from the end +// to the end of the window. 
+func (w *window) copyBytes(len, off int) { + len &= w.mask + + n := w.available() + if len > n { + // if there is not enough space availaible we copy + // as much as we can and save the offset and length + // of the remaining data to be copied later. + w.l = len - n + w.o = off + len = n + } + + i := (w.w - off) & w.mask + for ; len > 0; len-- { + w.buf[w.w] = w.buf[i] + w.w = (w.w + 1) & w.mask + i = (i + 1) & w.mask + } +} + +// read reads bytes from the beginning of the window into p +func (w *window) read(p []byte) (n int) { + if w.r > w.w { + n = copy(p, w.buf[w.r:]) + w.r = (w.r + n) & w.mask + p = p[n:] + } + if w.r < w.w { + l := copy(p, w.buf[w.r:w.w]) + w.r += l + n += l + } + if w.l > 0 && n > 0 { + // if we have successfully read data, copy any + // leftover data from a previous copyBytes. + l := w.l + w.l = 0 + w.copyBytes(l, w.o) + } + return n +} + +// decodeReader implements io.Reader for decoding compressed data in RAR archives. +type decodeReader struct { + win window // sliding window buffer used as decode dictionary + dec decoder // decoder being used to unpack file + tot int64 // total bytes read + buf []byte // filter input/output buffer + outbuf []byte // filter output not yet read + err error + filters []*filterBlock // list of filterBlock's, each with offset relative to previous in list +} + +func (d *decodeReader) init(r io.ByteReader, dec decoder, winsize uint, reset bool) error { + if reset { + d.filters = nil + } + d.err = nil + d.outbuf = nil + d.tot = 0 + d.win.reset(winsize, reset) + d.dec = dec + return d.dec.init(r, reset) +} + +func (d *decodeReader) readErr() error { + err := d.err + d.err = nil + return err +} + +// queueFilter adds a filterBlock to the end decodeReader's filters. 
+func (d *decodeReader) queueFilter(f *filterBlock) error { + if f.reset { + d.filters = nil + } + if len(d.filters) >= maxQueuedFilters { + return errTooManyFilters + } + // offset & length must be < window size + f.offset &= d.win.mask + f.length &= d.win.mask + // make offset relative to previous filter in list + for _, fb := range d.filters { + if f.offset < fb.offset { + // filter block must not start before previous filter + return errInvalidFilter + } + f.offset -= fb.offset + } + d.filters = append(d.filters, f) + return nil +} + +// processFilters processes any filters valid at the current read index +// and stores the output in outbuf. +func (d *decodeReader) processFilters() (err error) { + f := d.filters[0] + if f.offset > 0 { + return nil + } + d.filters = d.filters[1:] + if d.win.buffered() < f.length { + // fill() didn't return enough bytes + err = d.readErr() + if err == nil || err == io.EOF { + return errInvalidFilter + } + return err + } + + if cap(d.buf) < f.length { + d.buf = make([]byte, f.length) + } + d.outbuf = d.buf[:f.length] + n := d.win.read(d.outbuf) + for { + // run filter passing buffer and total bytes read so far + d.outbuf, err = f.filter(d.outbuf, d.tot) + if err != nil { + return err + } + if cap(d.outbuf) > cap(d.buf) { + // Filter returned a bigger buffer, save it for future filters. + d.buf = d.outbuf + } + if len(d.filters) == 0 { + return nil + } + f = d.filters[0] + + if f.offset != 0 { + // next filter not at current offset + f.offset -= n + return nil + } + if f.length != len(d.outbuf) { + return errInvalidFilter + } + d.filters = d.filters[1:] + + if cap(d.outbuf) < cap(d.buf) { + // Filter returned a smaller buffer. Copy it back to the saved buffer + // so the next filter can make use of the larger buffer if needed. + d.outbuf = append(d.buf[:0], d.outbuf...) 
+ } + } +} + +// fill fills the decodeReader's window +func (d *decodeReader) fill() { + if d.err != nil { + return + } + var fl []*filterBlock + fl, d.err = d.dec.fill(&d.win) // fill window using decoder + for _, f := range fl { + err := d.queueFilter(f) + if err != nil { + d.err = err + return + } + } +} + +// Read decodes data and stores it in p. +func (d *decodeReader) Read(p []byte) (n int, err error) { + if len(d.outbuf) == 0 { + // no filter output, see if we need to create more + if d.win.buffered() == 0 { + // fill empty window + d.fill() + if d.win.buffered() == 0 { + return 0, d.readErr() + } + } else if len(d.filters) > 0 { + f := d.filters[0] + if f.offset == 0 && f.length > d.win.buffered() { + d.fill() // filter at current offset needs more data + } + } + if len(d.filters) > 0 { + if err := d.processFilters(); err != nil { + return 0, err + } + } + } + if len(d.outbuf) > 0 { + // copy filter output into p + n = copy(p, d.outbuf) + d.outbuf = d.outbuf[n:] + } else if len(d.filters) > 0 { + f := d.filters[0] + if f.offset < len(p) { + // only read data up to beginning of next filter + p = p[:f.offset] + } + n = d.win.read(p) // read directly from window + f.offset -= n // adjust first filter offset by bytes just read + } else { + n = d.win.read(p) // read directly from window + } + d.tot += int64(n) + return n, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decrypt_reader.go b/vendor/github.com/nwaples/rardecode/decrypt_reader.go new file mode 100644 index 00000000..bb9f279c --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decrypt_reader.go @@ -0,0 +1,126 @@ +package rardecode + +import ( + "crypto/aes" + "crypto/cipher" + "io" +) + +// cipherBlockReader implements Block Mode decryption of an io.Reader object. 
+type cipherBlockReader struct { + r io.Reader + mode cipher.BlockMode + inbuf []byte // input buffer for partial data block + outbuf []byte // output buffer used when output slice < block size + n int // bytes read from outbuf + err error +} + +// read reads and decrypts one or more input blocks into p. +// len(p) must be >= cipher block size. +func (cr *cipherBlockReader) read(p []byte) (n int, err error) { + bs := cr.mode.BlockSize() + // round p down to a multiple of the block size + l := len(p) - len(p)%bs + p = p[:l] + + l = len(cr.inbuf) + if l > 0 { + // copy any buffered input into p + copy(p, cr.inbuf) + cr.inbuf = cr.inbuf[:0] + } + // read data for at least one block + n, err = io.ReadAtLeast(cr.r, p[l:], bs-l) + n += l + p = p[:n] + + l = n % bs + // check if p is a multiple of the cipher block size + if l > 0 { + n -= l + // save trailing partial block to process later + cr.inbuf = append(cr.inbuf, p[n:]...) + p = p[:n] + } + + if err != nil { + if err == io.ErrUnexpectedEOF || err == io.ErrShortBuffer { + // ignore trailing bytes < block size length + err = io.EOF + } + return 0, err + } + cr.mode.CryptBlocks(p, p) // decrypt block(s) + return n, nil +} + +// Read reads and decrypts data into p. +// If the input is not a multiple of the cipher block size, +// the trailing bytes will be ignored. +func (cr *cipherBlockReader) Read(p []byte) (n int, err error) { + for { + if cr.n < len(cr.outbuf) { + // return buffered output + n = copy(p, cr.outbuf[cr.n:]) + cr.n += n + return n, nil + } + if cr.err != nil { + err = cr.err + cr.err = nil + return 0, err + } + if len(p) >= cap(cr.outbuf) { + break + } + // p is not large enough to process a block, use outbuf instead + n, cr.err = cr.read(cr.outbuf[:cap(cr.outbuf)]) + cr.outbuf = cr.outbuf[:n] + cr.n = 0 + } + // read blocks into p + return cr.read(p) +} + +// ReadByte returns the next decrypted byte. 
+func (cr *cipherBlockReader) ReadByte() (byte, error) { + for { + if cr.n < len(cr.outbuf) { + c := cr.outbuf[cr.n] + cr.n++ + return c, nil + } + if cr.err != nil { + err := cr.err + cr.err = nil + return 0, err + } + // refill outbuf + var n int + n, cr.err = cr.read(cr.outbuf[:cap(cr.outbuf)]) + cr.outbuf = cr.outbuf[:n] + cr.n = 0 + } +} + +// newCipherBlockReader returns a cipherBlockReader that decrypts the given io.Reader using +// the provided block mode cipher. +func newCipherBlockReader(r io.Reader, mode cipher.BlockMode) *cipherBlockReader { + cr := &cipherBlockReader{r: r, mode: mode} + cr.outbuf = make([]byte, 0, mode.BlockSize()) + cr.inbuf = make([]byte, 0, mode.BlockSize()) + return cr +} + +// newAesDecryptReader returns a cipherBlockReader that decrypts input from a given io.Reader using AES. +// It will panic if the provided key is invalid. +func newAesDecryptReader(r io.Reader, key, iv []byte) *cipherBlockReader { + block, err := aes.NewCipher(key) + if err != nil { + panic(err) + } + mode := cipher.NewCBCDecrypter(block, iv) + + return newCipherBlockReader(r, mode) +} diff --git a/vendor/github.com/nwaples/rardecode/filters.go b/vendor/github.com/nwaples/rardecode/filters.go new file mode 100644 index 00000000..a9eb0407 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/filters.go @@ -0,0 +1,416 @@ +package rardecode + +import ( + "bytes" + "encoding/binary" + "hash/crc32" + "io" +) + +const ( + fileSize = 0x1000000 + + vmGlobalAddr = 0x3C000 + vmGlobalSize = 0x02000 + vmFixedGlobalSize = 0x40 + + maxUint32 = 1<<32 - 1 +) + +// v3Filter is the interface type for RAR V3 filters. +// v3Filter performs the same function as the filter type, except that it also takes +// the initial register values r, and global data as input for the RAR V3 VM. +type v3Filter func(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) + +var ( + // standardV3Filters is a list of known filters. 
We can replace the use of a vm + // filter with a custom filter function. + standardV3Filters = []struct { + crc uint32 // crc of code byte slice for filter + len int // length of code byte slice for filter + f v3Filter // replacement filter function + }{ + {0xad576887, 53, e8FilterV3}, + {0x3cd7e57e, 57, e8e9FilterV3}, + {0x3769893f, 120, itaniumFilterV3}, + {0x0e06077d, 29, deltaFilterV3}, + {0x1c2c5dc8, 149, filterRGBV3}, + {0xbc85e701, 216, filterAudioV3}, + } + + // itanium filter byte masks + byteMask = []int{4, 4, 6, 6, 0, 0, 7, 7, 4, 4, 0, 0, 4, 4, 0, 0} +) + +func filterE8(c byte, v5 bool, buf []byte, offset int64) ([]byte, error) { + off := int32(offset) + for b := buf; len(b) >= 5; { + ch := b[0] + b = b[1:] + off++ + if ch != 0xe8 && ch != c { + continue + } + if v5 { + off %= fileSize + } + addr := int32(binary.LittleEndian.Uint32(b)) + if addr < 0 { + if addr+off >= 0 { + binary.LittleEndian.PutUint32(b, uint32(addr+fileSize)) + } + } else if addr < fileSize { + binary.LittleEndian.PutUint32(b, uint32(addr-off)) + } + off += 4 + b = b[4:] + } + return buf, nil +} + +func e8FilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + return filterE8(0xe8, false, buf, offset) +} + +func e8e9FilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + return filterE8(0xe9, false, buf, offset) +} + +func getBits(buf []byte, pos, count uint) uint32 { + n := binary.LittleEndian.Uint32(buf[pos/8:]) + n >>= pos & 7 + mask := uint32(maxUint32) >> (32 - count) + return n & mask +} + +func setBits(buf []byte, pos, count uint, bits uint32) { + mask := uint32(maxUint32) >> (32 - count) + mask <<= pos & 7 + bits <<= pos & 7 + n := binary.LittleEndian.Uint32(buf[pos/8:]) + n = (n & ^mask) | (bits & mask) + binary.LittleEndian.PutUint32(buf[pos/8:], n) +} + +func itaniumFilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + fileOffset := uint32(offset) >> 4 + + for b := buf; len(b) > 21; b = b[16:] { + 
c := int(b[0]&0x1f) - 0x10 + if c >= 0 { + mask := byteMask[c] + if mask != 0 { + for i := uint(0); i <= 2; i++ { + if mask&(1<= 2*l { + res = buf[l : 2*l] // use unused capacity + } else { + res = make([]byte, l, 2*l) + } + + i := 0 + for j := 0; j < n; j++ { + var c byte + for k := j; k < len(res); k += n { + c -= buf[i] + i++ + res[k] = c + } + } + return res, nil +} + +func deltaFilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + return filterDelta(int(r[0]), buf) +} + +func abs(n int) int { + if n < 0 { + n = -n + } + return n +} + +func filterRGBV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + width := int(r[0] - 3) + posR := int(r[1]) + if posR < 0 || width < 0 { + return buf, nil + } + + var res []byte + l := len(buf) + if cap(buf) >= 2*l { + res = buf[l : 2*l] // use unused capacity + } else { + res = make([]byte, l, 2*l) + } + + for c := 0; c < 3; c++ { + var prevByte int + for i := c; i < len(res); i += 3 { + var predicted int + upperPos := i - width + if upperPos >= 3 { + upperByte := int(res[upperPos]) + upperLeftByte := int(res[upperPos-3]) + predicted = prevByte + upperByte - upperLeftByte + pa := abs(predicted - prevByte) + pb := abs(predicted - upperByte) + pc := abs(predicted - upperLeftByte) + if pa <= pb && pa <= pc { + predicted = prevByte + } else if pb <= pc { + predicted = upperByte + } else { + predicted = upperLeftByte + } + } else { + predicted = prevByte + } + prevByte = (predicted - int(buf[0])) & 0xFF + res[i] = uint8(prevByte) + buf = buf[1:] + } + + } + for i := posR; i < len(res)-2; i += 3 { + c := res[i+1] + res[i] += c + res[i+2] += c + } + return res, nil +} + +func filterAudioV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + var res []byte + l := len(buf) + if cap(buf) >= 2*l { + res = buf[l : 2*l] // use unused capacity + } else { + res = make([]byte, l, 2*l) + } + + chans := int(r[0]) + for c := 0; c < chans; c++ { + var prevByte, byteCount int + 
var diff [7]int + var d, k [3]int + + for i := c; i < len(res); i += chans { + predicted := prevByte<<3 + k[0]*d[0] + k[1]*d[1] + k[2]*d[2] + predicted = int(int8(predicted >> 3)) + + curByte := int(int8(buf[0])) + buf = buf[1:] + predicted -= curByte + res[i] = uint8(predicted) + + dd := curByte << 3 + diff[0] += abs(dd) + diff[1] += abs(dd - d[0]) + diff[2] += abs(dd + d[0]) + diff[3] += abs(dd - d[1]) + diff[4] += abs(dd + d[1]) + diff[5] += abs(dd - d[2]) + diff[6] += abs(dd + d[2]) + + prevDelta := int(int8(predicted - prevByte)) + prevByte = predicted + d[2] = d[1] + d[1] = prevDelta - d[0] + d[0] = prevDelta + + if byteCount&0x1f == 0 { + min := diff[0] + diff[0] = 0 + n := 0 + for j := 1; j < len(diff); j++ { + if diff[j] < min { + min = diff[j] + n = j + } + diff[j] = 0 + } + n-- + if n >= 0 { + m := n / 2 + if n%2 == 0 { + if k[m] >= -16 { + k[m]-- + } + } else { + if k[m] < 16 { + k[m]++ + } + } + } + } + byteCount++ + } + + } + return res, nil +} + +func filterArm(buf []byte, offset int64) ([]byte, error) { + for i := 0; len(buf)-i > 3; i += 4 { + if buf[i+3] == 0xeb { + n := uint(buf[i]) + n += uint(buf[i+1]) * 0x100 + n += uint(buf[i+2]) * 0x10000 + n -= (uint(offset) + uint(i)) / 4 + buf[i] = byte(n) + buf[i+1] = byte(n >> 8) + buf[i+2] = byte(n >> 16) + } + } + return buf, nil +} + +type vmFilter struct { + execCount uint32 + global []byte + static []byte + code []command +} + +// execute implements v3filter type for VM based RAR 3 filters. 
+func (f *vmFilter) execute(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + if len(buf) > vmGlobalAddr { + return buf, errInvalidFilter + } + v := newVM(buf) + + // register setup + v.r[3] = vmGlobalAddr + v.r[4] = uint32(len(buf)) + v.r[5] = f.execCount + for i, n := range r { + v.r[i] = n + } + + // vm global data memory block + vg := v.m[vmGlobalAddr : vmGlobalAddr+vmGlobalSize] + + // initialize fixed global memory + for i, n := range v.r[:vmRegs-1] { + binary.LittleEndian.PutUint32(vg[i*4:], n) + } + binary.LittleEndian.PutUint32(vg[0x1c:], uint32(len(buf))) + binary.LittleEndian.PutUint64(vg[0x24:], uint64(offset)) + binary.LittleEndian.PutUint32(vg[0x2c:], f.execCount) + + // registers + v.r[6] = uint32(offset) + + // copy program global memory + var n int + if len(f.global) > 0 { + n = copy(vg[vmFixedGlobalSize:], f.global) // use saved global instead + } else { + n = copy(vg[vmFixedGlobalSize:], global) + } + copy(vg[vmFixedGlobalSize+n:], f.static) + + v.execute(f.code) + + f.execCount++ + + // keep largest global buffer + if cap(global) > cap(f.global) { + f.global = global[:0] + } else if len(f.global) > 0 { + f.global = f.global[:0] + } + + // check for global data to be saved for next program execution + globalSize := binary.LittleEndian.Uint32(vg[0x30:]) + if globalSize > 0 { + if globalSize > vmGlobalSize-vmFixedGlobalSize { + globalSize = vmGlobalSize - vmFixedGlobalSize + } + if cap(f.global) < int(globalSize) { + f.global = make([]byte, globalSize) + } else { + f.global = f.global[:globalSize] + } + copy(f.global, vg[vmFixedGlobalSize:]) + } + + // find program output + length := binary.LittleEndian.Uint32(vg[0x1c:]) & vmMask + start := binary.LittleEndian.Uint32(vg[0x20:]) & vmMask + if start+length > vmSize { + // TODO: error + start = 0 + length = 0 + } + if start != 0 && cap(v.m) > cap(buf) { + // Initial buffer was to small for vm. 
+ // Copy output to beginning of vm memory so that decodeReader + // will re-use the newly allocated vm memory and we will not + // have to reallocate again next time. + copy(v.m, v.m[start:start+length]) + start = 0 + } + return v.m[start : start+length], nil +} + +// getV3Filter returns a V3 filter function from a code byte slice. +func getV3Filter(code []byte) (v3Filter, error) { + // check if filter is a known standard filter + c := crc32.ChecksumIEEE(code) + for _, f := range standardV3Filters { + if f.crc == c && f.len == len(code) { + return f.f, nil + } + } + + // create new vm filter + f := new(vmFilter) + r := newRarBitReader(bytes.NewReader(code[1:])) // skip first xor byte check + + // read static data + n, err := r.readBits(1) + if err != nil { + return nil, err + } + if n > 0 { + m, err := r.readUint32() + if err != nil { + return nil, err + } + f.static = make([]byte, m+1) + err = r.readFull(f.static) + if err != nil { + return nil, err + } + } + + f.code, err = readCommands(r) + if err == io.EOF { + err = nil + } + + return f.execute, err +} diff --git a/vendor/github.com/nwaples/rardecode/huffman.go b/vendor/github.com/nwaples/rardecode/huffman.go new file mode 100644 index 00000000..eb289b40 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/huffman.go @@ -0,0 +1,208 @@ +package rardecode + +import ( + "errors" + "io" +) + +const ( + maxCodeLength = 15 // maximum code length in bits + maxQuickBits = 10 + maxQuickSize = 1 << maxQuickBits +) + +var ( + errHuffDecodeFailed = errors.New("rardecode: huffman decode failed") + errInvalidLengthTable = errors.New("rardecode: invalid huffman code length table") +) + +type huffmanDecoder struct { + limit [maxCodeLength + 1]int + pos [maxCodeLength + 1]int + symbol []int + min uint + quickbits uint + quicklen [maxQuickSize]uint + quicksym [maxQuickSize]int +} + +func (h *huffmanDecoder) init(codeLengths []byte) { + var count [maxCodeLength + 1]int + + for _, n := range codeLengths { + if n == 0 { + 
continue + } + count[n]++ + } + + h.pos[0] = 0 + h.limit[0] = 0 + h.min = 0 + for i := uint(1); i <= maxCodeLength; i++ { + h.limit[i] = h.limit[i-1] + count[i]<<(maxCodeLength-i) + h.pos[i] = h.pos[i-1] + count[i-1] + if h.min == 0 && h.limit[i] > 0 { + h.min = i + } + } + + if cap(h.symbol) >= len(codeLengths) { + h.symbol = h.symbol[:len(codeLengths)] + for i := range h.symbol { + h.symbol[i] = 0 + } + } else { + h.symbol = make([]int, len(codeLengths)) + } + + copy(count[:], h.pos[:]) + for i, n := range codeLengths { + if n != 0 { + h.symbol[count[n]] = i + count[n]++ + } + } + + if len(codeLengths) >= 298 { + h.quickbits = maxQuickBits + } else { + h.quickbits = maxQuickBits - 3 + } + + bits := uint(1) + for i := 0; i < 1<= h.limit[bits] && bits < maxCodeLength { + bits++ + } + h.quicklen[i] = bits + + dist := v - h.limit[bits-1] + dist >>= (maxCodeLength - bits) + + pos := h.pos[bits] + dist + if pos < len(h.symbol) { + h.quicksym[i] = h.symbol[pos] + } else { + h.quicksym[i] = 0 + } + } +} + +func (h *huffmanDecoder) readSym(r bitReader) (int, error) { + bits := uint(maxCodeLength) + v, err := r.readBits(maxCodeLength) + if err != nil { + if err != io.EOF { + return 0, err + } + // fall back to 1 bit at a time if we read past EOF + for i := uint(1); i <= maxCodeLength; i++ { + b, err := r.readBits(1) + if err != nil { + return 0, err // not enough bits return error + } + v |= b << (maxCodeLength - i) + if v < h.limit[i] { + bits = i + break + } + } + } else { + if v < h.limit[h.quickbits] { + i := v >> (maxCodeLength - h.quickbits) + r.unreadBits(maxCodeLength - h.quicklen[i]) + return h.quicksym[i], nil + } + + for i, n := range h.limit[h.min:] { + if v < n { + bits = h.min + uint(i) + r.unreadBits(maxCodeLength - bits) + break + } + } + } + + dist := v - h.limit[bits-1] + dist >>= maxCodeLength - bits + + pos := h.pos[bits] + dist + if pos > len(h.symbol) { + return 0, errHuffDecodeFailed + } + + return h.symbol[pos], nil +} + +// readCodeLengthTable 
reads a new code length table into codeLength from br. +// If addOld is set the old table is added to the new one. +func readCodeLengthTable(br bitReader, codeLength []byte, addOld bool) error { + var bitlength [20]byte + for i := 0; i < len(bitlength); i++ { + n, err := br.readBits(4) + if err != nil { + return err + } + if n == 0xf { + cnt, err := br.readBits(4) + if err != nil { + return err + } + if cnt > 0 { + // array already zero'd dont need to explicitly set + i += cnt + 1 + continue + } + } + bitlength[i] = byte(n) + } + + var bl huffmanDecoder + bl.init(bitlength[:]) + + for i := 0; i < len(codeLength); i++ { + l, err := bl.readSym(br) + if err != nil { + return err + } + + if l < 16 { + if addOld { + codeLength[i] = (codeLength[i] + byte(l)) & 0xf + } else { + codeLength[i] = byte(l) + } + continue + } + + var count int + var value byte + + switch l { + case 16, 18: + count, err = br.readBits(3) + count += 3 + default: + count, err = br.readBits(7) + count += 11 + } + if err != nil { + return err + } + if l < 18 { + if i == 0 { + return errInvalidLengthTable + } + value = codeLength[i-1] + } + for ; count > 0 && i < len(codeLength); i++ { + codeLength[i] = value + count-- + } + i-- + } + return nil +} diff --git a/vendor/github.com/nwaples/rardecode/ppm_model.go b/vendor/github.com/nwaples/rardecode/ppm_model.go new file mode 100644 index 00000000..58a545aa --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/ppm_model.go @@ -0,0 +1,1096 @@ +package rardecode + +import ( + "errors" + "io" +) + +const ( + rangeBottom = 1 << 15 + rangeTop = 1 << 24 + + maxFreq = 124 + + intBits = 7 + periodBits = 7 + binScale = 1 << (intBits + periodBits) + + n0 = 1 + n1 = 4 + n2 = 4 + n3 = 4 + n4 = (128 + 3 - 1*n1 - 2*n2 - 3*n3) / 4 + nIndexes = n0 + n1 + n2 + n3 + n4 + + // memory is allocated in units. A unit contains unitSize number of bytes. + // A unit can store one context or two states. 
+ unitSize = 12 + + maxUint16 = 1<<16 - 1 + freeMark = -1 +) + +var ( + errCorruptPPM = errors.New("rardecode: corrupt ppm data") + + expEscape = []byte{25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2} + initBinEsc = []uint16{0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051} + + ns2Index [256]byte + ns2BSIndex [256]byte + + // units2Index maps the number of units in a block to a freelist index + units2Index [128 + 1]byte + // index2Units maps a freelist index to the size of the block in units + index2Units [nIndexes]int32 +) + +func init() { + ns2BSIndex[0] = 2 * 0 + ns2BSIndex[1] = 2 * 1 + for i := 2; i < 11; i++ { + ns2BSIndex[i] = 2 * 2 + } + for i := 11; i < 256; i++ { + ns2BSIndex[i] = 2 * 3 + } + + var j, n byte + for i := range ns2Index { + ns2Index[i] = n + if j <= 3 { + n++ + j = n + } else { + j-- + } + } + + var ii byte + var iu, units int32 + for i, n := range []int{n0, n1, n2, n3, n4} { + for j := 0; j < n; j++ { + units += int32(i) + index2Units[ii] = units + for iu <= units { + units2Index[iu] = ii + iu++ + } + ii++ + } + } +} + +type rangeCoder struct { + br io.ByteReader + code uint32 + low uint32 + rnge uint32 +} + +func (r *rangeCoder) init(br io.ByteReader) error { + r.br = br + r.low = 0 + r.rnge = ^uint32(0) + for i := 0; i < 4; i++ { + c, err := r.br.ReadByte() + if err != nil { + return err + } + r.code = r.code<<8 | uint32(c) + } + return nil +} + +func (r *rangeCoder) currentCount(scale uint32) uint32 { + r.rnge /= scale + return (r.code - r.low) / r.rnge +} + +func (r *rangeCoder) normalize() error { + for { + if r.low^(r.low+r.rnge) >= rangeTop { + if r.rnge >= rangeBottom { + return nil + } + r.rnge = -r.low & (rangeBottom - 1) + } + c, err := r.br.ReadByte() + if err != nil { + return err + } + r.code = r.code<<8 | uint32(c) + r.rnge <<= 8 + r.low <<= 8 + } +} + +func (r *rangeCoder) decode(lowCount, highCount uint32) error { + r.low += r.rnge * lowCount + r.rnge *= highCount - lowCount + + return r.normalize() +} + 
+type see2Context struct { + summ uint16 + shift byte + count byte +} + +func newSee2Context(i uint16) see2Context { + return see2Context{i << (periodBits - 4), (periodBits - 4), 4} +} + +func (s *see2Context) mean() uint32 { + if s == nil { + return 1 + } + n := s.summ >> s.shift + if n == 0 { + return 1 + } + s.summ -= n + return uint32(n) +} + +func (s *see2Context) update() { + if s == nil || s.shift >= periodBits { + return + } + s.count-- + if s.count == 0 { + s.summ += s.summ + s.count = 3 << s.shift + s.shift++ + } +} + +type state struct { + sym byte + freq byte + + // succ can point to a context or byte in memory. + // A context pointer is a positive integer. It is an index into the states + // array that points to the first of two states which the context is + // marshalled into. + // A byte pointer is a negative integer. The magnitude represents the position + // in bytes from the bottom of the memory. As memory is modelled as an array of + // states, this is used to calculate which state, and where in the state the + // byte is stored. + // A zero value represents a nil pointer. + succ int32 +} + +// uint16 return a uint16 stored in the sym and freq fields of a state +func (s state) uint16() uint16 { return uint16(s.sym) | uint16(s.freq)<<8 } + +// setUint16 stores a uint16 in the sym and freq fields of a state +func (s *state) setUint16(n uint16) { s.sym = byte(n); s.freq = byte(n >> 8) } + +// A context is marshalled into a slice of two states. +// The first state contains the number of states, and the suffix pointer. +// If there is only one state, the second state contains that state. +// If there is more than one state, the second state contains the summFreq +// and the index to the slice of states. 
+type context struct { + i int32 // index into the states array for context + s []state // slice of two states representing context + a *subAllocator +} + +// succPtr returns a pointer value for the context to be stored in a state.succ +func (c *context) succPtr() int32 { return c.i } + +func (c *context) numStates() int { return int(c.s[0].uint16()) } + +func (c *context) setNumStates(n int) { c.s[0].setUint16(uint16(n)) } + +func (c *context) statesIndex() int32 { return c.s[1].succ } + +func (c *context) setStatesIndex(n int32) { c.s[1].succ = n } + +func (c *context) suffix() *context { return c.a.succContext(c.s[0].succ) } + +func (c *context) setSuffix(sc *context) { c.s[0].succ = sc.i } + +func (c *context) summFreq() uint16 { return c.s[1].uint16() } + +func (c *context) setSummFreq(f uint16) { c.s[1].setUint16(f) } + +func (c *context) notEq(ctx *context) bool { return c.i != ctx.i } + +func (c *context) states() []state { + if ns := int32(c.s[0].uint16()); ns != 1 { + i := c.s[1].succ + return c.a.states[i : i+ns] + } + return c.s[1:] +} + +// shrinkStates shrinks the state list down to size states +func (c *context) shrinkStates(states []state, size int) []state { + i1 := units2Index[(len(states)+1)>>1] + i2 := units2Index[(size+1)>>1] + + if size == 1 { + // store state in context, and free states block + n := c.statesIndex() + c.s[1] = states[0] + states = c.s[1:] + c.a.addFreeBlock(n, i1) + } else if i1 != i2 { + if n := c.a.removeFreeBlock(i2); n > 0 { + // allocate new block and copy + copy(c.a.states[n:], states[:size]) + states = c.a.states[n:] + // free old block + c.a.addFreeBlock(c.statesIndex(), i1) + c.setStatesIndex(n) + } else { + // split current block, and free units not needed + n = c.statesIndex() + index2Units[i2]<<1 + u := index2Units[i1] - index2Units[i2] + c.a.freeUnits(n, u) + } + } + c.setNumStates(size) + return states[:size] +} + +// expandStates expands the states list by one +func (c *context) expandStates() []state { + states 
:= c.states() + ns := len(states) + if ns == 1 { + s := states[0] + n := c.a.allocUnits(1) + if n == 0 { + return nil + } + c.setStatesIndex(n) + states = c.a.states[n:] + states[0] = s + } else if ns&0x1 == 0 { + u := ns >> 1 + i1 := units2Index[u] + i2 := units2Index[u+1] + if i1 != i2 { + n := c.a.allocUnits(i2) + if n == 0 { + return nil + } + copy(c.a.states[n:], states) + c.a.addFreeBlock(c.statesIndex(), i1) + c.setStatesIndex(n) + states = c.a.states[n:] + } + } + c.setNumStates(ns + 1) + return states[:ns+1] +} + +type subAllocator struct { + // memory for allocation is split into two heaps + + heap1MaxBytes int32 // maximum bytes available in heap1 + heap1Lo int32 // heap1 bottom in number of bytes + heap1Hi int32 // heap1 top in number of bytes + heap2Lo int32 // heap2 bottom index in states + heap2Hi int32 // heap2 top index in states + glueCount int + + // Each freeList entry contains an index into states for the beginning + // of a free block. The first state in that block may contain an index + // to another free block and so on. The size of the free block in units + // (2 states) for that freeList index can be determined from the + // index2Units array. + freeList [nIndexes]int32 + + // Instead of bytes, memory is represented by a slice of states. + // context's are marshalled to and from a pair of states. + // multiple bytes are stored in a state. + states []state +} + +func (a *subAllocator) init(maxMB int) { + bytes := int32(maxMB) << 20 + heap2Units := bytes / 8 / unitSize * 7 + a.heap1MaxBytes = bytes - heap2Units*unitSize + // Add one for the case when bytes are not a multiple of unitSize + heap1Units := a.heap1MaxBytes/unitSize + 1 + // Calculate total size in state's. Add 1 unit so we can reserve the first unit. + // This will allow us to use the zero index as a nil pointer. 
+ n := int(1+heap1Units+heap2Units) * 2 + if cap(a.states) > n { + a.states = a.states[:n] + } else { + a.states = make([]state, n) + } +} + +func (a *subAllocator) restart() { + // Pad heap1 start by 1 unit and enough bytes so that there is no + // gap between heap1 end and heap2 start. + a.heap1Lo = unitSize + (unitSize - a.heap1MaxBytes%unitSize) + a.heap1Hi = unitSize + (a.heap1MaxBytes/unitSize+1)*unitSize + a.heap2Lo = a.heap1Hi / unitSize * 2 + a.heap2Hi = int32(len(a.states)) + a.glueCount = 0 + for i := range a.freeList { + a.freeList[i] = 0 + } + for i := range a.states { + a.states[i] = state{} + } +} + +// pushByte puts a byte on the heap and returns a state.succ index that +// can be used to retrieve it. +func (a *subAllocator) pushByte(c byte) int32 { + si := a.heap1Lo / 6 // state index + oi := a.heap1Lo % 6 // byte position in state + switch oi { + case 0: + a.states[si].sym = c + case 1: + a.states[si].freq = c + default: + n := (uint(oi) - 2) * 8 + mask := ^(uint32(0xFF) << n) + succ := uint32(a.states[si].succ) & mask + succ |= uint32(c) << n + a.states[si].succ = int32(succ) + } + a.heap1Lo++ + if a.heap1Lo >= a.heap1Hi { + return 0 + } + return -a.heap1Lo +} + +// popByte reverses the previous pushByte +func (a *subAllocator) popByte() { a.heap1Lo-- } + +// succByte returns a byte from the heap given a state.succ index +func (a *subAllocator) succByte(i int32) byte { + i = -i + si := i / 6 + oi := i % 6 + switch oi { + case 0: + return a.states[si].sym + case 1: + return a.states[si].freq + default: + n := (uint(oi) - 2) * 8 + succ := uint32(a.states[si].succ) >> n + return byte(succ & 0xff) + } +} + +// succContext returns a context given a state.succ index +func (a *subAllocator) succContext(i int32) *context { + if i <= 0 { + return nil + } + return &context{i: i, s: a.states[i : i+2 : i+2], a: a} +} + +// succIsNil returns whether a state.succ points to nothing +func (a *subAllocator) succIsNil(i int32) bool { return i == 0 } + +// 
nextByteAddr takes a state.succ value representing a pointer +// to a byte, and returns the next bytes address +func (a *subAllocator) nextByteAddr(n int32) int32 { return n - 1 } + +func (a *subAllocator) removeFreeBlock(i byte) int32 { + n := a.freeList[i] + if n != 0 { + a.freeList[i] = a.states[n].succ + a.states[n] = state{} + } + return n +} + +func (a *subAllocator) addFreeBlock(n int32, i byte) { + a.states[n].succ = a.freeList[i] + a.freeList[i] = n +} + +func (a *subAllocator) freeUnits(n, u int32) { + i := units2Index[u] + if u != index2Units[i] { + i-- + a.addFreeBlock(n, i) + u -= index2Units[i] + n += index2Units[i] << 1 + i = units2Index[u] + } + a.addFreeBlock(n, i) +} + +func (a *subAllocator) glueFreeBlocks() { + var freeIndex int32 + + for i, n := range a.freeList { + s := state{succ: freeMark} + s.setUint16(uint16(index2Units[i])) + for n != 0 { + states := a.states[n:] + states[1].succ = freeIndex + freeIndex = n + n = states[0].succ + states[0] = s + } + a.freeList[i] = 0 + } + + for i := freeIndex; i != 0; i = a.states[i+1].succ { + if a.states[i].succ != freeMark { + continue + } + u := int32(a.states[i].uint16()) + states := a.states[i+u<<1:] + for len(states) > 0 && states[0].succ == freeMark { + u += int32(states[0].uint16()) + if u > maxUint16 { + break + } + states[0].succ = 0 + a.states[i].setUint16(uint16(u)) + states = a.states[i+u<<1:] + } + } + + for n := freeIndex; n != 0; n = a.states[n+1].succ { + if a.states[n].succ != freeMark { + continue + } + a.states[n].succ = 0 + u := int32(a.states[n].uint16()) + m := n + for u > 128 { + a.addFreeBlock(m, nIndexes-1) + u -= 128 + m += 256 + } + a.freeUnits(m, u) + } +} + +func (a *subAllocator) allocUnitsRare(index byte) int32 { + if a.glueCount == 0 { + a.glueCount = 255 + a.glueFreeBlocks() + if n := a.removeFreeBlock(index); n > 0 { + return n + } + } + // try to find a larger free block and split it + for i := index + 1; i < nIndexes; i++ { + if n := a.removeFreeBlock(i); n > 0 { + u 
:= index2Units[i] - index2Units[index] + a.freeUnits(n+index2Units[index]<<1, u) + return n + } + } + a.glueCount-- + + // try to allocate units from the top of heap1 + n := a.heap1Hi - index2Units[index]*unitSize + if n > a.heap1Lo { + a.heap1Hi = n + return a.heap1Hi / unitSize * 2 + } + return 0 +} + +func (a *subAllocator) allocUnits(i byte) int32 { + // try to allocate a free block + if n := a.removeFreeBlock(i); n > 0 { + return n + } + // try to allocate from the bottom of heap2 + n := index2Units[i] << 1 + if a.heap2Lo+n <= a.heap2Hi { + lo := a.heap2Lo + a.heap2Lo += n + return lo + } + return a.allocUnitsRare(i) +} + +func (a *subAllocator) newContext(s state, suffix *context) *context { + var n int32 + if a.heap2Lo < a.heap2Hi { + // allocate from top of heap2 + a.heap2Hi -= 2 + n = a.heap2Hi + } else if n = a.removeFreeBlock(1); n == 0 { + if n = a.allocUnitsRare(1); n == 0 { + return nil + } + } + c := &context{i: n, s: a.states[n : n+2 : n+2], a: a} + c.s[0] = state{} + c.setNumStates(1) + c.s[1] = s + if suffix != nil { + c.setSuffix(suffix) + } + return c +} + +func (a *subAllocator) newContextSize(ns int) *context { + c := a.newContext(state{}, nil) + c.setNumStates(ns) + i := units2Index[(ns+1)>>1] + n := a.allocUnits(i) + c.setStatesIndex(n) + return c +} + +type model struct { + maxOrder int + orderFall int + initRL int + runLength int + prevSuccess byte + escCount byte + prevSym byte + initEsc byte + minC *context + maxC *context + rc rangeCoder + a subAllocator + charMask [256]byte + binSumm [128][64]uint16 + see2Cont [25][16]see2Context +} + +func (m *model) restart() { + for i := range m.charMask { + m.charMask[i] = 0 + } + m.escCount = 1 + + if m.maxOrder < 12 { + m.initRL = -m.maxOrder - 1 + } else { + m.initRL = -12 - 1 + } + m.orderFall = m.maxOrder + m.runLength = m.initRL + m.prevSuccess = 0 + + m.a.restart() + + c := m.a.newContextSize(256) + c.setSummFreq(257) + states := c.states() + for i := range states { + states[i] = state{sym: 
byte(i), freq: 1} + } + m.minC = c + m.maxC = c + m.prevSym = 0 + + for i := range m.binSumm { + for j, esc := range initBinEsc { + n := binScale - esc/(uint16(i)+2) + for k := j; k < len(m.binSumm[i]); k += len(initBinEsc) { + m.binSumm[i][k] = n + } + } + } + + for i := range m.see2Cont { + see := newSee2Context(5*uint16(i) + 10) + for j := range m.see2Cont[i] { + m.see2Cont[i][j] = see + } + } +} + +func (m *model) init(br io.ByteReader, reset bool, maxOrder, maxMB int) error { + err := m.rc.init(br) + if err != nil { + return err + } + if !reset { + if m.minC == nil { + return errCorruptPPM + } + return nil + } + + m.a.init(maxMB) + + if maxOrder == 1 { + return errCorruptPPM + } + m.maxOrder = maxOrder + m.restart() + return nil +} + +func (m *model) rescale(s *state) *state { + if s.freq <= maxFreq { + return s + } + c := m.minC + + var summFreq uint16 + + s.freq += 4 + states := c.states() + escFreq := c.summFreq() + 4 + + for i := range states { + f := states[i].freq + escFreq -= uint16(f) + if m.orderFall != 0 { + f++ + } + f >>= 1 + summFreq += uint16(f) + states[i].freq = f + + if i == 0 || f <= states[i-1].freq { + continue + } + j := i - 1 + for j > 0 && f > states[j-1].freq { + j-- + } + t := states[i] + copy(states[j+1:i+1], states[j:i]) + states[j] = t + } + + i := len(states) - 1 + for states[i].freq == 0 { + i-- + escFreq++ + } + if i != len(states)-1 { + states = c.shrinkStates(states, i+1) + } + s = &states[0] + if i == 0 { + for { + s.freq -= s.freq >> 1 + escFreq >>= 1 + if escFreq <= 1 { + return s + } + } + } + summFreq += escFreq - (escFreq >> 1) + c.setSummFreq(summFreq) + return s +} + +func (m *model) decodeBinSymbol() (*state, error) { + c := m.minC + s := &c.states()[0] + + ns := c.suffix().numStates() + i := m.prevSuccess + ns2BSIndex[ns-1] + byte(m.runLength>>26)&0x20 + if m.prevSym >= 64 { + i += 8 + } + if s.sym >= 64 { + i += 2 * 8 + } + bs := &m.binSumm[s.freq-1][i] + mean := (*bs + 1<<(periodBits-2)) >> periodBits + + if 
m.rc.currentCount(binScale) < uint32(*bs) { + err := m.rc.decode(0, uint32(*bs)) + if s.freq < 128 { + s.freq++ + } + *bs += 1<>10] + m.charMask[s.sym] = m.escCount + m.prevSuccess = 0 + return nil, err +} + +func (m *model) decodeSymbol1() (*state, error) { + c := m.minC + states := c.states() + scale := uint32(c.summFreq()) + // protect against divide by zero + // TODO: look at why this happens, may be problem elsewhere + if scale == 0 { + return nil, errCorruptPPM + } + count := m.rc.currentCount(scale) + m.prevSuccess = 0 + + var n uint32 + for i := range states { + s := &states[i] + n += uint32(s.freq) + if n <= count { + continue + } + err := m.rc.decode(n-uint32(s.freq), n) + s.freq += 4 + c.setSummFreq(uint16(scale + 4)) + if i == 0 { + if 2*n > scale { + m.prevSuccess = 1 + m.runLength++ + } + } else { + if s.freq <= states[i-1].freq { + return s, err + } + states[i-1], states[i] = states[i], states[i-1] + s = &states[i-1] + } + return m.rescale(s), err + } + + for _, s := range states { + m.charMask[s.sym] = m.escCount + } + return nil, m.rc.decode(n, scale) +} + +func (m *model) makeEscFreq(c *context, numMasked int) *see2Context { + ns := c.numStates() + if ns == 256 { + return nil + } + diff := ns - numMasked + + var i int + if m.prevSym >= 64 { + i = 8 + } + if diff < c.suffix().numStates()-ns { + i++ + } + if int(c.summFreq()) < 11*ns { + i += 2 + } + if numMasked > diff { + i += 4 + } + return &m.see2Cont[ns2Index[diff-1]][i] +} + +func (m *model) decodeSymbol2(numMasked int) (*state, error) { + c := m.minC + + see := m.makeEscFreq(c, numMasked) + scale := see.mean() + + var i int + var hi uint32 + states := c.states() + sl := make([]*state, len(states)-numMasked) + for j := range sl { + for m.charMask[states[i].sym] == m.escCount { + i++ + } + hi += uint32(states[i].freq) + sl[j] = &states[i] + i++ + } + + scale += hi + count := m.rc.currentCount(scale) + + if count >= scale { + return nil, errCorruptPPM + } + if count >= hi { + err := 
m.rc.decode(hi, scale) + if see != nil { + see.summ += uint16(scale) + } + for _, s := range sl { + m.charMask[s.sym] = m.escCount + } + return nil, err + } + + hi = uint32(sl[0].freq) + for hi <= count { + sl = sl[1:] + hi += uint32(sl[0].freq) + } + s := sl[0] + + err := m.rc.decode(hi-uint32(s.freq), hi) + + see.update() + + m.escCount++ + m.runLength = m.initRL + + s.freq += 4 + c.setSummFreq(c.summFreq() + 4) + return m.rescale(s), err +} + +func (c *context) findState(sym byte) *state { + var i int + states := c.states() + for i = range states { + if states[i].sym == sym { + break + } + } + return &states[i] +} + +func (m *model) createSuccessors(s, ss *state) *context { + var sl []*state + + if m.orderFall != 0 { + sl = append(sl, s) + } + + c := m.minC + for suff := c.suffix(); suff != nil; suff = c.suffix() { + c = suff + + if ss == nil { + ss = c.findState(s.sym) + } + if ss.succ != s.succ { + c = m.a.succContext(ss.succ) + break + } + sl = append(sl, ss) + ss = nil + } + + if len(sl) == 0 { + return c + } + + var up state + up.sym = m.a.succByte(s.succ) + up.succ = m.a.nextByteAddr(s.succ) + + states := c.states() + if len(states) > 1 { + s = c.findState(up.sym) + + cf := uint16(s.freq) - 1 + s0 := c.summFreq() - uint16(len(states)) - cf + + if 2*cf <= s0 { + if 5*cf > s0 { + up.freq = 2 + } else { + up.freq = 1 + } + } else { + up.freq = byte(1 + (2*cf+3*s0-1)/(2*s0)) + } + } else { + up.freq = states[0].freq + } + + for i := len(sl) - 1; i >= 0; i-- { + c = m.a.newContext(up, c) + if c == nil { + return nil + } + sl[i].succ = c.succPtr() + } + return c +} + +func (m *model) update(s *state) { + if m.orderFall == 0 { + if c := m.a.succContext(s.succ); c != nil { + m.minC = c + m.maxC = c + return + } + } + + if m.escCount == 0 { + m.escCount = 1 + for i := range m.charMask { + m.charMask[i] = 0 + } + } + + var ss *state // matching minC.suffix state + + if s.freq < maxFreq/4 && m.minC.suffix() != nil { + c := m.minC.suffix() + states := c.states() + + 
var i int + if len(states) > 1 { + for states[i].sym != s.sym { + i++ + } + if i > 0 && states[i].freq >= states[i-1].freq { + states[i-1], states[i] = states[i], states[i-1] + i-- + } + if states[i].freq < maxFreq-9 { + states[i].freq += 2 + c.setSummFreq(c.summFreq() + 2) + } + } else if states[0].freq < 32 { + states[0].freq++ + } + ss = &states[i] // save later for createSuccessors + } + + if m.orderFall == 0 { + c := m.createSuccessors(s, ss) + if c == nil { + m.restart() + } else { + m.minC = c + m.maxC = c + s.succ = c.succPtr() + } + return + } + + succ := m.a.pushByte(s.sym) + if m.a.succIsNil(succ) { + m.restart() + return + } + + var minC *context + if m.a.succIsNil(s.succ) { + s.succ = succ + minC = m.minC + } else { + minC = m.a.succContext(s.succ) + if minC == nil { + minC = m.createSuccessors(s, ss) + if minC == nil { + m.restart() + return + } + } + m.orderFall-- + if m.orderFall == 0 { + succ = minC.succPtr() + if m.maxC.notEq(m.minC) { + m.a.popByte() + } + } + } + + n := m.minC.numStates() + s0 := int(m.minC.summFreq()) - n - int(s.freq-1) + for c := m.maxC; c.notEq(m.minC); c = c.suffix() { + var summFreq uint16 + + states := c.expandStates() + if states == nil { + m.restart() + return + } + if ns := len(states) - 1; ns != 1 { + summFreq = c.summFreq() + if 4*ns <= n && int(summFreq) <= 8*ns { + summFreq += 2 + } + if 2*ns < n { + summFreq++ + } + } else { + p := &states[0] + if p.freq < maxFreq/4-1 { + p.freq += p.freq + } else { + p.freq = maxFreq - 4 + } + summFreq = uint16(p.freq) + uint16(m.initEsc) + if n > 3 { + summFreq++ + } + } + + cf := 2 * int(s.freq) * int(summFreq+6) + sf := s0 + int(summFreq) + var freq byte + if cf >= 6*sf { + switch { + case cf >= 15*sf: + freq = 7 + case cf >= 12*sf: + freq = 6 + case cf >= 9*sf: + freq = 5 + default: + freq = 4 + } + summFreq += uint16(freq) + } else { + switch { + case cf >= 4*sf: + freq = 3 + case cf > sf: + freq = 2 + default: + freq = 1 + } + summFreq += 3 + } + states[len(states)-1] = 
state{sym: s.sym, freq: freq, succ: succ} + c.setSummFreq(summFreq) + } + m.minC = minC + m.maxC = minC +} + +func (m *model) ReadByte() (byte, error) { + if m.minC == nil { + return 0, errCorruptPPM + } + var s *state + var err error + if m.minC.numStates() == 1 { + s, err = m.decodeBinSymbol() + } else { + s, err = m.decodeSymbol1() + } + for s == nil && err == nil { + n := m.minC.numStates() + for m.minC.numStates() == n { + m.orderFall++ + m.minC = m.minC.suffix() + if m.minC == nil { + return 0, errCorruptPPM + } + } + s, err = m.decodeSymbol2(n) + } + if err != nil { + return 0, err + } + + // save sym so it doesn't get overwritten by a possible restart() + sym := s.sym + m.update(s) + m.prevSym = sym + return sym, nil +} diff --git a/vendor/github.com/nwaples/rardecode/reader.go b/vendor/github.com/nwaples/rardecode/reader.go new file mode 100644 index 00000000..03e88a87 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/reader.go @@ -0,0 +1,369 @@ +package rardecode + +import ( + "bufio" + "bytes" + "errors" + "io" + "io/ioutil" + "os" + "time" +) + +// FileHeader HostOS types +const ( + HostOSUnknown = 0 + HostOSMSDOS = 1 + HostOSOS2 = 2 + HostOSWindows = 3 + HostOSUnix = 4 + HostOSMacOS = 5 + HostOSBeOS = 6 +) + +const ( + maxPassword = 128 +) + +var ( + errShortFile = errors.New("rardecode: decoded file too short") + errInvalidFileBlock = errors.New("rardecode: invalid file block") + errUnexpectedArcEnd = errors.New("rardecode: unexpected end of archive") + errBadFileChecksum = errors.New("rardecode: bad file checksum") +) + +type byteReader interface { + io.Reader + io.ByteReader +} + +type limitedReader struct { + r io.Reader + n int64 // bytes remaining + shortErr error // error returned when r returns io.EOF with n > 0 +} + +func (l *limitedReader) Read(p []byte) (int, error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err := l.r.Read(p) + l.n -= int64(n) + if err == io.EOF && l.n > 0 { + return n, 
l.shortErr + } + return n, err +} + +type limitedByteReader struct { + limitedReader + br io.ByteReader +} + +func (l *limitedByteReader) ReadByte() (byte, error) { + if l.n <= 0 { + return 0, io.EOF + } + c, err := l.br.ReadByte() + if err == nil { + l.n-- + } else if err == io.EOF && l.n > 0 { + return 0, l.shortErr + } + return c, err +} + +// limitByteReader returns a limitedByteReader that reads from r and stops with +// io.EOF after n bytes. +// If r returns an io.EOF before reading n bytes, io.ErrUnexpectedEOF is returned. +func limitByteReader(r byteReader, n int64) *limitedByteReader { + return &limitedByteReader{limitedReader{r, n, io.ErrUnexpectedEOF}, r} +} + +// fileChecksum allows file checksum validations to be performed. +// File contents must first be written to fileChecksum. Then valid is +// called to perform the file checksum calculation to determine +// if the file contents are valid or not. +type fileChecksum interface { + io.Writer + valid() bool +} + +// FileHeader represents a single file in a RAR archive. +type FileHeader struct { + Name string // file name using '/' as the directory separator + IsDir bool // is a directory + HostOS byte // Host OS the archive was created on + Attributes int64 // Host OS specific file attributes + PackedSize int64 // packed file size (or first block if the file spans volumes) + UnPackedSize int64 // unpacked file size + UnKnownSize bool // unpacked file size is not known + ModificationTime time.Time // modification time (non-zero if set) + CreationTime time.Time // creation time (non-zero if set) + AccessTime time.Time // access time (non-zero if set) + Version int // file version +} + +// Mode returns an os.FileMode for the file, calculated from the Attributes field. 
+func (f *FileHeader) Mode() os.FileMode { + var m os.FileMode + + if f.IsDir { + m = os.ModeDir + } + if f.HostOS == HostOSWindows { + if f.IsDir { + m |= 0777 + } else if f.Attributes&1 > 0 { + m |= 0444 // readonly + } else { + m |= 0666 + } + return m + } + // assume unix perms for all remaining os types + m |= os.FileMode(f.Attributes) & os.ModePerm + + // only check other bits on unix host created archives + if f.HostOS != HostOSUnix { + return m + } + + if f.Attributes&0x200 != 0 { + m |= os.ModeSticky + } + if f.Attributes&0x400 != 0 { + m |= os.ModeSetgid + } + if f.Attributes&0x800 != 0 { + m |= os.ModeSetuid + } + + // Check for additional file types. + if f.Attributes&0xF000 == 0xA000 { + m |= os.ModeSymlink + } + return m +} + +// fileBlockHeader represents a file block in a RAR archive. +// Files may comprise one or more file blocks. +// Solid files retain decode tables and dictionary from previous solid files in the archive. +type fileBlockHeader struct { + first bool // first block in file + last bool // last block in file + solid bool // file is solid + winSize uint // log base 2 of decode window size + cksum fileChecksum // file checksum + decoder decoder // decoder to use for file + key []byte // key for AES, non-empty if file encrypted + iv []byte // iv for AES, non-empty if file encrypted + FileHeader +} + +// fileBlockReader provides sequential access to file blocks in a RAR archive. +type fileBlockReader interface { + io.Reader // Read's read data from the current file block + io.ByteReader // Read bytes from current file block + next() (*fileBlockHeader, error) // reads the next file block header at current position + reset() // resets encryption + isSolid() bool // is archive solid + version() int // returns current archive format version +} + +// packedFileReader provides sequential access to packed files in a RAR archive. 
+type packedFileReader struct { + r fileBlockReader + h *fileBlockHeader // current file header +} + +// nextBlockInFile reads the next file block in the current file at the current +// archive file position, or returns an error if there is a problem. +// It is invalid to call this when already at the last block in the current file. +func (f *packedFileReader) nextBlockInFile() error { + h, err := f.r.next() + if err != nil { + if err == io.EOF { + // archive ended, but file hasn't + return errUnexpectedArcEnd + } + return err + } + if h.first || h.Name != f.h.Name { + return errInvalidFileBlock + } + f.h = h + return nil +} + +// next advances to the next packed file in the RAR archive. +func (f *packedFileReader) next() (*fileBlockHeader, error) { + if f.h != nil { + // skip to last block in current file + for !f.h.last { + // discard remaining block data + if _, err := io.Copy(ioutil.Discard, f.r); err != nil { + return nil, err + } + if err := f.nextBlockInFile(); err != nil { + return nil, err + } + } + // discard last block data + if _, err := io.Copy(ioutil.Discard, f.r); err != nil { + return nil, err + } + } + var err error + f.h, err = f.r.next() // get next file block + if err != nil { + if err == errArchiveEnd { + return nil, io.EOF + } + return nil, err + } + if !f.h.first { + return nil, errInvalidFileBlock + } + return f.h, nil +} + +// Read reads the packed data for the current file into p. 
+func (f *packedFileReader) Read(p []byte) (int, error) { + n, err := f.r.Read(p) // read current block data + for err == io.EOF { // current block empty + if n > 0 { + return n, nil + } + if f.h == nil || f.h.last { + return 0, io.EOF // last block so end of file + } + if err := f.nextBlockInFile(); err != nil { + return 0, err + } + n, err = f.r.Read(p) // read new block data + } + return n, err +} + +func (f *packedFileReader) ReadByte() (byte, error) { + c, err := f.r.ReadByte() // read current block data + for err == io.EOF && f.h != nil && !f.h.last { // current block empty + if err := f.nextBlockInFile(); err != nil { + return 0, err + } + c, err = f.r.ReadByte() // read new block data + } + return c, err +} + +// Reader provides sequential access to files in a RAR archive. +type Reader struct { + r io.Reader // reader for current unpacked file + pr packedFileReader // reader for current packed file + dr decodeReader // reader for decoding and filters if file is compressed + cksum fileChecksum // current file checksum + solidr io.Reader // reader for solid file +} + +// Read reads from the current file in the RAR archive. +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + if err == io.EOF && r.cksum != nil && !r.cksum.valid() { + return n, errBadFileChecksum + } + return n, err +} + +// Next advances to the next file in the archive. 
+func (r *Reader) Next() (*FileHeader, error) { + if r.solidr != nil { + // solid files must be read fully to update decoder information + if _, err := io.Copy(ioutil.Discard, r.solidr); err != nil { + return nil, err + } + } + + h, err := r.pr.next() // skip to next file + if err != nil { + return nil, err + } + r.solidr = nil + + br := byteReader(&r.pr) // start with packed file reader + + // check for encryption + if len(h.key) > 0 && len(h.iv) > 0 { + br = newAesDecryptReader(br, h.key, h.iv) // decrypt + } + r.r = br + // check for compression + if h.decoder != nil { + err = r.dr.init(br, h.decoder, h.winSize, !h.solid) + if err != nil { + return nil, err + } + r.r = &r.dr + if r.pr.r.isSolid() { + r.solidr = r.r + } + } + if h.UnPackedSize >= 0 && !h.UnKnownSize { + // Limit reading to UnPackedSize as there may be padding + r.r = &limitedReader{r.r, h.UnPackedSize, errShortFile} + } + r.cksum = h.cksum + if r.cksum != nil { + r.r = io.TeeReader(r.r, h.cksum) // write file data to checksum as it is read + } + fh := new(FileHeader) + *fh = h.FileHeader + return fh, nil +} + +func (r *Reader) init(fbr fileBlockReader) { + r.r = bytes.NewReader(nil) // initial reads will always return EOF + r.pr.r = fbr +} + +// NewReader creates a Reader reading from r. +// NewReader only supports single volume archives. +// Multi-volume archives must use OpenReader. +func NewReader(r io.Reader, password string) (*Reader, error) { + br, ok := r.(*bufio.Reader) + if !ok { + br = bufio.NewReader(r) + } + fbr, err := newFileBlockReader(br, password) + if err != nil { + return nil, err + } + rr := new(Reader) + rr.init(fbr) + return rr, nil +} + +type ReadCloser struct { + v *volume + Reader +} + +// Close closes the rar file. +func (rc *ReadCloser) Close() error { + return rc.v.Close() +} + +// OpenReader opens a RAR archive specified by the name and returns a ReadCloser. 
+func OpenReader(name, password string) (*ReadCloser, error) { + v, err := openVolume(name, password) + if err != nil { + return nil, err + } + rc := new(ReadCloser) + rc.v = v + rc.Reader.init(v) + return rc, nil +} diff --git a/vendor/github.com/nwaples/rardecode/vm.go b/vendor/github.com/nwaples/rardecode/vm.go new file mode 100644 index 00000000..fd26a5a0 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/vm.go @@ -0,0 +1,687 @@ +package rardecode + +import ( + "encoding/binary" + "errors" +) + +const ( + // vm flag bits + flagC = 1 // Carry + flagZ = 2 // Zero + flagS = 0x80000000 // Sign + + maxCommands = 25000000 // maximum number of commands that can be run in a program + + vmRegs = 8 // number if registers + vmSize = 0x40000 // memory size + vmMask = vmSize - 1 +) + +var ( + errInvalidVMInstruction = errors.New("rardecode: invalid vm instruction") +) + +type vm struct { + ip uint32 // instruction pointer + ipMod bool // ip was modified + fl uint32 // flag bits + r [vmRegs]uint32 // registers + m []byte // memory +} + +func (v *vm) setIP(ip uint32) { + v.ip = ip + v.ipMod = true +} + +// execute runs a list of commands on the vm. +func (v *vm) execute(cmd []command) { + v.ip = 0 // reset instruction pointer + for n := 0; n < maxCommands; n++ { + ip := v.ip + if ip >= uint32(len(cmd)) { + return + } + ins := cmd[ip] + ins.f(v, ins.bm, ins.op) // run cpu instruction + if v.ipMod { + // command modified ip, don't increment + v.ipMod = false + } else { + v.ip++ // increment ip for next command + } + } +} + +// newVM creates a new RAR virtual machine using the byte slice as memory. 
+func newVM(mem []byte) *vm { + v := new(vm) + + if cap(mem) < vmSize+4 { + v.m = make([]byte, vmSize+4) + copy(v.m, mem) + } else { + v.m = mem[:vmSize+4] + for i := len(mem); i < len(v.m); i++ { + v.m[i] = 0 + } + } + v.r[7] = vmSize + return v +} + +type operand interface { + get(v *vm, byteMode bool) uint32 + set(v *vm, byteMode bool, n uint32) +} + +// Immediate Operand +type opI uint32 + +func (op opI) get(v *vm, bm bool) uint32 { return uint32(op) } +func (op opI) set(v *vm, bm bool, n uint32) {} + +// Direct Operand +type opD uint32 + +func (op opD) get(v *vm, byteMode bool) uint32 { + if byteMode { + return uint32(v.m[op]) + } + return binary.LittleEndian.Uint32(v.m[op:]) +} + +func (op opD) set(v *vm, byteMode bool, n uint32) { + if byteMode { + v.m[op] = byte(n) + } else { + binary.LittleEndian.PutUint32(v.m[op:], n) + } +} + +// Register Operand +type opR uint32 + +func (op opR) get(v *vm, byteMode bool) uint32 { + if byteMode { + return v.r[op] & 0xFF + } + return v.r[op] +} + +func (op opR) set(v *vm, byteMode bool, n uint32) { + if byteMode { + v.r[op] = (v.r[op] & 0xFFFFFF00) | (n & 0xFF) + } else { + v.r[op] = n + } +} + +// Register Indirect Operand +type opRI uint32 + +func (op opRI) get(v *vm, byteMode bool) uint32 { + i := v.r[op] & vmMask + if byteMode { + return uint32(v.m[i]) + } + return binary.LittleEndian.Uint32(v.m[i:]) +} +func (op opRI) set(v *vm, byteMode bool, n uint32) { + i := v.r[op] & vmMask + if byteMode { + v.m[i] = byte(n) + } else { + binary.LittleEndian.PutUint32(v.m[i:], n) + } +} + +// Base Plus Index Indirect Operand +type opBI struct { + r uint32 + i uint32 +} + +func (op opBI) get(v *vm, byteMode bool) uint32 { + i := (v.r[op.r] + op.i) & vmMask + if byteMode { + return uint32(v.m[i]) + } + return binary.LittleEndian.Uint32(v.m[i:]) +} +func (op opBI) set(v *vm, byteMode bool, n uint32) { + i := (v.r[op.r] + op.i) & vmMask + if byteMode { + v.m[i] = byte(n) + } else { + binary.LittleEndian.PutUint32(v.m[i:], n) + } +} + 
+type commandFunc func(v *vm, byteMode bool, op []operand) + +type command struct { + f commandFunc + bm bool // is byte mode + op []operand +} + +var ( + ops = []struct { + f commandFunc + byteMode bool // supports byte mode + nops int // number of operands + jop bool // is a jump op + }{ + {mov, true, 2, false}, + {cmp, true, 2, false}, + {add, true, 2, false}, + {sub, true, 2, false}, + {jz, false, 1, true}, + {jnz, false, 1, true}, + {inc, true, 1, false}, + {dec, true, 1, false}, + {jmp, false, 1, true}, + {xor, true, 2, false}, + {and, true, 2, false}, + {or, true, 2, false}, + {test, true, 2, false}, + {js, false, 1, true}, + {jns, false, 1, true}, + {jb, false, 1, true}, + {jbe, false, 1, true}, + {ja, false, 1, true}, + {jae, false, 1, true}, + {push, false, 1, false}, + {pop, false, 1, false}, + {call, false, 1, true}, + {ret, false, 0, false}, + {not, true, 1, false}, + {shl, true, 2, false}, + {shr, true, 2, false}, + {sar, true, 2, false}, + {neg, true, 1, false}, + {pusha, false, 0, false}, + {popa, false, 0, false}, + {pushf, false, 0, false}, + {popf, false, 0, false}, + {movzx, false, 2, false}, + {movsx, false, 2, false}, + {xchg, true, 2, false}, + {mul, true, 2, false}, + {div, true, 2, false}, + {adc, true, 2, false}, + {sbb, true, 2, false}, + {print, false, 0, false}, + } +) + +func mov(v *vm, bm bool, op []operand) { + op[0].set(v, bm, op[1].get(v, bm)) +} + +func cmp(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + r := v1 - op[1].get(v, bm) + if r == 0 { + v.fl = flagZ + } else { + v.fl = 0 + if r > v1 { + v.fl = flagC + } + v.fl |= r & flagS + } +} + +func add(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + r := v1 + op[1].get(v, bm) + v.fl = 0 + signBit := uint32(flagS) + if bm { + r &= 0xFF + signBit = 0x80 + } + if r < v1 { + v.fl |= flagC + } + if r == 0 { + v.fl |= flagZ + } else if r&signBit > 0 { + v.fl |= flagS + } + op[0].set(v, bm, r) +} + +func sub(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + 
r := v1 - op[1].get(v, bm) + v.fl = 0 + + if r == 0 { + v.fl = flagZ + } else { + v.fl = 0 + if r > v1 { + v.fl = flagC + } + v.fl |= r & flagS + } + op[0].set(v, bm, r) +} + +func jz(v *vm, bm bool, op []operand) { + if v.fl&flagZ > 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jnz(v *vm, bm bool, op []operand) { + if v.fl&flagZ == 0 { + v.setIP(op[0].get(v, false)) + } +} + +func inc(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) + 1 + if bm { + r &= 0xFF + } + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func dec(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) - 1 + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func jmp(v *vm, bm bool, op []operand) { + v.setIP(op[0].get(v, false)) +} + +func xor(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) ^ op[1].get(v, bm) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func and(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) & op[1].get(v, bm) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func or(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) | op[1].get(v, bm) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func test(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) & op[1].get(v, bm) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func js(v *vm, bm bool, op []operand) { + if v.fl&flagS > 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jns(v *vm, bm bool, op []operand) { + if v.fl&flagS == 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jb(v *vm, bm bool, op []operand) { + if v.fl&flagC > 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jbe(v *vm, bm bool, op []operand) { + if v.fl&(flagC|flagZ) > 0 { + v.setIP(op[0].get(v, false)) + } +} + +func ja(v *vm, bm bool, op []operand) { + if v.fl&(flagC|flagZ) == 0 { + 
v.setIP(op[0].get(v, false)) + } +} + +func jae(v *vm, bm bool, op []operand) { + if v.fl&flagC == 0 { + v.setIP(op[0].get(v, false)) + } +} + +func push(v *vm, bm bool, op []operand) { + v.r[7] -= 4 + opRI(7).set(v, false, op[0].get(v, false)) + +} + +func pop(v *vm, bm bool, op []operand) { + op[0].set(v, false, opRI(7).get(v, false)) + v.r[7] += 4 +} + +func call(v *vm, bm bool, op []operand) { + v.r[7] -= 4 + opRI(7).set(v, false, v.ip+1) + v.setIP(op[0].get(v, false)) +} + +func ret(v *vm, bm bool, op []operand) { + r7 := v.r[7] + if r7 >= vmSize { + v.setIP(0xFFFFFFFF) // trigger end of program + } else { + v.setIP(binary.LittleEndian.Uint32(v.m[r7:])) + v.r[7] += 4 + } +} + +func not(v *vm, bm bool, op []operand) { + op[0].set(v, bm, ^op[0].get(v, bm)) +} + +func shl(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + v2 := op[1].get(v, bm) + r := v1 << v2 + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if (v1<<(v2-1))&0x80000000 > 0 { + v.fl |= flagC + } +} + +func shr(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + v2 := op[1].get(v, bm) + r := v1 >> v2 + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if (v1>>(v2-1))&0x1 > 0 { + v.fl |= flagC + } +} + +func sar(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + v2 := op[1].get(v, bm) + r := uint32(int32(v1) >> v2) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if (v1>>(v2-1))&0x1 > 0 { + v.fl |= flagC + } +} + +func neg(v *vm, bm bool, op []operand) { + r := 0 - op[0].get(v, bm) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r&flagS | flagC + } +} + +func pusha(v *vm, bm bool, op []operand) { + sp := opD(v.r[7]) + for _, r := range v.r { + sp = (sp - 4) & vmMask + sp.set(v, false, r) + } + v.r[7] = uint32(sp) +} + +func popa(v *vm, bm bool, op []operand) { + sp := opD(v.r[7]) + for i := 7; i >= 0; i-- { + v.r[i] = sp.get(v, false) + sp = (sp 
+ 4) & vmMask + } +} + +func pushf(v *vm, bm bool, op []operand) { + v.r[7] -= 4 + opRI(7).set(v, false, v.fl) +} + +func popf(v *vm, bm bool, op []operand) { + v.fl = opRI(7).get(v, false) + v.r[7] += 4 +} + +func movzx(v *vm, bm bool, op []operand) { + op[0].set(v, false, op[1].get(v, true)) +} + +func movsx(v *vm, bm bool, op []operand) { + op[0].set(v, false, uint32(int8(op[1].get(v, true)))) +} + +func xchg(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + op[0].set(v, bm, op[1].get(v, bm)) + op[1].set(v, bm, v1) +} + +func mul(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) * op[1].get(v, bm) + op[0].set(v, bm, r) +} + +func div(v *vm, bm bool, op []operand) { + div := op[1].get(v, bm) + if div != 0 { + r := op[0].get(v, bm) / div + op[0].set(v, bm, r) + } +} + +func adc(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + fc := v.fl & flagC + r := v1 + op[1].get(v, bm) + fc + if bm { + r &= 0xFF + } + op[0].set(v, bm, r) + + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if r < v1 || (r == v1 && fc > 0) { + v.fl |= flagC + } +} + +func sbb(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + fc := v.fl & flagC + r := v1 - op[1].get(v, bm) - fc + if bm { + r &= 0xFF + } + op[0].set(v, bm, r) + + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if r > v1 || (r == v1 && fc > 0) { + v.fl |= flagC + } +} + +func print(v *vm, bm bool, op []operand) { + // TODO: ignore print for the moment +} + +func decodeArg(br *rarBitReader, byteMode bool) (operand, error) { + n, err := br.readBits(1) + if err != nil { + return nil, err + } + if n > 0 { // Register + n, err = br.readBits(3) + return opR(n), err + } + n, err = br.readBits(1) + if err != nil { + return nil, err + } + if n == 0 { // Immediate + if byteMode { + n, err = br.readBits(8) + } else { + m, err := br.readUint32() + return opI(m), err + } + return opI(n), err + } + n, err = br.readBits(1) + if err != nil { + return nil, err + } + if n == 0 { + // 
Register Indirect + n, err = br.readBits(3) + return opRI(n), err + } + n, err = br.readBits(1) + if err != nil { + return nil, err + } + if n == 0 { + // Base + Index Indirect + n, err = br.readBits(3) + if err != nil { + return nil, err + } + i, err := br.readUint32() + return opBI{r: uint32(n), i: i}, err + } + // Direct addressing + m, err := br.readUint32() + return opD(m & vmMask), err +} + +func fixJumpOp(op operand, off int) operand { + n, ok := op.(opI) + if !ok { + return op + } + if n >= 256 { + return n - 256 + } + if n >= 136 { + n -= 264 + } else if n >= 16 { + n -= 8 + } else if n >= 8 { + n -= 16 + } + return n + opI(off) +} + +func readCommands(br *rarBitReader) ([]command, error) { + var cmds []command + + for { + code, err := br.readBits(4) + if err != nil { + return cmds, err + } + if code&0x08 > 0 { + n, err := br.readBits(2) + if err != nil { + return cmds, err + } + code = (code<<2 | n) - 24 + } + + if code >= len(ops) { + return cmds, errInvalidVMInstruction + } + ins := ops[code] + + var com command + + if ins.byteMode { + n, err := br.readBits(1) + if err != nil { + return cmds, err + } + com.bm = n > 0 + } + com.f = ins.f + + if ins.nops > 0 { + com.op = make([]operand, ins.nops) + com.op[0], err = decodeArg(br, com.bm) + if err != nil { + return cmds, err + } + if ins.nops == 2 { + com.op[1], err = decodeArg(br, com.bm) + if err != nil { + return cmds, err + } + } else if ins.jop { + com.op[0] = fixJumpOp(com.op[0], len(cmds)) + } + } + cmds = append(cmds, com) + } +} diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 00000000..bd899d83 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md new file mode 100644 index 00000000..dd3c9d47 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/README.md @@ -0,0 +1,31 @@ +[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4) +[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) + +# lz4 +LZ4 compression and decompression in pure Go + +## Usage + +```go +import "github.com/pierrec/lz4" +``` + +## Description + +Package lz4 implements reading and writing lz4 compressed data (a frame), +as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +using an io.Reader (decompression) and io.Writer (compression). +It is designed to minimize memory usage while maximizing throughput by being able to +[de]compress data concurrently. + +The Reader and the Writer support concurrent processing provided the supplied buffers are +large enough (in multiples of BlockMaxSize) and there is no block dependency. +Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. +The runtime.GOMAXPROCS() value is used to apply concurrency or not. + +Although the block level compression and decompression functions are exposed and are fully compatible +with the lz4 block format definition, they are low level and should not be used directly. +For a complete description of an lz4 compressed block, see: +http://fastcompression.blogspot.fr/2011/05/lz4-explained.html + +See https://github.com/Cyan4973/lz4 for the reference C implementation. diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go new file mode 100644 index 00000000..44e3eaaa --- /dev/null +++ b/vendor/github.com/pierrec/lz4/block.go @@ -0,0 +1,454 @@ +package lz4 + +import ( + "encoding/binary" + "errors" +) + +// block represents a frame data block. 
+// Used when compressing or decompressing frame blocks concurrently. +type block struct { + compressed bool + zdata []byte // compressed data + data []byte // decompressed data + offset int // offset within the data as with block dependency the 64Kb window is prepended to it + checksum uint32 // compressed data checksum + err error // error while [de]compressing +} + +var ( + // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted. + ErrInvalidSource = errors.New("lz4: invalid source") + // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when + // the supplied buffer for [de]compression is too small. + ErrShortBuffer = errors.New("lz4: short buffer") +) + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock decompresses the source buffer into the destination one, +// starting at the di index and returning the decompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. 
+func UncompressBlock(src, dst []byte, di int) (int, error) { + si, sn, di0 := 0, len(src), di + if sn == 0 { + return 0, nil + } + + for { + // literals and match lengths (token) + lLen := int(src[si] >> 4) + mLen := int(src[si] & 0xF) + if si++; si == sn { + return di, ErrInvalidSource + } + + // literals + if lLen > 0 { + if lLen == 0xF { + for src[si] == 0xFF { + lLen += 0xFF + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + lLen += int(src[si]) + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + if len(dst)-di < lLen || si+lLen > sn { + return di - di0, ErrShortBuffer + } + di += copy(dst[di:], src[si:si+lLen]) + + if si += lLen; si >= sn { + return di - di0, nil + } + } + + if si += 2; si >= sn { + return di, ErrInvalidSource + } + offset := int(src[si-2]) | int(src[si-1])<<8 + if di-offset < 0 || offset == 0 { + return di - di0, ErrInvalidSource + } + + // match + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + mLen += int(src[si]) + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + // minimum match length is 4 + mLen += 4 + if len(dst)-di <= mLen { + return di - di0, ErrShortBuffer + } + + // copy the match (NB. match is at least 4 bytes long) + if mLen >= offset { + bytesToCopy := offset * (mLen / offset) + // Efficiently copy the match dst[di-offset:di] into the slice + // dst[di:di+bytesToCopy] + expanded := dst[di-offset : di+bytesToCopy] + n := offset + for n <= bytesToCopy+offset { + copy(expanded[n:], expanded[:n]) + n *= 2 + } + di += bytesToCopy + mLen -= bytesToCopy + } + + di += copy(dst[di:], dst[di-offset:di-offset+mLen]) + } +} + +// CompressBlock compresses the source buffer starting at soffet into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. 
+// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, soffset int) (int, error) { + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 || soffset >= sn { + return 0, nil + } + var si, di int + + // fast scan strategy: + // we only need a hash table to store the last sequences (4 bytes) + var hashTable [1 << hashLog]int + var hashShift = uint((minMatch * 8) - hashLog) + + // Initialise the hash table with the first 64Kb of the input buffer + // (used when compressing dependent blocks) + for si < soffset { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + si++ + hashTable[h] = si + } + + anchor := si + fma := 1 << skipStrength + for si < sn-minMatch { + // hash the next 4 bytes (sequence)... + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + // -1 to separate existing entries from new ones + ref := hashTable[h] - 1 + // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving) + hashTable[h] = si + 1 + // no need to check the last 3 bytes in the first literal 4 bytes as + // this guarantees that the next match, if any, is compressed with + // a lower size, since to have some compression we must have: + // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size) + // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap + // and by definition we do have: + // ll >= 1, ml >= 4 + // => ll+ml >= 5 + // => so overlap must be 0 + + // the sequence is new, out of bound (64kb) or not valid: try next sequence + if ref < 0 || fma&(1<>winSizeLog > 0 || + src[ref] != src[si] || + src[ref+1] != src[si+1] || + src[ref+2] != src[si+2] || + src[ref+3] != src[si+3] { + // variable step: improves performance on non-compressible data + si += fma >> skipStrength + fma++ + continue + } + // match found + fma = 1 << skipStrength + lLen := si - anchor + offset := si - ref + + // encode match length part 1 + si += minMatch + mLen := si // 
match length has minMatch already + for si <= sn && src[si] == src[si-offset] { + si++ + } + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // encode literals length + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(l) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // literals + if di+lLen >= dn { + return di, ErrShortBuffer + } + di += copy(dst[di:], src[anchor:anchor+lLen]) + anchor = si + + // encode offset + if di += 2; di >= dn { + return di, ErrShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // encode match length part 2 + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(mLen) + if di++; di == dn { + return di, ErrShortBuffer + } + } + } + + if anchor == 0 { + // incompressible + return 0, nil + } + + // last literals + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(lLen) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // write literals + src = src[anchor:] + switch n := di + len(src); { + case n > dn: + return di, ErrShortBuffer + case n >= sn: + // incompressible + return 0, nil + } + di += copy(dst[di:], src) + return di, nil +} + +// CompressBlockHC compresses the source buffer starting at soffet into the destination one. +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. 
If it is 0 and no error, then the data is not compressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlockHC(src, dst []byte, soffset int) (int, error) { + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 || soffset >= sn { + return 0, nil + } + var si, di int + + // Hash Chain strategy: + // we need a hash table and a chain table + // the chain table cannot contain more entries than the window size (64Kb entries) + var hashTable [1 << hashLog]int + var chainTable [winSize]int + var hashShift = uint((minMatch * 8) - hashLog) + + // Initialise the hash table with the first 64Kb of the input buffer + // (used when compressing dependent blocks) + for si < soffset { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + chainTable[si&winMask] = hashTable[h] + si++ + hashTable[h] = si + } + + anchor := si + for si < sn-minMatch { + // hash the next 4 bytes (sequence)... + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + + // follow the chain until out of window and give the longest match + mLen := 0 + offset := 0 + for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 { + // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length + if src[next+mLen] == src[si+mLen] { + for ml := 0; ; ml++ { + if src[next+ml] != src[si+ml] || si+ml > sn { + // found a longer match, keep its position and length + if mLen < ml && ml >= minMatch { + mLen = ml + offset = si - next + } + break + } + } + } + } + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + 1 + + // no match found + if mLen == 0 { + si++ + continue + } + + // match found + // update hash/chain tables with overlaping bytes: + // si already hashed, add everything from si+1 up to the match length + for si, ml := si+1, si+mLen; si < ml; { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + chainTable[si&winMask] = 
hashTable[h] + si++ + hashTable[h] = si + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // match length does not include minMatch + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // encode literals length + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(l) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // literals + if di+lLen >= dn { + return di, ErrShortBuffer + } + di += copy(dst[di:], src[anchor:anchor+lLen]) + anchor = si + + // encode offset + if di += 2; di >= dn { + return di, ErrShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // encode match length part 2 + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(mLen) + if di++; di == dn { + return di, ErrShortBuffer + } + } + } + + if anchor == 0 { + // incompressible + return 0, nil + } + + // last literals + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(lLen) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // write literals + src = src[anchor:] + switch n := di + len(src); { + case n > dn: + return di, ErrShortBuffer + case n >= sn: + // incompressible + return 0, nil + } + di += copy(dst[di:], src) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go new file mode 100644 index 00000000..ddb82f66 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4.go @@ -0,0 +1,105 @@ +// Package lz4 
implements reading and writing lz4 compressed data (a frame), +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +// using an io.Reader (decompression) and io.Writer (compression). +// It is designed to minimize memory usage while maximizing throughput by being able to +// [de]compress data concurrently. +// +// The Reader and the Writer support concurrent processing provided the supplied buffers are +// large enough (in multiples of BlockMaxSize) and there is no block dependency. +// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. +// The runtime.GOMAXPROCS() value is used to apply concurrency or not. +// +// Although the block level compression and decompression functions are exposed and are fully compatible +// with the lz4 block format definition, they are low level and should not be used directly. +// For a complete description of an lz4 compressed block, see: +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +// +// See https://github.com/Cyan4973/lz4 for the reference C implementation. +package lz4 + +import ( + "hash" + "sync" + + "github.com/pierrec/xxHash/xxHash32" +) + +const ( + // Extension is the LZ4 frame file name extension + Extension = ".lz4" + // Version is the LZ4 frame format version + Version = 1 + + frameMagic = uint32(0x184D2204) + frameSkipMagic = uint32(0x184D2A50) + + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise. 
+ hashLog = 16 + hashTableSize = 1 << hashLog + hashShift = uint((minMatch * 8) - hashLog) + + mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. + skipStrength = 6 // variable step for fast scan + + hasher = uint32(2654435761) // prime number used to hash minMatch +) + +// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. +var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20} +var bsMapValue = map[int]byte{} + +// Reversed. +func init() { + for i, v := range bsMapID { + bsMapValue[v] = i + } +} + +// Header describes the various flags that can be set on a Writer or obtained from a Reader. +// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// +// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. +// It is the caller responsibility to check them if necessary (typically when using the Reader concurrency). +type Header struct { + BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one) + BlockChecksum bool // compressed blocks are checksumed + NoChecksum bool // frame checksum + BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. + Size uint64 // the frame total size. It is _not_ computed by the Writer. + HighCompression bool // use high compression (only for the Writer) + done bool // whether the descriptor was processed (Read or Write and checked) + // Removed as not supported + // Dict bool // a dictionary id is to be used + // DictID uint32 // the dictionary id read from the frame, if any. +} + +// xxhPool wraps the standard pool for xxHash items. +// Putting items back in the pool automatically resets them. 
+type xxhPool struct { + sync.Pool +} + +func (p *xxhPool) Get() hash.Hash32 { + return p.Pool.Get().(hash.Hash32) +} + +func (p *xxhPool) Put(h hash.Hash32) { + h.Reset() + p.Pool.Put(h) +} + +// hashPool is used by readers and writers and contains xxHash items. +var hashPool = xxhPool{ + Pool: sync.Pool{ + New: func() interface{} { return xxHash32.New(0) }, + }, +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go new file mode 100644 index 00000000..9f7fd604 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -0,0 +1,364 @@ +package lz4 + +import ( + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "runtime" + "sync" + "sync/atomic" +) + +// ErrInvalid is returned when the data being read is not an LZ4 archive +// (LZ4 magic number detection failed). +var ErrInvalid = errors.New("invalid lz4 data") + +// errEndOfBlock is returned by readBlock when it has reached the last block of the frame. +// It is not an error. +var errEndOfBlock = errors.New("end of block") + +// Reader implements the LZ4 frame decoder. +// The Header is set after the first call to Read(). +// The Header may change between Read() calls in case of concatenated frames. +type Reader struct { + Pos int64 // position within the source + Header + src io.Reader + checksum hash.Hash32 // frame hash + wg sync.WaitGroup // decompressing go routine wait group + data []byte // buffered decompressed data + window []byte // 64Kb decompressed data window +} + +// NewReader returns a new LZ4 frame decoder. +// No access to the underlying io.Reader is performed. +func NewReader(src io.Reader) *Reader { + return &Reader{ + src: src, + checksum: hashPool.Get(), + } +} + +// readHeader checks the frame magic number and parses the frame descriptoz. +// Skippable frames are supported even as a first frame although the LZ4 +// specifications recommends skippable frames not to be used as first frames. 
+func (z *Reader) readHeader(first bool) error { + defer z.checksum.Reset() + + for { + var magic uint32 + if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil { + if !first && err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + z.Pos += 4 + if magic>>8 == frameSkipMagic>>8 { + var skipSize uint32 + if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil { + return err + } + z.Pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + z.Pos += m + if err != nil { + return err + } + continue + } + if magic != frameMagic { + return ErrInvalid + } + break + } + + // header + var buf [8]byte + if _, err := io.ReadFull(z.src, buf[:2]); err != nil { + return err + } + z.Pos += 2 + + b := buf[0] + if b>>6 != Version { + return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version) + } + z.BlockDependency = b>>5&1 == 0 + z.BlockChecksum = b>>4&1 > 0 + frameSize := b>>3&1 > 0 + z.NoChecksum = b>>2&1 == 0 + // z.Dict = b&1 > 0 + + bmsID := buf[1] >> 4 & 0x7 + bSize, ok := bsMapID[bmsID] + if !ok { + return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID) + } + z.BlockMaxSize = bSize + + z.checksum.Write(buf[0:2]) + + if frameSize { + if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil { + return err + } + z.Pos += 8 + binary.LittleEndian.PutUint64(buf[:], z.Size) + z.checksum.Write(buf[0:8]) + } + + // if z.Dict { + // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil { + // return err + // } + // z.Pos += 4 + // binary.LittleEndian.PutUint32(buf[:], z.DictID) + // z.checksum.Write(buf[0:4]) + // } + + // header checksum + if _, err := io.ReadFull(z.src, buf[:1]); err != nil { + return err + } + z.Pos++ + if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { + return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h) + } + + z.Header.done = true + + return nil +} + +// Read decompresses data from the 
underlying source into the supplied buffer. +// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. +// +// Data is buffered if the input buffer is too small, and exhausted upon successive calls. +// +// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is +// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value. +func (z *Reader) Read(buf []byte) (n int, err error) { + if !z.Header.done { + if err = z.readHeader(true); err != nil { + return + } + } + + if len(buf) == 0 { + return + } + + // exhaust remaining data from previous Read() + if len(z.data) > 0 { + n = copy(buf, z.data) + z.data = z.data[n:] + if len(z.data) == 0 { + z.data = nil + } + return + } + + // Break up the input buffer into BlockMaxSize blocks with at least one block. + // Then decompress into each of them concurrently if possible (no dependency). + // In case of dependency, the first block will be missing the window (except on the + // very first call), the rest will have it already since it comes from the previous block. 
+ wbuf := buf + zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize + zblocks := make([]block, zn) + for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ { + zb := &zblocks[zi] + // last block may be too small + if len(wbuf) < z.BlockMaxSize+len(z.window) { + wbuf = make([]byte, z.BlockMaxSize+len(z.window)) + } + copy(wbuf, z.window) + if zb.err = z.readBlock(wbuf, zb); zb.err != nil { + break + } + wbuf = wbuf[z.BlockMaxSize:] + if !z.BlockDependency { + z.wg.Add(1) + go z.decompressBlock(zb, &abort) + continue + } + // cannot decompress concurrently when dealing with block dependency + z.decompressBlock(zb, nil) + // the last block may not contain enough data + if len(z.window) == 0 { + z.window = make([]byte, winSize) + } + if len(zb.data) >= winSize { + copy(z.window, zb.data[len(zb.data)-winSize:]) + } else { + copy(z.window, z.window[len(zb.data):]) + copy(z.window[len(zb.data)+1:], zb.data) + } + } + z.wg.Wait() + + // since a block size may be less then BlockMaxSize, trim the decompressed buffers + for _, zb := range zblocks { + if zb.err != nil { + if zb.err == errEndOfBlock { + return n, z.close() + } + return n, zb.err + } + bLen := len(zb.data) + if !z.NoChecksum { + z.checksum.Write(zb.data) + } + m := copy(buf[n:], zb.data) + // buffer the remaining data (this is necessarily the last block) + if m < bLen { + z.data = zb.data[m:] + } + n += m + } + + return +} + +// readBlock reads an entire frame block from the frame. +// The input buffer is the one that will receive the decompressed data. +// If the end of the frame is detected, it returns the errEndOfBlock error. 
+func (z *Reader) readBlock(buf []byte, b *block) error { + var bLen uint32 + if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil { + return err + } + atomic.AddInt64(&z.Pos, 4) + + switch { + case bLen == 0: + return errEndOfBlock + case bLen&(1<<31) == 0: + b.compressed = true + b.data = buf + b.zdata = make([]byte, bLen) + default: + bLen = bLen & (1<<31 - 1) + if int(bLen) > len(buf) { + return fmt.Errorf("lz4.Read: invalid block size: %d", bLen) + } + b.data = buf[:bLen] + b.zdata = buf[:bLen] + } + if _, err := io.ReadFull(z.src, b.zdata); err != nil { + return err + } + + if z.BlockChecksum { + if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil { + return err + } + xxh := hashPool.Get() + defer hashPool.Put(xxh) + xxh.Write(b.zdata) + if h := xxh.Sum32(); h != b.checksum { + return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum) + } + } + + return nil +} + +// decompressBlock decompresses a frame block. +// In case of an error, the block err is set with it and abort is set to 1. +func (z *Reader) decompressBlock(b *block, abort *uint32) { + if abort != nil { + defer z.wg.Done() + } + if b.compressed { + n := len(z.window) + m, err := UncompressBlock(b.zdata, b.data, n) + if err != nil { + if abort != nil { + atomic.StoreUint32(abort, 1) + } + b.err = err + return + } + b.data = b.data[n : n+m] + } + atomic.AddInt64(&z.Pos, int64(len(b.data))) +} + +// close validates the frame checksum (if any) and checks the next frame (if any). 
+func (z *Reader) close() error { + if !z.NoChecksum { + var checksum uint32 + if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil { + return err + } + if checksum != z.checksum.Sum32() { + return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum) + } + } + + // get ready for the next concatenated frame, but do not change the position + pos := z.Pos + z.Reset(z.src) + z.Pos = pos + + // since multiple frames can be concatenated, check for another one + return z.readHeader(false) +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) { + z.Header = Header{} + z.Pos = 0 + z.src = r + z.checksum.Reset() + z.data = nil + z.window = nil +} + +// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer. +// Returns the number of bytes written. +func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { + cpus := runtime.GOMAXPROCS(0) + var buf []byte + + // The initial buffer being nil, the first Read will be only read the compressed frame options. + // The buffer can then be sized appropriately to support maximum concurrency decompression. + // If multiple frames are concatenated, Read() will return with no data decompressed but with + // potentially changed options. The buffer will be resized accordingly, always trying to + // maximize concurrency. + for { + nsize := 0 + // the block max size can change if multiple streams are concatenated. + // Check it after every Read(). 
+ if z.BlockDependency { + // in case of dependency, we cannot decompress concurrently, + // so allocate the minimum buffer + window size + nsize = len(z.window) + z.BlockMaxSize + } else { + // if no dependency, allocate a buffer large enough for concurrent decompression + nsize = cpus * z.BlockMaxSize + } + if nsize != len(buf) { + buf = make([]byte, nsize) + } + + m, er := z.Read(buf) + if er != nil && er != io.EOF { + return n, er + } + m, err = w.Write(buf[:m]) + n += int64(m) + if err != nil || er == io.EOF { + return + } + } +} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go new file mode 100644 index 00000000..b1b712fe --- /dev/null +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -0,0 +1,377 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "hash" + "io" + "runtime" +) + +// Writer implements the LZ4 frame encoder. +type Writer struct { + Header + dst io.Writer + checksum hash.Hash32 // frame checksum + data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with + window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer + + zbCompressBuf []byte // buffer for compressing lz4 blocks + writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock +} + +// NewWriter returns a new LZ4 frame encoder. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriter(dst io.Writer) *Writer { + return &Writer{ + dst: dst, + checksum: hashPool.Get(), + Header: Header{ + BlockMaxSize: 4 << 20, + }, + writeSizeBuf: make([]byte, 4), + } +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. 
+func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = 4 << 20 + } + // the only option that need to be validated + bSize, ok := bsMapValue[z.Header.BlockMaxSize] + if !ok { + return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize) + } + + // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + // Size and DictID are optional + var buf [19]byte + + // set the fixed size data: magic number, block max size and flags + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + if !z.Header.BlockDependency { + flg |= 1 << 5 + } + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + // if z.Header.Dict { + // flg |= 1 + // } + buf[4] = flg + buf[5] = bSize << 4 + + // current buffer size: magic(4) + flags(1) + block max size (1) + n := 6 + // optional items + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + // if z.Header.Dict { + // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID) + // n += 4 + // } + + // header checksum includes the flags, block max size and optional Size and DictID + z.checksum.Write(buf[4:n]) + buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF) + z.checksum.Reset() + + // header ready, write it out + if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + + // initialize buffers dependent on header info + z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize) + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +// +// If the input buffer is large enough (typically in multiples of BlockMaxSize) +// the data will be compressed concurrently. 
+// +// Write never buffers any data unless in BlockDependency mode where it may +// do so until it has 64Kb of data, after which it never buffers any. +func (z *Writer) Write(buf []byte) (n int, err error) { + if !z.Header.done { + if err = z.writeHeader(); err != nil { + return + } + } + + if len(buf) == 0 { + return + } + + if !z.NoChecksum { + z.checksum.Write(buf) + } + + // with block dependency, require at least 64Kb of data to work with + // not having 64Kb only matters initially to setup the first window + bl := 0 + if z.BlockDependency && len(z.window) == 0 { + bl = len(z.data) + z.data = append(z.data, buf...) + if len(z.data) < winSize { + return len(buf), nil + } + buf = z.data + z.data = nil + } + + // Break up the input buffer into BlockMaxSize blocks, provisioning the left over block. + // Then compress into each of them concurrently if possible (no dependency). + var ( + zb block + wbuf = buf + zn = len(wbuf) / z.BlockMaxSize + zi = 0 + leftover = len(buf) % z.BlockMaxSize + ) + +loop: + for zi < zn { + if z.BlockDependency { + if zi == 0 { + // first block does not have the window + zb.data = append(z.window, wbuf[:z.BlockMaxSize]...) + zb.offset = len(z.window) + wbuf = wbuf[z.BlockMaxSize-winSize:] + } else { + // set the uncompressed data including the window from previous block + zb.data = wbuf[:z.BlockMaxSize+winSize] + zb.offset = winSize + wbuf = wbuf[z.BlockMaxSize:] + } + } else { + zb.data = wbuf[:z.BlockMaxSize] + wbuf = wbuf[z.BlockMaxSize:] + } + + goto write + } + + // left over + if leftover > 0 { + zb = block{data: wbuf} + if z.BlockDependency { + if zn == 0 { + zb.data = append(z.window, zb.data...) 
+ zb.offset = len(z.window) + } else { + zb.offset = winSize + } + } + + leftover = 0 + goto write + } + + if z.BlockDependency { + if len(z.window) == 0 { + z.window = make([]byte, winSize) + } + // last buffer may be shorter than the window + if len(buf) >= winSize { + copy(z.window, buf[len(buf)-winSize:]) + } else { + copy(z.window, z.window[len(buf):]) + copy(z.window[len(buf)+1:], buf) + } + } + + return + +write: + zb = z.compressBlock(zb) + _, err = z.writeBlock(zb) + + written := len(zb.data) + if bl > 0 { + if written >= bl { + written -= bl + bl = 0 + } else { + bl -= written + written = 0 + } + } + + n += written + // remove the window in zb.data + if z.BlockDependency { + if zi == 0 { + n -= len(z.window) + } else { + n -= winSize + } + } + if err != nil { + return + } + zi++ + goto loop +} + +// compressBlock compresses a block. +func (z *Writer) compressBlock(zb block) block { + // compressed block size cannot exceed the input's + var ( + n int + err error + zbuf = z.zbCompressBuf + ) + if z.HighCompression { + n, err = CompressBlockHC(zb.data, zbuf, zb.offset) + } else { + n, err = CompressBlock(zb.data, zbuf, zb.offset) + } + + // compressible and compressed size smaller than decompressed: ok! + if err == nil && n > 0 && len(zb.zdata) < len(zb.data) { + zb.compressed = true + zb.zdata = zbuf[:n] + } else { + zb.compressed = false + zb.zdata = zb.data[zb.offset:] + } + + if z.BlockChecksum { + xxh := hashPool.Get() + xxh.Write(zb.zdata) + zb.checksum = xxh.Sum32() + hashPool.Put(xxh) + } + + return zb +} + +// writeBlock writes a frame block to the underlying io.Writer (size, data). 
+func (z *Writer) writeBlock(zb block) (int, error) { + bLen := uint32(len(zb.zdata)) + if !zb.compressed { + bLen |= 1 << 31 + } + + n := 0 + + binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen) + n, err := z.dst.Write(z.writeSizeBuf) + if err != nil { + return n, err + } + + m, err := z.dst.Write(zb.zdata) + n += m + if err != nil { + return n, err + } + + if z.BlockChecksum { + binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum) + m, err := z.dst.Write(z.writeSizeBuf) + n += m + + if err != nil { + return n, err + } + } + + return n, nil +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +// +// Flush is only required when in BlockDependency mode and the total of +// data written is less than 64Kb. +func (z *Writer) Flush() error { + if len(z.data) == 0 { + return nil + } + + zb := z.compressBlock(block{data: z.data}) + if _, err := z.writeBlock(zb); err != nil { + return err + } + return nil +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + + // buffered data for the block dependency window + if z.BlockDependency && len(z.data) > 0 { + zb := block{data: z.data} + if _, err := z.writeBlock(z.compressBlock(zb)); err != nil { + return err + } + } + + if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil { + return err + } + if !z.NoChecksum { + if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil { + return err + } + } + return nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. 
+func (z *Writer) Reset(w io.Writer) { + z.Header = Header{} + z.dst = w + z.checksum.Reset() + z.data = nil + z.window = nil +} + +// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer. +// Returns the number of bytes read. +// It does not close the Writer. +func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) { + cpus := runtime.GOMAXPROCS(0) + buf := make([]byte, cpus*z.BlockMaxSize) + for { + m, er := io.ReadFull(r, buf) + n += int64(m) + if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF { + if _, err = z.Write(buf[:m]); err != nil { + return + } + if er == nil { + continue + } + return + } + return n, er + } +} diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE new file mode 100644 index 00000000..c1418f3f --- /dev/null +++ b/vendor/github.com/pierrec/xxHash/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2014, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go new file mode 100644 index 00000000..ff58256b --- /dev/null +++ b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go @@ -0,0 +1,212 @@ +// Package xxHash32 implements the very fast xxHash hashing algorithm (32 bits version). +// (https://github.com/Cyan4973/xxHash/) +package xxHash32 + +import "hash" + +const ( + prime32_1 = 2654435761 + prime32_2 = 2246822519 + prime32_3 = 3266489917 + prime32_4 = 668265263 + prime32_5 = 374761393 +) + +type xxHash struct { + seed uint32 + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// New returns a new Hash32 instance. +func New(seed uint32) hash.Hash32 { + xxh := &xxHash{seed: seed} + xxh.Reset() + return xxh +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh xxHash) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *xxHash) Reset() { + xxh.v1 = xxh.seed + prime32_1 + prime32_2 + xxh.v2 = xxh.seed + prime32_2 + xxh.v3 = xxh.seed + xxh.v4 = xxh.seed - prime32_1 + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). 
+func (xxh *xxHash) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *xxHash) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. +func (xxh *xxHash) Write(input []byte) (int, error) { + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + xxh.v1 = rol13(xxh.v1+u32(xxh.buf[:])*prime32_2) * prime32_1 + xxh.v2 = rol13(xxh.v2+u32(xxh.buf[4:])*prime32_2) * prime32_1 + xxh.v3 = rol13(xxh.v3+u32(xxh.buf[8:])*prime32_2) * prime32_1 + xxh.v4 = rol13(xxh.v4+u32(xxh.buf[12:])*prime32_2) * prime32_1 + p = r + xxh.bufused = 0 + } + + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1 + v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1 + v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1 + v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. 
+func (xxh *xxHash) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if xxh.totalLen >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += xxh.seed + prime32_5 + } + + p := 0 + n := xxh.bufused + for n := n - 4; p <= n; p += 4 { + h32 += u32(xxh.buf[p:p+4]) * prime32_3 + h32 = rol17(h32) * prime32_4 + } + for ; p < n; p++ { + h32 += uint32(xxh.buf[p]) * prime32_5 + h32 = rol11(h32) * prime32_1 + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} + +// Checksum returns the 32bits Hash value. +func Checksum(input []byte, seed uint32) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += seed + prime32_5 + } else { + v1 := seed + prime32_1 + prime32_2 + v2 := seed + prime32_2 + v3 := seed + v4 := seed - prime32_1 + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1 + v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1 + v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1 + v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += u32(input[p:p+4]) * prime32_3 + h32 = rol17(h32) * prime32_4 + } + for p < n { + h32 += uint32(input[p]) * prime32_5 + h32 = rol11(h32) * prime32_1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} + +func u32(buf []byte) uint32 { + // go compiler recognizes this pattern and optimizes it on little endian platforms + return uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | 
u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/tmthrgd/go-bindata/LICENSE b/vendor/github.com/tmthrgd/go-bindata/LICENSE new file mode 100644 index 00000000..62c39157 --- /dev/null +++ b/vendor/github.com/tmthrgd/go-bindata/LICENSE @@ -0,0 +1,54 @@ +Copyright (c) 2017, Tom Thorogood. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Tom Thorogood nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---- Portions of the source code are also covered by the following license: ---- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/tmthrgd/go-bindata/restore/restore.go b/vendor/github.com/tmthrgd/go-bindata/restore/restore.go new file mode 100644 index 00000000..4030004d --- /dev/null +++ b/vendor/github.com/tmthrgd/go-bindata/restore/restore.go @@ -0,0 +1,57 @@ +// Copyright 2017 Tom Thorogood. All rights reserved. 
+// Use of this source code is governed by a Modified +// BSD License that can be found in the LICENSE file. + +// Package restore provides the restore API that was +// previously embedded into the generated output. +package restore + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// AssetAndInfo represents the generated AssetAndInfo method. +type AssetAndInfo func(name string) (data []byte, info os.FileInfo, err error) + +// AssetDir represents the generated AssetDir method. +type AssetDir func(name string) (children []string, err error) + +// Asset restores an asset under the given directory +func Asset(dir, name string, assetAndInfo AssetAndInfo) error { + path := filepath.Join(dir, filepath.FromSlash(name)) + + data, info, err := assetAndInfo(name) + if err != nil { + return err + } + + if err = os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + + if err = ioutil.WriteFile(path, data, info.Mode()); err != nil { + return err + } + + return os.Chtimes(path, info.ModTime(), info.ModTime()) +} + +// Assets restores an asset under the given directory recursively +func Assets(dir, name string, assetDir AssetDir, assetAndInfo AssetAndInfo) error { + children, err := assetDir(name) + // File + if err != nil { + return Asset(dir, name, assetAndInfo) + } + + // Dir + for _, child := range children { + if err = Assets(dir, filepath.Join(name, child), assetDir, assetAndInfo); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 00000000..58ebdc16 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2016 Ulrich Kunitz +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 00000000..969ae7a0 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,71 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It includes also a gxz command for compressing and +decompressing data. The package is completely written in Go and doesn't +have any dependency on any C code. + +The package is currently under development. There might be bugs and APIs +are not considered stable. 
At this time the package cannot compete with +the xz tool regarding compression speed and size. The algorithms there +have been developed over a long time and are highly optimized. However +there are a number of improvements planned and I'm very optimistic about +parallel compression and decompression. Stay tuned! + +# Using the API + +The following example program shows how to use the API. + + package main + + import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" + ) + + func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) + } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } + } + +# Using the gxz compression tool + +The package includes a gxz command line utility for compression and +decompression. + +Use following command for installation: + + $ go get github.com/ulikunitz/xz/cmd/gxz + +To test it call the following command. + + $ gxz bigfile + +After some time a much smaller file bigfile.xz will replace bigfile. +To decompress it use the following command. + + $ gxz -d bigfile.xz + diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md new file mode 100644 index 00000000..7b34c0ca --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -0,0 +1,315 @@ +# TODO list + +## Release v0.6 + +1. Review encoder and check for lzma improvements under xz. +2. Fix binary tree matcher. +3. Compare compression ratio with xz tool using comparable parameters + and optimize parameters +4. 
Do some optimizations + - rename operation action and make it a simple type of size 8 + - make maxMatches, wordSize parameters + - stop searching after a certain length is found (parameter sweetLen) + +## Release v0.7 + +1. Optimize code +2. Do statistical analysis to get linear presets. +3. Test sync.Pool compatability for xz and lzma Writer and Reader +3. Fuzz optimized code. + +## Release v0.8 + +1. Support parallel go routines for writing and reading xz files. +2. Support a ReaderAt interface for xz files with small block sizes. +3. Improve compatibility between gxz and xz +4. Provide manual page for gxz + +## Release v0.9 + +1. Improve documentation +2. Fuzz again + +## Release v1.0 + +1. Full functioning gxz +2. Add godoc URL to README.md (godoc.org) +3. Resolve all issues. +4. Define release candidates. +5. Public announcement. + +## Package lzma + +### Release v0.6 + +- Rewrite Encoder into a simple greedy one-op-at-a-time encoder + including + + simple scan at the dictionary head for the same byte + + use the killer byte (requiring matches to get longer, the first + test should be the byte that would make the match longer) + + +## Optimizations + +- There may be a lot of false sharing in lzma.State; check whether this + can be improved by reorganizing the internal structure of it. +- Check whether batching encoding and decoding improves speed. + +### DAG optimizations + +- Use full buffer to create minimal bit-length above range encoder. +- Might be too slow (see v0.4) + +### Different match finders + +- hashes with 2, 3 characters additional to 4 characters +- binary trees with 2-7 characters (uint64 as key, use uint32 as + pointers into a an array) +- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + into an array with bit-steeling for the colors) + +## Release Procedure + +- execute goch -l for all packages; probably with lower param like 0.5. +- check orthography with gospell +- Write release notes in doc/relnotes. 
+- Update README.md +- xb copyright . in xz directory to ensure all new files have Copyright + header +- VERSION= go generate github.com/ulikunitz/xz/... to update + version files +- Execute test for Linux/amd64, Linux/x86 and Windows/amd64. +- Update TODO.md - write short log entry +- git checkout master && git merge dev +- git tag -a +- git push + +## Log + +### 2017-06-05 + +Release v0.5.4 fixes issues #15 of another problem with the padding size +check for the xz block header. I removed the check completely. + +### 2017-02-15 + +Release v0.5.3 fixes issue #12 regarding the decompression of an empty +XZ stream. Many thanks to Tomasz KÅ‚ak, who reported the issue. + +### 2016-12-02 + +Release v0.5.2 became necessary to allow the decoding of xz files with +4-byte padding in the block header. Many thanks to Greg, who reported +the issue. + +### 2016-07-23 + +Release v0.5.1 became necessary to fix problems with 32-bit platforms. +Many thanks to Bruno Brigas, who reported the issue. + +### 2016-07-04 + +Release v0.5 provides improvements to the compressor and provides support for +the decompression of xz files with multiple xz streams. + +### 2016-01-31 + +Another compression rate increase by checking the byte at length of the +best match first, before checking the whole prefix. This makes the +compressor even faster. We have now a large time budget to beat the +compression ratio of the xz tool. For enwik8 we have now over 40 seconds +to reduce the compressed file size for another 7 MiB. + +### 2016-01-30 + +I simplified the encoder. Speed and compression rate increased +dramatically. A high compression rate affects also the decompression +speed. The approach with the buffer and optimizing for operation +compression rate has not been successful. Going for the maximum length +appears to be the best approach. + +### 2016-01-28 + +The release v0.4 is ready. It provides a working xz implementation, +which is rather slow, but works and is interoperable with the xz tool. 
+It is an important milestone. + +### 2016-01-10 + +I have the first working implementation of an xz reader and writer. I'm +happy about reaching this milestone. + +### 2015-12-02 + +I'm now ready to implement xz because, I have a working LZMA2 +implementation. I decided today that v0.4 will use the slow encoder +using the operations buffer to be able to go back, if I intend to do so. + +### 2015-10-21 + +I have restarted the work on the library. While trying to implement +LZMA2, I discovered that I need to resimplify the encoder and decoder +functions. The option approach is too complicated. Using a limited byte +writer and not caring for written bytes at all and not to try to handle +uncompressed data simplifies the LZMA encoder and decoder much. +Processing uncompressed data and handling limits is a feature of the +LZMA2 format not of LZMA. + +I learned an interesting method from the LZO format. If the last copy is +too far away they are moving the head one 2 bytes and not 1 byte to +reduce processing times. + +### 2015-08-26 + +I have now reimplemented the lzma package. The code is reasonably fast, +but can still be optimized. The next step is to implement LZMA2 and then +xz. + +### 2015-07-05 + +Created release v0.3. The version is the foundation for a full xz +implementation that is the target of v0.4. + +### 2015-06-11 + +The gflag package has been developed because I couldn't use flag and +pflag for a fully compatible support of gzip's and lzma's options. It +seems to work now quite nicely. + +### 2015-06-05 + +The overflow issue was interesting to research, however Henry S. Warren +Jr. Hacker's Delight book was very helpful as usual and had the issue +explained perfectly. Fefe's information on his website was based on the +C FAQ and quite bad, because it didn't address the issue of -MININT == +MININT. + +### 2015-06-04 + +It has been a productive day. I improved the interface of lzma.Reader +and lzma.Writer and fixed the error handling. 
+ +### 2015-06-01 + +By computing the bit length of the LZMA operations I was able to +improve the greedy algorithm implementation. By using an 8 MByte buffer +the compression rate was not as good as for xz but already better then +gzip default. + +Compression is currently slow, but this is something we will be able to +improve over time. + +### 2015-05-26 + +Checked the license of ogier/pflag. The binary lzmago binary should +include the license terms for the pflag library. + +I added the endorsement clause as used by Google for the Go sources the +LICENSE file. + +### 2015-05-22 + +The package lzb contains now the basic implementation for creating or +reading LZMA byte streams. It allows the support for the implementation +of the DAG-shortest-path algorithm for the compression function. + +### 2015-04-23 + +Completed yesterday the lzbase classes. I'm a little bit concerned that +using the components may require too much code, but on the other hand +there is a lot of flexibility. + +### 2015-04-22 + +Implemented Reader and Writer during the Bayern game against Porto. The +second half gave me enough time. + +### 2015-04-21 + +While showering today morning I discovered that the design for OpEncoder +and OpDecoder doesn't work, because encoding/decoding might depend on +the current status of the dictionary. This is not exactly the right way +to start the day. + +Therefore we need to keep the Reader and Writer design. This time around +we simplify it by ignoring size limits. These can be added by wrappers +around the Reader and Writer interfaces. The Parameters type isn't +needed anymore. + +However I will implement a ReaderState and WriterState type to use +static typing to ensure the right State object is combined with the +right lzbase.Reader and lzbase.Writer. + +As a start I have implemented ReaderState and WriterState to ensure +that the state for reading is only used by readers and WriterState only +used by Writers. 
+ +### 2015-04-20 + +Today I implemented the OpDecoder and tested OpEncoder and OpDecoder. + +### 2015-04-08 + +Came up with a new simplified design for lzbase. I implemented already +the type State that replaces OpCodec. + +### 2015-04-06 + +The new lzma package is now fully usable and lzmago is using it now. The +old lzma package has been completely removed. + +### 2015-04-05 + +Implemented lzma.Reader and tested it. + +### 2015-04-04 + +Implemented baseReader by adapting code form lzma.Reader. + +### 2015-04-03 + +The opCodec has been copied yesterday to lzma2. opCodec has a high +number of dependencies on other files in lzma2. Therefore I had to copy +almost all files from lzma. + +### 2015-03-31 + +Removed only a TODO item. + +However in Francesco Campoy's presentation "Go for Javaneros +(Javaïstes?)" is the the idea that using an embedded field E, all the +methods of E will be defined on T. If E is an interface T satisfies E. + +https://talks.golang.org/2014/go4java.slide#51 + +I have never used this, but it seems to be a cool idea. + +### 2015-03-30 + +Finished the type writerDict and wrote a simple test. + +### 2015-03-25 + +I started to implement the writerDict. + +### 2015-03-24 + +After thinking long about the LZMA2 code and several false starts, I +have now a plan to create a self-sufficient lzma2 package that supports +the classic LZMA format as well as LZMA2. The core idea is to support a +baseReader and baseWriter type that support the basic LZMA stream +without any headers. Both types must support the reuse of dictionaries +and the opCodec. + +### 2015-01-10 + +1. Implemented simple lzmago tool +2. Tested tool against large 4.4G file + - compression worked correctly; tested decompression with lzma + - decompression hits a full buffer condition +3. Fixed a bug in the compressor and wrote a test for it +4. 
Executed full cycle for 4.4 GB file; performance can be improved ;-) + +### 2015-01-11 + +- Release v0.2 because of the working LZMA encoder and decoder diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go new file mode 100644 index 00000000..fadc1a59 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/bits.go @@ -0,0 +1,74 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "io" +) + +// putUint32LE puts the little-endian representation of x into the first +// four bytes of p. +func putUint32LE(p []byte, x uint32) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) +} + +// putUint64LE puts the little-endian representation of x into the first +// eight bytes of p. +func putUint64LE(p []byte, x uint64) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) + p[4] = byte(x >> 32) + p[5] = byte(x >> 40) + p[6] = byte(x >> 48) + p[7] = byte(x >> 56) +} + +// uint32LE converts a little endian representation to an uint32 value. +func uint32LE(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | + uint32(p[3])<<24 +} + +// putUvarint puts a uvarint representation of x into the byte slice. +func putUvarint(p []byte, x uint64) int { + i := 0 + for x >= 0x80 { + p[i] = byte(x) | 0x80 + x >>= 7 + i++ + } + p[i] = byte(x) + return i + 1 +} + +// errOverflow indicates an overflow of the 64-bit unsigned integer. +var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") + +// readUvarint reads a uvarint from the given byte reader. 
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) { + var s uint + i := 0 + for { + b, err := r.ReadByte() + if err != nil { + return x, i, err + } + i++ + if b < 0x80 { + if i > 10 || i == 10 && b > 1 { + return x, i, errOverflowU64 + } + return x | uint64(b)< 0 { + k = 4 - k + } + return k +} + +/*** Header ***/ + +// headerMagic stores the magic bytes for the header +var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00} + +// HeaderLen provides the length of the xz file header. +const HeaderLen = 12 + +// Constants for the checksum methods supported by xz. +const ( + CRC32 byte = 0x1 + CRC64 = 0x4 + SHA256 = 0xa +) + +// errInvalidFlags indicates that flags are invalid. +var errInvalidFlags = errors.New("xz: invalid flags") + +// verifyFlags returns the error errInvalidFlags if the value is +// invalid. +func verifyFlags(flags byte) error { + switch flags { + case CRC32, CRC64, SHA256: + return nil + default: + return errInvalidFlags + } +} + +// flagstrings maps flag values to strings. +var flagstrings = map[byte]string{ + CRC32: "CRC-32", + CRC64: "CRC-64", + SHA256: "SHA-256", +} + +// flagString returns the string representation for the given flags. +func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. +var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. 
+func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. +func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. 
Note +// that the footer value is checked for correctness. +func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. 
+func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. 
+func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. 
+ if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. +func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. 
+type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +// writeFilters writes the filters. +func writeFilters(w io.Writer, filters []filter) (n int, err error) { + for _, f := range filters { + p, err := f.MarshalBinary() + if err != nil { + return n, err + } + k, err := w.Write(p) + n += k + if err != nil { + return n, err + } + } + return n, nil +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. 
+func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
+func readIndexBody(r io.Reader) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 0000000000000000000000000000000000000000..4b820bd5a16e83fe5db4fb315639a4337f862483 GIT binary patch literal 104 zcmexsUKJ6=z`*kC+7>q^21Q0O1_p)_{ill=8FWH2QWXkIGn2Cwl8W-n^AytZD-^Oy za|?dFO$zmVVdxt0+m!4eq- E0K@hlng9R* literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 00000000..a3288787 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. 
+type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. +func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. 
+var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, 
+ 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 
0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 00000000..f99ec220 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. 
+ +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 00000000..58635b11 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. +const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. +func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. 
+func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 00000000..ab6a19ca --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after hash has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 00000000..0ba45e8f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,457 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows to disable +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. 
The package has also a predefined +// 'standard' Logger accessible through helper function Print[f|ln], +// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] +// that are easier to use then creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags. +// +// The Fatal functions call os.Exit(1) after the message is output +// unless not suppressed by the flags. The Panic functions call panic +// after the writing the log message unless suppressed. +package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +// +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. 
A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) 
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) + } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) 
+ return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. 
The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. 
+func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) 
+} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. 
+func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 00000000..a781bd19 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,523 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bufio" + "errors" + "fmt" + "io" + "unicode" +) + +// node represents a node in the binary tree. +type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. +const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. 
+func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. 
+func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. + un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. +func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. 
+func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter 
:= func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 00000000..e9bab019 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,45 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. +var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. 
+func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 00000000..5350d814 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 00000000..50e0b6d5 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. 
As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. The function takes care of wrapping the index as +// well as potential overflow situations. +func (b *buffer) addIndex(i int, n int) int { + // subtraction of len(b.data) prevents overflow + i += n - len(b.data) + if i < 0 { + i += len(b.data) + } + return i +} + +// Read reads bytes from the buffer into p and returns the number of +// bytes read. The function never returns an error but might return less +// data than requested. +func (b *buffer) Read(p []byte) (n int, err error) { + n, err = b.Peek(p) + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// Peek reads bytes from the buffer into p without changing the buffer. +// Peek will never return an error but might return less data than +// requested. 
+func (b *buffer) Peek(p []byte) (n int, err error) { + m := b.Buffered() + n = len(p) + if m < n { + n = m + p = p[:n] + } + k := copy(p, b.data[b.rear:]) + if k < n { + copy(p[k:], b.data) + } + return n, nil +} + +// Discard skips the n next bytes to read from the buffer, returning the +// bytes discarded. +// +// If Discards skips fewer than n bytes, it returns an error. +func (b *buffer) Discard(n int) (discarded int, err error) { + if n < 0 { + return 0, errors.New("buffer.Discard: negative argument") + } + m := b.Buffered() + if m < n { + n = m + err = errors.New( + "buffer.Discard: discarded less bytes then requested") + } + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// ErrNoSpace indicates that there is insufficient space for the Write +// operation. +var ErrNoSpace = errors.New("insufficient space") + +// Write puts data into the buffer. If less bytes are written than +// requested ErrNoSpace is returned. +func (b *buffer) Write(p []byte) (n int, err error) { + m := b.Available() + n = len(p) + if m < n { + n = m + p = p[:m] + err = ErrNoSpace + } + k := copy(b.data[b.front:], p) + if k < n { + copy(b.data, p[k:]) + } + b.front = b.addIndex(b.front, n) + return n, err +} + +// WriteByte writes a single byte into the buffer. The error ErrNoSpace +// is returned if no single byte is available in the buffer for writing. +func (b *buffer) WriteByte(c byte) error { + if b.Available() < 1 { + return ErrNoSpace + } + b.data[b.front] = c + b.front = b.addIndex(b.front, 1) + return nil +} + +// prefixLen returns the length of the common prefix of a and b. +func prefixLen(a, b []byte) int { + if len(a) > len(b) { + a, b = b, a + } + for i, c := range a { + if b[i] != c { + return i + } + } + return len(a) +} + +// matchLen returns the length of the common prefix for the given +// distance from the rear and the byte slice p. 
+func (b *buffer) matchLen(distance int, p []byte) int { + var n int + i := b.rear - distance + if i < 0 { + if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { + return n + } + p = p[n:] + i = 0 + } + n += prefixLen(p, b.data[i:]) + return n +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go new file mode 100644 index 00000000..a3696ba0 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -0,0 +1,37 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// ErrLimit indicates that the limit of the LimitedByteWriter has been +// reached. +var ErrLimit = errors.New("limit reached") + +// LimitedByteWriter provides a byte writer that can be written until a +// limit is reached. The field N provides the number of remaining +// bytes. +type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 00000000..16e14db3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. 
+type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + 
n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. +func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. 
+ k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 00000000..564a12b8 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,135 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. 
+func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. 
+func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } + +// Buffered returns the number of bytes currently buffered in the +// decoder dictionary. +func (d *decoderDict) buffered() int { return d.buf.Buffered() } + +// Peek gets data from the buffer without advancing the rear index. +func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 00000000..e08eb989 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,49 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "fmt" + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// makeDirectCodec creates a directCodec. The function panics if the number of +// bits is not in the range [1,32]. +func makeDirectCodec(bits int) directCodec { + if !(1 <= bits && bits <= 32) { + panic(fmt.Errorf("bits=%d out of range", bits)) + } + return directCodec(bits) +} + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. 
+func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. +func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 00000000..b053a2dc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,156 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 + // maximum position slot + maxPosSlot = 63 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. 
+func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// distBits returns the number of bits required to encode dist. +func distBits(dist uint32) int { + if dist < startPosModel { + return 6 + } + // slot s > 3, dist d + // s = 2(bits(d)-1) + bit(d, bits(d)-2) + // s>>1 = bits(d)-1 + // bits(d) = 32-nlz32(d) + // s>>1=31-nlz32(d) + // n = 5 + (s>>1) = 36 - nlz32(d) + return 36 - nlz32(dist) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. 
+func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. +func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. 
+ dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 00000000..18ce0099 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 10 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. 
+func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. +func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. 
+func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + 
e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. +func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. +var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. 
+func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 00000000..9d0fbc70 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// addtional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. +func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. 
+func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. 
+func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 0000000000000000000000000000000000000000..5edad633266eb5173a7c39761dc8b9e71efbfe80 GIT binary patch literal 67 zcma!LU}#|Y4+RWbQXGqzRntCtR~%i$`d{za%}WYWYfXMUl6~Q5_UjH?=5CuO0w(I5 UuQ#VXelz{mI_3ZW`W7$%0HEw6g#Z8m literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 00000000..d786a974 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. 
+type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. 
+func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. +func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. 
+func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. 
+func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. + i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. + break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 00000000..bc708969 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +import ( + "errors" + "fmt" +) + +// uint32LE reads an uint32 integer from a byte slice +func uint32LE(b []byte) uint32 { + x := uint32(b[3]) << 24 + x |= uint32(b[2]) << 16 + x |= uint32(b[1]) << 8 + x |= uint32(b[0]) + return x +} + +// uint64LE converts the uint64 value stored as little endian to an uint64 +// value. +func uint64LE(b []byte) uint64 { + x := uint64(b[7]) << 56 + x |= uint64(b[6]) << 48 + x |= uint64(b[5]) << 40 + x |= uint64(b[4]) << 32 + x |= uint64(b[3]) << 24 + x |= uint64(b[2]) << 16 + x |= uint64(b[1]) << 8 + x |= uint64(b[0]) + return x +} + +// putUint32LE puts an uint32 integer into a byte slice that must have at least +// a length of 4 bytes. +func putUint32LE(b []byte, x uint32) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) +} + +// putUint64LE puts the uint64 value into the byte slice as little endian +// value. The byte slice b must have at least place for 8 bytes. +func putUint64LE(b []byte, x uint64) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) +} + +// noHeaderSize defines the value of the length field in the LZMA header. +const noHeaderSize uint64 = 1<<64 - 1 + +// HeaderLen provides the length of the LZMA file header. +const HeaderLen = 13 + +// header represents the header of an LZMA file. +type header struct { + properties Properties + dictCap int + // uncompressed size; negative value if no size is given + size int64 +} + +// marshalBinary marshals the header. 
+func (h *header) marshalBinary() (data []byte, err error) { + if err = h.properties.verify(); err != nil { + return nil, err + } + if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + return nil, fmt.Errorf("lzma: DictCap %d out of range", + h.dictCap) + } + + data = make([]byte, 13) + + // property byte + data[0] = h.properties.Code() + + // dictionary capacity + putUint32LE(data[1:5], uint32(h.dictCap)) + + // uncompressed size + var s uint64 + if h.size > 0 { + s = uint64(h.size) + } else { + s = noHeaderSize + } + putUint64LE(data[5:], s) + + return data, nil +} + +// unmarshalBinary unmarshals the header. +func (h *header) unmarshalBinary(data []byte) error { + if len(data) != HeaderLen { + return errors.New("lzma.unmarshalBinary: data has wrong length") + } + + // properties + var err error + if h.properties, err = PropertiesForCode(data[0]); err != nil { + return err + } + + // dictionary capacity + h.dictCap = int(uint32LE(data[1:])) + if h.dictCap < 0 { + return errors.New( + "LZMA header: dictionary capacity exceeds maximum " + + "integer") + } + + // uncompressed size + s := uint64LE(data[5:]) + if s == noHeaderSize { + h.size = -1 + } else { + h.size = int64(s) + if h.size < 0 { + return errors.New( + "LZMA header: uncompressed size " + + "out of int64 range") + } + } + + return nil +} + +// validDictCap checks whether the dictionary capacity is correct. This +// is used to weed out wrong file headers. +func validDictCap(dictcap int) bool { + if int64(dictcap) == MaxDictCap { + return true + } + for n := uint(10); n < 32; n++ { + if dictcap == 1<= 10 or 2^32-1. If +// there is an explicit size it must not exceed 256 GiB. The length of +// the data argument must be HeaderLen. 
+func ValidHeader(data []byte) bool { + var h header + if err := h.unmarshalBinary(data); err != nil { + return false + } + if !validDictCap(h.dictCap) { + return false + } + return h.size < 0 || h.size <= 1<<38 +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go new file mode 100644 index 00000000..ac6a71a5 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -0,0 +1,398 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +const ( + // maximum size of compressed data in a chunk + maxCompressed = 1 << 16 + // maximum size of uncompressed data in a chunk + maxUncompressed = 1 << 21 +) + +// chunkType represents the type of an LZMA2 chunk. Note that this +// value is an internal representation and no actual encoding of a LZMA2 +// chunk header. +type chunkType byte + +// Possible values for the chunk type. +const ( + // end of stream + cEOS chunkType = iota + // uncompressed; reset dictionary + cUD + // uncompressed; no reset of dictionary + cU + // LZMA compressed; no reset + cL + // LZMA compressed; reset state + cLR + // LZMA compressed; reset state; new property value + cLRN + // LZMA compressed; reset state; new property value; reset dictionary + cLRND +) + +// chunkTypeStrings provide a string representation for the chunk types. +var chunkTypeStrings = [...]string{ + cEOS: "EOS", + cU: "U", + cUD: "UD", + cL: "L", + cLR: "LR", + cLRN: "LRN", + cLRND: "LRND", +} + +// String returns a string representation of the chunk type. +func (c chunkType) String() string { + if !(cEOS <= c && c <= cLRND) { + return "unknown" + } + return chunkTypeStrings[c] +} + +// Actual encodings for the chunk types in the value. Note that the high +// uncompressed size bits are stored in the header byte additionally. 
+const ( + hEOS = 0 + hUD = 1 + hU = 2 + hL = 1 << 7 + hLR = 1<<7 | 1<<5 + hLRN = 1<<7 | 1<<6 + hLRND = 1<<7 | 1<<6 | 1<<5 +) + +// errHeaderByte indicates an unsupported value for the chunk header +// byte. These bytes starts the variable-length chunk header. +var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. +func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. +func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. 
+func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. 
+func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. +func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return errChunkType + 
default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 00000000..e5177309 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,129 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +import "errors" + +// maxPosBits defines the number of bits of the position value that are used to +// to compute the posState value. The value is used to select the tree codec +// for length encoding and decoding. +const maxPosBits = 4 + +// minMatchLen and maxMatchLen give the minimum and maximum values for +// encoding and decoding length values. minMatchLen is also used as base +// for the encoded length values. +const ( + minMatchLen = 2 + maxMatchLen = minMatchLen + 16 + 256 - 1 +) + +// lengthCodec support the encoding of the length value. +type lengthCodec struct { + choice [2]prob + low [1 << maxPosBits]treeCodec + mid [1 << maxPosBits]treeCodec + high treeCodec +} + +// deepcopy initializes the lc value as deep copy of the source value. +func (lc *lengthCodec) deepcopy(src *lengthCodec) { + if lc == src { + return + } + lc.choice = src.choice + for i := range lc.low { + lc.low[i].deepcopy(&src.low[i]) + } + for i := range lc.mid { + lc.mid[i].deepcopy(&src.mid[i]) + } + lc.high.deepcopy(&src.high) +} + +// init initializes a new length codec. +func (lc *lengthCodec) init() { + for i := range lc.choice { + lc.choice[i] = probInit + } + for i := range lc.low { + lc.low[i] = makeTreeCodec(3) + } + for i := range lc.mid { + lc.mid[i] = makeTreeCodec(3) + } + lc.high = makeTreeCodec(8) +} + +// lBits gives the number of bits used for the encoding of the l value +// provided to the range encoder. +func lBits(l uint32) int { + switch { + case l < 8: + return 4 + case l < 16: + return 5 + default: + return 10 + } +} + +// Encode encodes the length offset. The length offset l can be compute by +// subtracting minMatchLen (2) from the actual length. 
+// +// l = length - minMatchLen +// +func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, +) (err error) { + if l > maxMatchLen-minMatchLen { + return errors.New("lengthCodec.Encode: l out of range") + } + if l < 8 { + if err = lc.choice[0].Encode(e, 0); err != nil { + return + } + return lc.low[posState].Encode(e, l) + } + if err = lc.choice[0].Encode(e, 1); err != nil { + return + } + if l < 16 { + if err = lc.choice[1].Encode(e, 0); err != nil { + return + } + return lc.mid[posState].Encode(e, l-8) + } + if err = lc.choice[1].Encode(e, 1); err != nil { + return + } + if err = lc.high.Encode(e, l-16); err != nil { + return + } + return nil +} + +// Decode reads the length offset. Add minMatchLen to compute the actual length +// to the length offset l. +func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, +) (l uint32, err error) { + var b uint32 + if b, err = lc.choice[0].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.low[posState].Decode(d) + return + } + if b, err = lc.choice[1].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.mid[posState].Decode(d) + l += 8 + return + } + l, err = lc.high.Decode(d) + l += 16 + return +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go new file mode 100644 index 00000000..c949d6eb --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -0,0 +1,132 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// literalCodec supports the encoding of literal. It provides 768 probability +// values per literal state. The upper 512 probabilities are used with the +// context of a match bit. +type literalCodec struct { + probs []prob +} + +// deepcopy initializes literal codec c as a deep copy of the source. 
+func (c *literalCodec) deepcopy(src *literalCodec) { + if c == src { + return + } + c.probs = make([]prob, len(src.probs)) + copy(c.probs, src.probs) +} + +// init initializes the literal codec. +func (c *literalCodec) init(lc, lp int) { + switch { + case !(minLC <= lc && lc <= maxLC): + panic("lc out of range") + case !(minLP <= lp && lp <= maxLP): + panic("lp out of range") + } + c.probs = make([]prob, 0x300<= 7 { + m := uint32(match) + for { + matchBit := (m >> 7) & 1 + m <<= 1 + bit := (r >> 7) & 1 + r <<= 1 + i := ((1 + matchBit) << 8) | symbol + if err = probs[i].Encode(e, bit); err != nil { + return + } + symbol = (symbol << 1) | bit + if matchBit != bit { + break + } + if symbol >= 0x100 { + break + } + } + } + for symbol < 0x100 { + bit := (r >> 7) & 1 + r <<= 1 + if err = probs[symbol].Encode(e, bit); err != nil { + return + } + symbol = (symbol << 1) | bit + } + return nil +} + +// Decode decodes a literal byte using the range decoder as well as the LZMA +// state, a match byte, and the literal state. +func (c *literalCodec) Decode(d *rangeDecoder, + state uint32, match byte, litState uint32, +) (s byte, err error) { + k := litState * 0x300 + probs := c.probs[k : k+0x300] + symbol := uint32(1) + if state >= 7 { + m := uint32(match) + for { + matchBit := (m >> 7) & 1 + m <<= 1 + i := ((1 + matchBit) << 8) | symbol + bit, err := d.DecodeBit(&probs[i]) + if err != nil { + return 0, err + } + symbol = (symbol << 1) | bit + if matchBit != bit { + break + } + if symbol >= 0x100 { + break + } + } + } + for symbol < 0x100 { + bit, err := d.DecodeBit(&probs[symbol]) + if err != nil { + return 0, err + } + symbol = (symbol << 1) | bit + } + s = byte(symbol - 0x100) + return s, nil +} + +// minLC and maxLC define the range for LC values. +const ( + minLC = 0 + maxLC = 8 +) + +// minLC and maxLC define the range for LP values. +const ( + minLP = 0 + maxLP = 4 +) + +// minState and maxState define a range for the state values stored in +// the State values. 
+const ( + minState = 0 + maxState = 11 +) diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go new file mode 100644 index 00000000..4a244eb1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go @@ -0,0 +1,52 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// MatchAlgorithm identifies an algorithm to find matches in the +// dictionary. +type MatchAlgorithm byte + +// Supported matcher algorithms. +const ( + HashTable4 MatchAlgorithm = iota + BinaryTree +) + +// maStrings are used by the String method. +var maStrings = map[MatchAlgorithm]string{ + HashTable4: "HashTable4", + BinaryTree: "BinaryTree", +} + +// String returns a string representation of the Matcher. +func (a MatchAlgorithm) String() string { + if s, ok := maStrings[a]; ok { + return s + } + return "unknown" +} + +var errUnsupportedMatchAlgorithm = errors.New( + "lzma: unsupported match algorithm value") + +// verify checks whether the matcher value is supported. +func (a MatchAlgorithm) verify() error { + if _, ok := maStrings[a]; !ok { + return errUnsupportedMatchAlgorithm + } + return nil +} + +func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { + switch a { + case HashTable4: + return newHashTable(dictCap, 4) + case BinaryTree: + return newBinTree(dictCap) + } + return nil, errUnsupportedMatchAlgorithm +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go new file mode 100644 index 00000000..733bb99d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -0,0 +1,80 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +import ( + "errors" + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// verify checks whether the match is valid. If that is not the case an +// error is returned. +func (m match) verify() error { + if !(minDistance <= m.distance && m.distance <= maxDistance) { + return errors.New("distance out of range") + } + if !(1 <= m.n && m.n <= maxMatchLen) { + return errors.New("length out of range") + } + return nil +} + +// l return the l-value for the match, which is the difference of length +// n and 2. +func (m match) l() uint32 { + return uint32(m.n - minMatchLen) +} + +// dist returns the dist value for the match, which is one less of the +// distance stored in the match. +func (m match) dist() uint32 { + return uint32(m.distance - minDistance) +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 00000000..24d50ec6 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2017 Ulrich Kunitz. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 00000000..23418e25 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. +const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 00000000..6361c5e7 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,248 @@ +// Copyright 2014-2017 Ulrich Kunitz. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. +type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. +func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. 
+func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// init initializes the range decoder, by reading from the byte reader. +func (d *rangeDecoder) init() error { + d.nrange = 0xffffffff + d.code = 0 + + b, err := d.br.ReadByte() + if err != nil { + return err + } + if b != 0 { + return errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return err + } + } + + if d.code >= d.nrange { + return errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return nil +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. 
+func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. +func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. 
+func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 00000000..2ef3dcaa --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. +type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. 
The function reads and verifies the header of the LZMA
+func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState + ctype chunkType +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. +func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. 
+func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. 
+type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. +func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 00000000..50235105 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,151 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. 
+type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// initState initializes the state. +func initState(s *state, p Properties) { + *s = state{Properties: p} + s.Reset() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. 
+func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. +func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 00000000..504b3d78 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// treeCodec encodes or decodes values with a fixed bit size. It is using a +// tree of probability value. The root of the tree is the most-significant bit. +type treeCodec struct { + probTree +} + +// makeTreeCodec makes a tree codec. 
The bits value must be inside the range +// [1,32]. +func makeTreeCodec(bits int) treeCodec { + return treeCodec{makeProbTree(bits)} +} + +// deepcopy initializes tc as a deep copy of the source. +func (tc *treeCodec) deepcopy(src *treeCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// Encode uses the range encoder to encode a fixed-bit-size value. +func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { + m := uint32(1) + for i := int(tc.bits) - 1; i >= 0; i-- { + b := (v >> uint(i)) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may +// be caused by the range decoder. +func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := 0; j < int(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + } + return m - (1 << uint(tc.bits)), nil +} + +// treeReverseCodec is another tree codec, where the least-significant bit is +// the start of the probability tree. +type treeReverseCodec struct { + probTree +} + +// deepcopy initializes the treeReverseCodec as a deep copy of the +// source. +func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must +// be in the range [1,32]. +func makeTreeReverseCodec(bits int) treeReverseCodec { + return treeReverseCodec{makeProbTree(bits)} +} + +// Encode uses range encoder to encode a fixed-bit-size value. The range +// encoder may cause errors. 
+func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { + m := uint32(1) + for i := uint(0); i < uint(tc.bits); i++ { + b := (v >> i) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors +// returned by the range decoder will be returned. +func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := uint(0); j < uint(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + v |= b << j + } + return v, nil +} + +// probTree stores enough probability values to be used by the treeEncode and +// treeDecode methods of the range coder types. +type probTree struct { + probs []prob + bits byte +} + +// deepcopy initializes the probTree value as a deep copy of the source. +func (t *probTree) deepcopy(src *probTree) { + if t == src { + return + } + t.probs = make([]prob, len(src.probs)) + copy(t.probs, src.probs) + t.bits = src.bits +} + +// makeProbTree initializes a probTree structure. +func makeProbTree(bits int) probTree { + if !(1 <= bits && bits <= 32) { + panic("bits outside of range [1,32]") + } + t := probTree{ + bits: byte(bits), + probs: make([]prob, 1< 0 { + c.SizeInHeader = true + } + if !c.SizeInHeader { + c.EOSMarker = true + } +} + +// Verify checks WriterConfig for errors. Verify will replace zero +// values with default values. 
+func (c *WriterConfig) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.SizeInHeader { + if c.Size < 0 { + return errors.New("lzma: negative size not supported") + } + } else if !c.EOSMarker { + return errors.New("lzma: EOS marker is required") + } + if err = c.Matcher.verify(); err != nil { + return err + } + + return nil +} + +// header returns the header structure for this configuration. +func (c *WriterConfig) header() header { + h := header{ + properties: *c.Properties, + dictCap: c.DictCap, + size: -1, + } + if c.SizeInHeader { + h.size = c.Size + } + return h +} + +// Writer writes an LZMA stream in the classic format. +type Writer struct { + h header + bw io.ByteWriter + buf *bufio.Writer + e *encoder +} + +// NewWriter creates a new LZMA writer for the classic format. The +// method will write the header to the underlying stream. 
+func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{h: c.header()} + + var ok bool + w.bw, ok = lzma.(io.ByteWriter) + if !ok { + w.buf = bufio.NewWriter(lzma) + w.bw = w.buf + } + state := newState(w.h.properties) + m, err := c.Matcher.new(w.h.dictCap) + if err != nil { + return nil, err + } + dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) + if err != nil { + return nil, err + } + var flags encoderFlags + if c.EOSMarker { + flags = eosMarker + } + if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { + return nil, err + } + + if err = w.writeHeader(); err != nil { + return nil, err + } + return w, nil +} + +// NewWriter creates a new LZMA writer using the classic format. The +// function writes the header to the underlying stream. +func NewWriter(lzma io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(lzma) +} + +// writeHeader writes the LZMA header into the stream. +func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. 
+func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 00000000..7c1afe15 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. 
+func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. +type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. 
+func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. 
+func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. +func (w *Writer2) writeCompressedChunk() error { + if w.ctype == cU || w.ctype == cUD { + panic("chunk type uncompressed") + } + + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("writeCompressedChunk: empty chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + c := w.buf.Len() + if c <= 0 { + panic("no compressed data") + } + if c > maxCompressed { + panic("overrun of compressed data limit") + } + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + compressed: uint16(c - 1), + props: w.encoder.state.Properties, + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = io.Copy(w.w, &w.buf) + return err +} + +// writes a single chunk to the underlying writer. +func (w *Writer2) writeChunk() error { + u := int(uncompressedHeaderLen + w.encoder.Compressed()) + c := headerLen(w.ctype) + w.buf.Len() + if u < c { + return w.writeUncompressedChunk() + } + return w.writeCompressedChunk() +} + +// flushChunk terminates the current chunk. The encoder will be reset +// to support the next chunk. 
+func (w *Writer2) flushChunk() error { + if w.written() == 0 { + return nil + } + var err error + if err = w.encoder.Close(); err != nil { + return err + } + if err = w.writeChunk(); err != nil { + return err + } + w.buf.Reset() + w.lbw.N = maxCompressed + if err = w.encoder.Reopen(&w.lbw); err != nil { + return err + } + if err = w.cstate.next(w.ctype); err != nil { + return err + } + w.ctype = w.cstate.defaultChunkType() + w.start = cloneState(w.encoder.state) + return nil +} + +// Flush writes all buffered data out to the underlying stream. This +// could result in multiple chunks to be created. +func (w *Writer2) Flush() error { + if w.cstate == stop { + return errClosed + } + for w.written() > 0 { + if err := w.flushChunk(); err != nil { + return err + } + } + return nil +} + +// Close terminates the LZMA2 stream with an EOS chunk. +func (w *Writer2) Close() error { + if w.cstate == stop { + return errClosed + } + if err := w.Flush(); err != nil { + return nil + } + // write zero byte EOS chunk + _, err := w.w.Write([]byte{0}) + if err != nil { + return err + } + w.cstate = stop + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go new file mode 100644 index 00000000..69cf5f7c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -0,0 +1,117 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// LZMA filter constants. +const ( + lzmaFilterID = 0x21 + lzmaFilterLen = 3 +) + +// lzmaFilter declares the LZMA2 filter information stored in an xz +// block header. +type lzmaFilter struct { + dictCap int64 +} + +// String returns a representation of the LZMA filter. 
+func (f lzmaFilter) String() string { + return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) +} + +// id returns the ID for the LZMA2 filter. +func (f lzmaFilter) id() uint64 { return lzmaFilterID } + +// MarshalBinary converts the lzmaFilter in its encoded representation. +func (f lzmaFilter) MarshalBinary() (data []byte, err error) { + c := lzma.EncodeDictCap(f.dictCap) + return []byte{lzmaFilterID, 1, c}, nil +} + +// UnmarshalBinary unmarshals the given data representation of the LZMA2 +// filter. +func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. 
+func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100755 index 00000000..a8c612ce --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 00000000..0634c6bc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,373 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. 
The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// fill replaces all zero values with their default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. +func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. +type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. 
+func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// errIndex indicates an error with the xz file index. +var errIndex = errors.New("xz: error in xz file index") + +// readTail reads the index body and the xz footer. 
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if len(index) != len(r.index) { + return fmt.Errorf("xz: index length is %d; want %d", + len(index), len(r.index)) + } + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. 
+type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader + err error +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + br.r = io.TeeReader(fr, br.hash) + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// errBlockSize indicates that the size of the block in the block header +// is wrong. +var errBlockSize = errors.New("xz: wrong uncompressed size for block") + +// Read reads data from the block. 
+func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errors.New("xz: wrong uncompressed size for block") + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 00000000..c126f709 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,386 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describe the parameters for an xz writer. 
+type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 + CheckSum byte + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters. +func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. 
+func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing beside returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. +func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. 
+func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil + +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer. +func (w *Writer) Close() error { + if w.closed { + return errClosed + } + w.closed = true + var err error + if err = w.closeBlockWriter(); err != nil { + return err + } + + f := footer{flags: w.h.flags} + if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { + return err + } + data, err := f.MarshalBinary() + if err != nil { + return err + } + if _, err = w.xz.Write(data); err != nil { + return err + } + return nil +} + +// countingWriter is a writer that counts all data written to it. +type countingWriter struct { + w io.Writer + n int64 +} + +// Write writes data to the countingWriter. +func (cw *countingWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + if err == nil && cw.n < 0 { + return n, errors.New("xz: counter overflow") + } + return +} + +// blockWriter is writes a single block. +type blockWriter struct { + cxz countingWriter + // mw combines io.WriteCloser w and the hash. 
+ mw io.Writer + w io.WriteCloser + n int64 + blockSize int64 + closed bool + headerLen int + + filters []filter + hash hash.Hash +} + +// newBlockWriter creates a new block writer. +func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { + bw = &blockWriter{ + cxz: countingWriter{w: xz}, + blockSize: c.BlockSize, + filters: c.filters(), + hash: hash, + } + bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) + if err != nil { + return nil, err + } + bw.mw = io.MultiWriter(bw.w, bw.hash) + return bw, nil +} + +// writeHeader writes the header. If the function is called after Close +// the commpressedSize and uncompressedSize fields will be filled. +func (bw *blockWriter) writeHeader(w io.Writer) error { + h := blockHeader{ + compressedSize: -1, + uncompressedSize: -1, + filters: bw.filters, + } + if bw.closed { + h.compressedSize = bw.compressedSize() + h.uncompressedSize = bw.uncompressedSize() + } + data, err := h.MarshalBinary() + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + bw.headerLen = len(data) + return nil +} + +// compressed size returns the amount of data written to the underlying +// stream. +func (bw *blockWriter) compressedSize() int64 { + return bw.cxz.n +} + +// uncompressedSize returns the number of data written to the +// blockWriter +func (bw *blockWriter) uncompressedSize() int64 { + return bw.n +} + +// unpaddedSize returns the sum of the header length, the uncompressed +// size of the block and the hash size. +func (bw *blockWriter) unpaddedSize() int64 { + if bw.headerLen <= 0 { + panic("xz: block header not written") + } + n := int64(bw.headerLen) + n += bw.compressedSize() + n += int64(bw.hash.Size()) + return n +} + +// record returns the record for the current stream. Call Close before +// calling this method. 
+func (bw *blockWriter) record() record { + return record{bw.unpaddedSize(), bw.uncompressedSize()} +} + +var errClosed = errors.New("xz: writer already closed") + +var errNoSpace = errors.New("xz: no space") + +// Write writes uncompressed data to the block writer. +func (bw *blockWriter) Write(p []byte) (n int, err error) { + if bw.closed { + return 0, errClosed + } + + t := bw.blockSize - bw.n + if int64(len(p)) > t { + err = errNoSpace + p = p[:t] + } + + var werr error + n, werr = bw.mw.Write(p) + bw.n += int64(n) + if werr != nil { + return n, werr + } + return n, err +} + +// Close closes the writer. +func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 7ad2a316..3d817971 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -38,12 +38,54 @@ "revision": "2ee87856327ba09384cabd113bc6b5d174e9ec0f", "revisionTime": "2017-07-27T06:48:18Z" }, + { + "checksumSHA1": "92dnVWesQCC1xueK1Du/6c+yLOk=", + "path": "github.com/dsnet/compress", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "Q8Y8aBNAuiO4/HVyj9PRyBz50YM=", + "path": "github.com/dsnet/compress/bzip2", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "rUK6wJzSweagbKHcRUU1TWkQq/0=", + "path": "github.com/dsnet/compress/bzip2/internal/sais", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "u6VJ7jTVulLgPZaXKWCIHc4hbQs=", + "path": "github.com/dsnet/compress/internal", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": 
"2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "KDfyyvx86cyY/HUA2SSWRWjn7yI=", + "path": "github.com/dsnet/compress/internal/errors", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "Txyi+DYhWRT65KnJokyQWB2xj3A=", + "path": "github.com/dsnet/compress/internal/prefix", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, { "checksumSHA1": "yqF125xVSkmfLpIVGrLlfE05IUk=", "path": "github.com/golang/protobuf/proto", "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", "revisionTime": "2017-10-21T04:39:52Z" }, + { + "checksumSHA1": "p/8vSviYF91gFflhrt5vkyksroo=", + "path": "github.com/golang/snappy", + "revision": "553a641470496b2327abcac10b36396bd98e45c9", + "revisionTime": "2017-02-15T23:32:05Z" + }, { "checksumSHA1": "3HVfwgLpCDH8JX211UWdrSi/GU4=", "path": "github.com/lib/pq", @@ -56,12 +98,90 @@ "revision": "b609790bd85edf8e9ab7e0f8912750a786177bcf", "revisionTime": "2017-10-22T19:20:43Z" }, + { + "checksumSHA1": "k3e1TD8wrhxfUUG3pQBb10ppNGA=", + "path": "github.com/magefile/mage", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "KODorM0Am1g55qObNz3jVOdRVFs=", + "path": "github.com/magefile/mage/build", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "jdM6DuMtXKrl42m0pM/1YOAPkxc=", + "path": "github.com/magefile/mage/mage", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "TkAemcxaY44gsEjO1BiBxwlEI4A=", + "path": "github.com/magefile/mage/mg", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "b1qY9BFtpJnIZEa8yvpJCRbOhRM=", + "path": "github.com/magefile/mage/parse", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + 
"revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "fEuDveZzYX6oqYOT9jqyZROun/Q=", + "path": "github.com/magefile/mage/parse/srcimporter", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "0/j3qlGc8fsWG42uIDZ5p8tVzPM=", + "path": "github.com/magefile/mage/sh", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "oAjx69UIs6F6hPh+2GQSBMaHAfc=", + "path": "github.com/magefile/mage/target", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "He+VtZO7BsPDCZhZtJ1IkNp629o=", + "path": "github.com/magefile/mage/types", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, { "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=", "path": "github.com/matttproud/golang_protobuf_extensions/pbutil", "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c", "revisionTime": "2016-04-24T11:30:07Z" }, + { + "checksumSHA1": "VqPwpjQKzPYZcTkqZOIk8b+gYqI=", + "path": "github.com/mholt/archiver", + "revision": "26cf5bb32d07aa4e8d0de15f56ce516f4641d7df", + "revisionTime": "2017-10-12T05:23:41Z" + }, + { + "checksumSHA1": "rz0k2HRJ9gx11wt/gqATiRd2qz8=", + "path": "github.com/nwaples/rardecode", + "revision": "e06696f847aeda6f39a8f0b7cdff193b7690aef6", + "revisionTime": "2017-03-13T01:07:58Z" + }, + { + "checksumSHA1": "xKzx54LbkghuMauevGWevn5ip3w=", + "path": "github.com/pierrec/lz4", + "revision": "ed8d4cc3b461464e69798080a0092bd028910298", + "revisionTime": "2018-01-13T15:17:03Z" + }, + { + "checksumSHA1": "zPWRjzsPeXCoqmidIcJtHbvrvRs=", + "path": "github.com/pierrec/xxHash/xxHash32", + "revision": "a0006b13c722f7f12368c00a3d3c2ae8a999a0c6", + "revisionTime": "2017-07-14T08:24:55Z" + }, { "checksumSHA1": "hu0MsbTdFzZxNRyAxe2HmTFFFak=", "path": "github.com/prometheus/client_golang/prometheus", @@ -122,6 +242,36 
@@ "revision": "89742aefa4b206dcf400792f3bd35b542998eb3b", "revisionTime": "2017-08-22T13:27:46Z" }, + { + "checksumSHA1": "2CJmLcvYL6KW7gp2xaSdorR4i54=", + "path": "github.com/tmthrgd/go-bindata/restore", + "revision": "40f4993ede74f673cfe96bed75ef8513a389a00a", + "revisionTime": "2017-11-30T10:15:03Z" + }, + { + "checksumSHA1": "qgMa75aMGbkFY0jIqqqgVnCUoNA=", + "path": "github.com/ulikunitz/xz", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "vjnTkzNrMs5Xj6so/fq0mQ6dT1c=", + "path": "github.com/ulikunitz/xz/internal/hash", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "m0pm57ASBK/CTdmC0ppRHO17mBs=", + "path": "github.com/ulikunitz/xz/internal/xlog", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "2vZw6zc8xuNlyVz2QKvdlNSZQ1U=", + "path": "github.com/ulikunitz/xz/lzma", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, { "checksumSHA1": "nqWNlnMmVpt628zzvyo6Yv2CX5Q=", "path": "golang.org/x/crypto/ssh/terminal",