author    Félix Sipma <felix+debian@gueux.org>    2017-02-02 12:48:08 +0100
committer Félix Sipma <felix+debian@gueux.org>    2017-02-02 12:48:08 +0100
commit    7dfc2914070a6f6e31c872050351133b3f263113 (patch)
tree      50ab01463a9ee00a5aa6c7d10b5b6b2ea262a6d7
parent    2d97cde7a4b935ade9d8dbf05dff136fac357e3b (diff)
New upstream version 0.4.0 (upstream/0.4.0)
-rw-r--r--  CONTRIBUTING.md | 7
-rw-r--r--  README.md | 16
-rw-r--r--  VERSION | 2
-rw-r--r--  build.go | 16
-rwxr-xr-x  build_release_binaries.sh | 64
-rw-r--r--  doc/Design.md | 13
-rw-r--r--  doc/FAQ.md | 20
-rw-r--r--  doc/Manual.md | 4
-rw-r--r--  doc/REST_backend.md | 13
-rw-r--r--  run_integration_tests.go | 2
-rw-r--r--  src/cmds/restic/cmd_cat.go | 26
-rw-r--r--  src/cmds/restic/cmd_forget.go | 6
-rw-r--r--  src/cmds/restic/cmd_key.go | 6
-rw-r--r--  src/cmds/restic/cmd_list.go | 15
-rw-r--r--  src/cmds/restic/cmd_ls.go | 35
-rw-r--r--  src/cmds/restic/cmd_prune.go | 27
-rw-r--r--  src/cmds/restic/cmd_snapshots.go | 21
-rw-r--r--  src/cmds/restic/global.go | 2
-rw-r--r--  src/cmds/restic/global_debug.go | 60
-rw-r--r--  src/cmds/restic/global_release.go | 9
-rw-r--r--  src/cmds/restic/integration_test.go | 64
-rw-r--r--  src/cmds/restic/main.go | 19
-rw-r--r--  src/restic/archiver/archive_reader_test.go | 2
-rw-r--r--  src/restic/archiver/archiver_duplication_test.go | 15
-rw-r--r--  src/restic/backend.go | 20
-rw-r--r--  src/restic/backend/local/backend_test.go | 7
-rw-r--r--  src/restic/backend/local/local.go | 99
-rw-r--r--  src/restic/backend/mem/backend_test.go | 7
-rw-r--r--  src/restic/backend/mem/mem_backend.go | 93
-rw-r--r--  src/restic/backend/rest/backend_test.go | 7
-rw-r--r--  src/restic/backend/rest/rest.go | 105
-rw-r--r--  src/restic/backend/rest/rest_test.go | 2
-rw-r--r--  src/restic/backend/s3/backend_test.go | 7
-rw-r--r--  src/restic/backend/s3/s3.go | 176
-rw-r--r--  src/restic/backend/s3/s3_test.go | 2
-rw-r--r--  src/restic/backend/sftp/backend_test.go | 7
-rw-r--r--  src/restic/backend/sftp/sftp.go | 123
-rw-r--r--  src/restic/backend/test/backend_test.go | 7
-rw-r--r--  src/restic/backend/test/tests.go | 277
-rw-r--r--  src/restic/backend/utils.go | 58
-rw-r--r--  src/restic/backend/utils_test.go | 12
-rw-r--r--  src/restic/buffer.go | 21
-rw-r--r--  src/restic/checker/checker.go | 5
-rw-r--r--  src/restic/checker/checker_test.go | 63
-rw-r--r--  src/restic/fuse/file.go | 31
-rw-r--r--  src/restic/fuse/file_test.go | 171
-rw-r--r--  src/restic/hashing/reader.go | 29
-rw-r--r--  src/restic/hashing/reader_test.go | 73
-rw-r--r--  src/restic/hashing/writer.go | 31
-rw-r--r--  src/restic/hashing/writer_test.go | 74
-rw-r--r--  src/restic/id.go | 10
-rw-r--r--  src/restic/index/index.go | 83
-rw-r--r--  src/restic/index/index_test.go | 91
-rw-r--r--  src/restic/lock.go | 8
-rw-r--r--  src/restic/lock_test.go | 6
-rw-r--r--  src/restic/mock/backend.go | 37
-rw-r--r--  src/restic/node.go | 2
-rw-r--r--  src/restic/pack/pack.go | 10
-rw-r--r--  src/restic/pack/pack_test.go | 11
-rw-r--r--  src/restic/readerat.go | 24
-rw-r--r--  src/restic/repository/index.go | 26
-rw-r--r--  src/restic/repository/index_rebuild.go | 3
-rw-r--r--  src/restic/repository/index_test.go | 17
-rw-r--r--  src/restic/repository/key.go | 5
-rw-r--r--  src/restic/repository/master_index.go | 8
-rw-r--r--  src/restic/repository/packer_manager.go | 85
-rw-r--r--  src/restic/repository/packer_manager_test.go | 61
-rw-r--r--  src/restic/repository/repack.go | 82
-rw-r--r--  src/restic/repository/repository.go | 57
-rw-r--r--  src/restic/repository/repository_test.go | 158
-rw-r--r--  src/restic/test/helpers.go | 2
-rw-r--r--  src/restic/testing.go | 25
-rw-r--r--  src/restic/testing_test.go | 11
-rw-r--r--  vendor/manifest | 6
74 files changed, 1757 insertions(+), 1042 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 97b5c0e..5529ea7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,7 +3,10 @@ This document describes the way you can contribute to the restic project.
Ways to Help Out
================
-Thank you for your contribution!
+Thank you for your contribution! Please **open an issue first** (or add a
+comment to an existing issue) if you plan to work on any code or add a new
+feature. This way, duplicate work is prevented and we can discuss your ideas
+and design first.
There are several ways you can help us out. First of all code contributions and
bug fixes are most welcome. However even "minor" details as fixing spelling
@@ -83,7 +86,7 @@ The following commands can be used to run all the tests:
[...]
If you want to run your tests on Linux, OpenBSD or FreeBSD, you can use
-[vagrant](https://www.vagrantup.com/) with the proveded `Vagrantfile` to
+[vagrant](https://www.vagrantup.com/) with the provided `Vagrantfile` to
quickly set up VMs and run the tests, e.g.:
$ vagrant up freebsd
diff --git a/README.md b/README.md
index a7d3489..8645935 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,8 @@ restic is a backup program that is fast, efficient and secure. Detailed
information can be found in [the documentation](doc/index.md) and [the user
manual](doc/Manual.md). The [design document](doc/Design.md) lists the
technical background and gives detailed information about the structure of the
-repository and the data saved therein.
+repository and the data saved therein. The file [FAQ.md](doc/FAQ.md) lists the
+most frequently asked questions.
The latest documentation can be viewed online at
<https://restic.readthedocs.io/en/latest>. On the bottom left corner there is
@@ -39,10 +40,15 @@ Building restic with gccgo may work, but is not supported.
Contribute and Documentation
============================
-Contributions are welcome! More information and a description of the
-development environment can be found in [`CONTRIBUTING.md`](CONTRIBUTING.md). A
-document describing the design of restic and the data structures stored on the
-back end is contained in [`doc/Design.md`](doc/Design.md).
+Contributions are welcome! Please **open an issue first** (or add a comment to
+an existing issue) if you plan to work on any code or add a new feature. This
+way, duplicate work is prevented and we can discuss your ideas and design
+first.
+
+More information and a description of the development environment can be found
+in [`CONTRIBUTING.md`](CONTRIBUTING.md). A document describing the design of
+restic and the data structures stored on the back end is contained in
+[`doc/Design.md`](doc/Design.md).
If you'd like to start contributing to restic, but don't know exactly what do
to, have a look at this great article by Dave Cheney:
diff --git a/VERSION b/VERSION
index 1c09c74..1d0ba9e 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.3.3
+0.4.0
diff --git a/build.go b/build.go
index 8406492..ec55660 100644
--- a/build.go
+++ b/build.go
@@ -174,6 +174,7 @@ func showUsage(output io.Writer) {
fmt.Fprintf(output, " -t --tags specify additional build tags\n")
fmt.Fprintf(output, " -k --keep-gopath do not remove the GOPATH after build\n")
fmt.Fprintf(output, " -T --test run tests\n")
+ fmt.Fprintf(output, " -o --output set output file name\n")
fmt.Fprintf(output, " --goos value set GOOS for cross-compilation\n")
fmt.Fprintf(output, " --goarch value set GOARCH for cross-compilation\n")
}
@@ -204,7 +205,7 @@ func cleanEnv() (env []string) {
func build(cwd, goos, goarch, gopath string, args ...string) error {
args = append([]string{"build"}, args...)
cmd := exec.Command("go", args...)
- cmd.Env = append(cleanEnv(), "GOPATH="+gopath, "GOARCH="+goarch, "GOOS="+goos)
+ cmd.Env = append(cleanEnv(), "GOPATH="+gopath, "GOARCH="+goarch, "GOOS="+goos, "CGO_ENABLED=0")
cmd.Dir = cwd
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -298,6 +299,8 @@ func main() {
targetGOOS := runtime.GOOS
targetGOARCH := runtime.GOARCH
+ var outputFilename string
+
for i, arg := range params {
if skipNext {
skipNext = false
@@ -315,6 +318,9 @@ func main() {
}
skipNext = true
buildTags = strings.Split(params[i+1], " ")
+ case "-o", "--output":
+ skipNext = true
+ outputFilename = params[i+1]
case "-T", "--test":
runTests = true
case "--goos":
@@ -377,9 +383,11 @@ func main() {
}
}()
- outputFilename := config.Name
- if targetGOOS == "windows" {
- outputFilename += ".exe"
+ if outputFilename == "" {
+ outputFilename = config.Name
+ if targetGOOS == "windows" {
+ outputFilename += ".exe"
+ }
}
cwd, err := os.Getwd()
diff --git a/build_release_binaries.sh b/build_release_binaries.sh
new file mode 100755
index 0000000..dced596
--- /dev/null
+++ b/build_release_binaries.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+set -e
+
+if [[ -z "$VERSION" ]]; then
+ echo '$VERSION unset'
+ exit 1
+fi
+
+dir=$(mktemp -d --tmpdir restic-release-XXXXXX)
+echo "path is ${dir}"
+
+for R in \
+ darwin/386 \
+ darwin/amd64 \
+ freebsd/386 \
+ freebsd/amd64 \
+ freebsd/arm \
+ linux/386 \
+ linux/amd64 \
+ linux/arm \
+ linux/arm64 \
+ openbsd/386 \
+ openbsd/amd64 \
+ windows/386 \
+ windows/amd64 \
+ ; do \
+
+ OS=$(dirname $R)
+ ARCH=$(basename $R)
+ filename=restic_${VERSION}_${OS}_${ARCH}
+
+ if [[ "$OS" == "windows" ]]; then
+ filename="${filename}.exe"
+ fi
+
+ echo $filename
+
+ go run ../build.go --goos $OS --goarch $ARCH --output ${filename}
+ if [[ "$OS" == "windows" ]]; then
+ zip ${filename%.exe}.zip ${filename}
+ rm ${filename}
+ mv ${filename%.exe}.zip ${dir}
+ else
+ bzip2 ${filename}
+ mv ${filename}.bz2 ${dir}
+ fi
+done
+
+echo "packing sources"
+git archive --format=tar --prefix=restic-$VERSION/ v$VERSION | gzip -n > restic-$VERSION.tar.gz
+mv restic-$VERSION.tar.gz ${dir}
+
+echo "creating checksums"
+pushd ${dir}
+sha256sum restic_*.{zip,bz2} > SHA256SUMS
+gpg --armor --detach-sign SHA256SUMS
+popd
+
+echo "creating source signature file"
+gpg --armor --detach-sign ${dir}/restic-$VERSION.tar.gz
+
+echo
+echo "done, path is ${dir}"
diff --git a/doc/Design.md b/doc/Design.md
index 6e9cd39..117554d 100644
--- a/doc/Design.md
+++ b/doc/Design.md
@@ -327,10 +327,11 @@ A snapshot references a tree by the SHA-256 hash of the JSON string
representation of its contents. Trees and data are saved in pack files in a
subdirectory of the directory `data`.
-The command `restic cat tree` can be used to inspect the tree referenced above:
+The command `restic cat blob` can be used to inspect the tree referenced above
+(piping the output of the command to `jq .` so that the JSON is indented):
```console
-$ restic -r /tmp/restic-repo cat tree b8138ab08a4722596ac89c917827358da4672eac68e3c03a8115b88dbf4bfb59
+$ restic -r /tmp/restic-repo cat blob b8138ab08a4722596ac89c917827358da4672eac68e3c03a8115b88dbf4bfb59 | jq .
enter password for repository:
{
"nodes": [
@@ -356,11 +357,11 @@ A tree contains a list of entries (in the field `nodes`) which contain meta
data like a name and timestamps. When the entry references a directory, the
field `subtree` contains the plain text ID of another tree object.
-When the command `restic cat tree` is used, the storage hash is needed to print
+When the command `restic cat blob` is used, the plaintext ID is needed to print
a tree. The tree referenced above can be dumped as follows:
```console
-$ restic -r /tmp/restic-repo cat tree 8b238c8811cc362693e91a857460c78d3acf7d9edb2f111048691976803cf16e
+$ restic -r /tmp/restic-repo cat blob 8b238c8811cc362693e91a857460c78d3acf7d9edb2f111048691976803cf16e
enter password for repository:
{
"nodes": [
@@ -389,8 +390,8 @@ enter password for repository:
This tree contains a file entry. This time, the `subtree` field is not present
and the `content` field contains a list with one plain text SHA-256 hash.
-The command `restic cat data` can be used to extract and decrypt data given a
-plaintext ID, e.g. for the data mentioned above:
+The command `restic cat blob` can also be used to extract and decrypt data
+given a plaintext ID, e.g. for the data mentioned above:
```console
$ restic -r /tmp/restic-repo cat blob 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d | sha256sum
diff --git a/doc/FAQ.md b/doc/FAQ.md
new file mode 100644
index 0000000..6719fc5
--- /dev/null
+++ b/doc/FAQ.md
@@ -0,0 +1,20 @@
+FAQ
+===
+
+This is the list of Frequently Asked Questions for restic.
+
+`restic check` reports packs that aren't referenced in any index, is my repository broken?
+------------------------------------------------------------------------------------------
+
+When `restic check` reports that there are pack files in the repository that are not referenced in any index, that's (in contrast to what restic reports at the moment) not a source for concern. The output looks like this:
+
+ $ restic check
+ Create exclusive lock for repository
+ Load indexes
+ Check all packs
+ pack 819a9a52e4f51230afa89aefbf90df37fb70996337ae57e6f7a822959206a85e: not referenced in any index
+ pack de299e69fb075354a3775b6b045d152387201f1cdc229c31d1caa34c3b340141: not referenced in any index
+ Check snapshots, trees and blobs
+ Fatal: repository contains errors
+
+The message means that there is more data stored in the repo than strictly necessary. With high probability this is duplicate data. In order to clean it up, the command `restic prune` can be used. The cause of this bug is not yet known.
diff --git a/doc/Manual.md b/doc/Manual.md
index 350201d..0219a6a 100644
--- a/doc/Manual.md
+++ b/doc/Manual.md
@@ -77,7 +77,7 @@ Available Commands:
Flags:
--no-lock do not lock the repo, this allows some operations on read-only repos
-p, --password-file string read the repository password from a file
- -q, --quiet do not outputcomprehensive progress report
+ -q, --quiet do not output comprehensive progress report
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
Use "restic [command] --help" for more information about a command.
@@ -110,7 +110,7 @@ Flags:
Global Flags:
--no-lock do not lock the repo, this allows some operations on read-only repos
-p, --password-file string read the repository password from a file
- -q, --quiet do not outputcomprehensive progress report
+ -q, --quiet do not output comprehensive progress report
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
```
diff --git a/doc/REST_backend.md b/doc/REST_backend.md
index 78a3fd4..6423d95 100644
--- a/doc/REST_backend.md
+++ b/doc/REST_backend.md
@@ -6,6 +6,19 @@ following values are valid for `{type}`: `data`, `keys`, `locks`, `snapshots`,
`index`, `config`. `{path}` is a path to the repository, so that multiple
different repositories can be accessed. The default path is `/`.
+## POST {path}?create=true
+
+This request is used to initially create a new repository. The server responds
+with "200 OK" if the repository structure was created successfully or already
+exists, otherwise an error is returned.
+
+## DELETE {path}
+
+Deletes the repository on the server side. The server responds with "200 OK" if
+the repository was successfully removed. If this function is not implemented
+the server returns "501 Not Implemented", if this it is denied by the server it
+returns "403 Forbidden".
+
## HEAD {path}/config
Returns "200 OK" if the repository has a configuration,
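
As an illustration of the two new endpoints, the following sketch shows how a client could issue the create and delete requests using Go's net/http. The server address and repository path are assumptions made up for this example; only the endpoints and status codes described above come from the document.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Assumption: a REST server is listening on localhost:8000 and serves
	// the repository at the path "/repo/". Both values are illustrative.
	base := "http://localhost:8000/repo/"

	// POST {path}?create=true creates the repository structure; the server
	// answers "200 OK" if it was created or already exists.
	resp, err := http.Post(base+"?create=true", "binary/octet-stream", nil)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("create:", resp.Status)

	// DELETE {path} removes the repository; servers may instead answer
	// "501 Not Implemented" or "403 Forbidden".
	req, err := http.NewRequest("DELETE", base, nil)
	if err != nil {
		panic(err)
	}
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("delete:", resp.Status)
}
```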
diff --git a/run_integration_tests.go b/run_integration_tests.go
index bfdddeb..8c03ed8 100644
--- a/run_integration_tests.go
+++ b/run_integration_tests.go
@@ -176,7 +176,7 @@ func (env *TravisEnvironment) Prepare() error {
"windows/386", "windows/amd64",
"darwin/386", "darwin/amd64",
"freebsd/386", "freebsd/amd64",
- "opendbsd/386", "opendbsd/amd64",
+ "openbsd/386", "openbsd/amd64",
}
if !strings.HasPrefix(runtime.Version(), "go1.3") {
env.goxOSArch = append(env.goxOSArch,
diff --git a/src/cmds/restic/cmd_cat.go b/src/cmds/restic/cmd_cat.go
index d95e227..d9e723a 100644
--- a/src/cmds/restic/cmd_cat.go
+++ b/src/cmds/restic/cmd_cat.go
@@ -9,13 +9,12 @@ import (
"restic"
"restic/backend"
- "restic/debug"
"restic/errors"
"restic/repository"
)
var cmdCat = &cobra.Command{
- Use: "cat [flags] [pack|blob|tree|snapshot|key|masterkey|config|lock] ID",
+ Use: "cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID",
Short: "print internal objects to stdout",
Long: `
The "cat" command is used to print internal objects to stdout.
@@ -99,7 +98,7 @@ func runCat(gopts GlobalOptions, args []string) error {
return nil
case "key":
h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
- buf, err := backend.LoadAll(repo.Backend(), h, nil)
+ buf, err := backend.LoadAll(repo.Backend(), h)
if err != nil {
return err
}
@@ -150,7 +149,7 @@ func runCat(gopts GlobalOptions, args []string) error {
switch tpe {
case "pack":
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
- buf, err := backend.LoadAll(repo.Backend(), h, nil)
+ buf, err := backend.LoadAll(repo.Backend(), h)
if err != nil {
return err
}
@@ -172,7 +171,7 @@ func runCat(gopts GlobalOptions, args []string) error {
blob := list[0]
buf := make([]byte, blob.Length)
- n, err := repo.LoadBlob(restic.DataBlob, id, buf)
+ n, err := repo.LoadBlob(t, id, buf)
if err != nil {
return err
}
@@ -184,23 +183,6 @@ func runCat(gopts GlobalOptions, args []string) error {
return errors.Fatal("blob not found")
- case "tree":
- debug.Log("cat tree %v", id.Str())
- tree, err := repo.LoadTree(id)
- if err != nil {
- debug.Log("unable to load tree %v: %v", id.Str(), err)
- return err
- }
-
- buf, err := json.MarshalIndent(&tree, "", " ")
- if err != nil {
- debug.Log("error json.MarshalIndent(): %v", err)
- return err
- }
-
- _, err = os.Stdout.Write(append(buf, '\n'))
- return nil
-
default:
return errors.Fatal("invalid type")
}
diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go
index e5a47d3..df84f2a 100644
--- a/src/cmds/restic/cmd_forget.go
+++ b/src/cmds/restic/cmd_forget.go
@@ -133,7 +133,8 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
}
if !opts.DryRun {
- err = repo.Backend().Remove(restic.SnapshotFile, id.String())
+ h := restic.Handle{Type: restic.SnapshotFile, Name: id.String()}
+ err = repo.Backend().Remove(h)
if err != nil {
return err
}
@@ -201,7 +202,8 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
if !opts.DryRun {
for _, sn := range remove {
- err = repo.Backend().Remove(restic.SnapshotFile, sn.ID().String())
+ h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
+ err = repo.Backend().Remove(h)
if err != nil {
return err
}
diff --git a/src/cmds/restic/cmd_key.go b/src/cmds/restic/cmd_key.go
index 4e99f0c..946585a 100644
--- a/src/cmds/restic/cmd_key.go
+++ b/src/cmds/restic/cmd_key.go
@@ -87,7 +87,8 @@ func deleteKey(repo *repository.Repository, name string) error {
return errors.Fatal("refusing to remove key currently used to access repository")
}
- err := repo.Backend().Remove(restic.KeyFile, name)
+ h := restic.Handle{Type: restic.KeyFile, Name: name}
+ err := repo.Backend().Remove(h)
if err != nil {
return err
}
@@ -107,7 +108,8 @@ func changePassword(gopts GlobalOptions, repo *repository.Repository) error {
return errors.Fatalf("creating new key failed: %v\n", err)
}
- err = repo.Backend().Remove(restic.KeyFile, repo.KeyName())
+ h := restic.Handle{Type: restic.KeyFile, Name: repo.KeyName()}
+ err = repo.Backend().Remove(h)
if err != nil {
return err
}
diff --git a/src/cmds/restic/cmd_list.go b/src/cmds/restic/cmd_list.go
index a37de8d..561943a 100644
--- a/src/cmds/restic/cmd_list.go
+++ b/src/cmds/restic/cmd_list.go
@@ -1,8 +1,10 @@
package main
import (
+ "fmt"
"restic"
"restic/errors"
+ "restic/index"
"github.com/spf13/cobra"
)
@@ -52,6 +54,19 @@ func runList(opts GlobalOptions, args []string) error {
t = restic.KeyFile
case "locks":
t = restic.LockFile
+ case "blobs":
+ idx, err := index.Load(repo, nil)
+ if err != nil {
+ return err
+ }
+
+ for _, pack := range idx.Packs {
+ for _, entry := range pack.Entries {
+ fmt.Printf("%v %v\n", entry.Type, entry.ID)
+ }
+ }
+
+ return nil
default:
return errors.Fatal("invalid type")
}
diff --git a/src/cmds/restic/cmd_ls.go b/src/cmds/restic/cmd_ls.go
index 3b09054..bee9d1e 100644
--- a/src/cmds/restic/cmd_ls.go
+++ b/src/cmds/restic/cmd_ls.go
@@ -17,22 +17,35 @@ var cmdLs = &cobra.Command{
Short: "list files in a snapshot",
Long: `
The "ls" command allows listing files and directories in a snapshot.
+
+The special snapshot-ID "latest" can be used to list files and directories of the latest snapshot in the repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runLs(globalOptions, args)
},
}
-var listLong bool
+// LsOptions collects all options for the ls command.
+type LsOptions struct {
+ ListLong bool
+ Host string
+ Paths []string
+}
+
+var lsOptions LsOptions
func init() {
cmdRoot.AddCommand(cmdLs)
- cmdLs.Flags().BoolVarP(&listLong, "long", "l", false, "use a long listing format showing size and mode")
+ flags := cmdLs.Flags()
+ flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
+
+ flags.StringVarP(&lsOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
+ flags.StringSliceVar(&lsOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"")
}
func printNode(prefix string, n *restic.Node) string {
- if !listLong {
+ if !lsOptions.ListLong {
return filepath.Join(prefix, n.Name)
}
@@ -86,9 +99,19 @@ func runLs(gopts GlobalOptions, args []string) error {
return err
}
- id, err := restic.FindSnapshot(repo, args[0])
- if err != nil {
- return err
+ snapshotIDString := args[0]
+ var id restic.ID
+
+ if snapshotIDString == "latest" {
+ id, err = restic.FindLatestSnapshot(repo, lsOptions.Paths, lsOptions.Host)
+ if err != nil {
+ Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, lsOptions.Paths, lsOptions.Host)
+ }
+ } else {
+ id, err = restic.FindSnapshot(repo, snapshotIDString)
+ if err != nil {
+ Exitf(1, "invalid id %q: %v", snapshotIDString, err)
+ }
}
sn, err := restic.LoadSnapshot(repo, id)
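
To summarize the resolution logic added above: the sketch below assumes only the FindLatestSnapshot and FindSnapshot call shapes visible in this hunk (each returning a restic.ID and an error); the helper name and the restic.Repository parameter type are assumptions made for illustration.

```go
package example

import "restic"

// resolveSnapshotID is a hypothetical helper mirroring the behaviour added to
// runLs: the special ID "latest" picks the newest snapshot, optionally
// filtered by host and absolute paths; any other string is resolved as a
// normal (possibly abbreviated) snapshot ID.
func resolveSnapshotID(repo restic.Repository, s string, paths []string, host string) (restic.ID, error) {
	if s == "latest" {
		return restic.FindLatestSnapshot(repo, paths, host)
	}
	return restic.FindSnapshot(repo, s)
}
```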
diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go
index 98c32f7..f43c99f 100644
--- a/src/cmds/restic/cmd_prune.go
+++ b/src/cmds/restic/cmd_prune.go
@@ -103,11 +103,13 @@ func runPrune(gopts GlobalOptions) error {
return err
}
+ blobs := 0
for _, pack := range idx.Packs {
stats.bytes += pack.Size
+ blobs += len(pack.Entries)
}
Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
- len(idx.Packs), len(idx.Blobs), formatBytes(uint64(stats.bytes)))
+ len(idx.Packs), blobs, formatBytes(uint64(stats.bytes)))
blobCount := make(map[restic.BlobHandle]int)
duplicateBlobs := 0
@@ -164,14 +166,17 @@ func runPrune(gopts GlobalOptions) error {
// find packs that need a rewrite
rewritePacks := restic.NewIDSet()
- for h, blob := range idx.Blobs {
- if !usedBlobs.Has(h) {
- rewritePacks.Merge(blob.Packs)
- continue
- }
+ for _, pack := range idx.Packs {
+ for _, blob := range pack.Entries {
+ h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
+ if !usedBlobs.Has(h) {
+ rewritePacks.Insert(pack.ID)
+ continue
+ }
- if blobCount[h] > 1 {
- rewritePacks.Merge(blob.Packs)
+ if blobCount[h] > 1 {
+ rewritePacks.Insert(pack.ID)
+ }
}
}
@@ -214,7 +219,8 @@ func runPrune(gopts GlobalOptions) error {
}
for packID := range removePacks {
- err = repo.Backend().Remove(restic.DataFile, packID.String())
+ h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
+ err = repo.Backend().Remove(h)
if err != nil {
Warnf("unable to remove file %v from the repository\n", packID.Str())
}
@@ -234,7 +240,8 @@ func runPrune(gopts GlobalOptions) error {
var supersedes restic.IDs
for idxID := range repo.List(restic.IndexFile, done) {
- err := repo.Backend().Remove(restic.IndexFile, idxID.String())
+ h := restic.Handle{Type: restic.IndexFile, Name: idxID.String()}
+ err := repo.Backend().Remove(h)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", idxID.Str(), err)
}
diff --git a/src/cmds/restic/cmd_snapshots.go b/src/cmds/restic/cmd_snapshots.go
index 6195ffb..74a7c12 100644
--- a/src/cmds/restic/cmd_snapshots.go
+++ b/src/cmds/restic/cmd_snapshots.go
@@ -57,8 +57,8 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
}
tab := NewTable()
- tab.Header = fmt.Sprintf("%-8s %-19s %-10s %-10s %s", "ID", "Date", "Host", "Tags", "Directory")
- tab.RowFormat = "%-8s %-19s %-10s %-10s %s"
+ tab.Header = fmt.Sprintf("%-8s %-19s %-10s %-10s %-3s %s", "ID", "Date", "Host", "Tags", "", "Directory")
+ tab.RowFormat = "%-8s %-19s %-10s %-10s %-3s %s"
done := make(chan struct{})
defer close(done)
@@ -97,9 +97,15 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
firstTag = sn.Tags[0]
}
- tab.Rows = append(tab.Rows, []interface{}{sn.ID().Str(), sn.Time.Format(TimeFormat), sn.Hostname, firstTag, sn.Paths[0]})
-
rows := len(sn.Paths)
+
+ treeElement := " "
+ if rows != 1 {
+ treeElement = "┌──"
+ }
+
+ tab.Rows = append(tab.Rows, []interface{}{sn.ID().Str(), sn.Time.Format(TimeFormat), sn.Hostname, firstTag, treeElement, sn.Paths[0]})
+
if len(sn.Tags) > rows {
rows = len(sn.Tags)
}
@@ -115,7 +121,12 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
tag = sn.Tags[i]
}
- tab.Rows = append(tab.Rows, []interface{}{"", "", "", tag, path})
+ treeElement := "│"
+ if i == (rows - 1) {
+ treeElement = "└──"
+ }
+
+ tab.Rows = append(tab.Rows, []interface{}{"", "", "", tag, treeElement, path})
}
}
diff --git a/src/cmds/restic/global.go b/src/cmds/restic/global.go
index fdedeee..8757130 100644
--- a/src/cmds/restic/global.go
+++ b/src/cmds/restic/global.go
@@ -51,7 +51,7 @@ func init() {
f := cmdRoot.PersistentFlags()
f.StringVarP(&globalOptions.Repo, "repo", "r", os.Getenv("RESTIC_REPOSITORY"), "repository to backup to or restore from (default: $RESTIC_REPOSITORY)")
f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", "", "read the repository password from a file")
- f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not outputcomprehensive progress report")
+ f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")
restoreTerminal()
diff --git a/src/cmds/restic/global_debug.go b/src/cmds/restic/global_debug.go
new file mode 100644
index 0000000..6e443f2
--- /dev/null
+++ b/src/cmds/restic/global_debug.go
@@ -0,0 +1,60 @@
+// +build debug
+
+package main
+
+import (
+ "fmt"
+ "net/http"
+ _ "net/http/pprof"
+ "os"
+ "restic/errors"
+
+ "github.com/pkg/profile"
+)
+
+var (
+ listenMemoryProfile string
+ memProfilePath string
+ cpuProfilePath string
+
+ prof interface {
+ Stop()
+ }
+)
+
+func init() {
+ f := cmdRoot.PersistentFlags()
+ f.StringVar(&listenMemoryProfile, "listen-profile", "", "listen on this `address:port` for memory profiling")
+ f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`")
+ f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`")
+}
+
+func runDebug() error {
+ if listenMemoryProfile != "" {
+ fmt.Fprintf(os.Stderr, "running memory profile HTTP server on %v\n", listenMemoryProfile)
+ go func() {
+ err := http.ListenAndServe(listenMemoryProfile, nil)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "memory profile listen failed: %v\n", err)
+ }
+ }()
+ }
+
+ if memProfilePath != "" && cpuProfilePath != "" {
+ return errors.Fatal("only one profile (memory or CPU) may be activated at the same time")
+ }
+
+ if memProfilePath != "" {
+ prof = profile.Start(profile.Quiet, profile.MemProfile, profile.ProfilePath(memProfilePath))
+ } else if memProfilePath != "" {
+ prof = profile.Start(profile.Quiet, profile.CPUProfile, profile.ProfilePath(memProfilePath))
+ }
+
+ return nil
+}
+
+func shutdownDebug() {
+ if prof != nil {
+ prof.Stop()
+ }
+}
diff --git a/src/cmds/restic/global_release.go b/src/cmds/restic/global_release.go
new file mode 100644
index 0000000..0a3bc8f
--- /dev/null
+++ b/src/cmds/restic/global_release.go
@@ -0,0 +1,9 @@
+// +build !debug
+
+package main
+
+// runDebug is a noop without the debug tag.
+func runDebug() error { return nil }
+
+// shutdownDebug is a noop without the debug tag.
+func shutdownDebug() {}
diff --git a/src/cmds/restic/integration_test.go b/src/cmds/restic/integration_test.go
index d678426..4263677 100644
--- a/src/cmds/restic/integration_test.go
+++ b/src/cmds/restic/integration_test.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ mrand "math/rand"
"os"
"path/filepath"
"regexp"
@@ -159,6 +160,15 @@ func testRunFind(t testing.TB, gopts GlobalOptions, pattern string) []string {
return strings.Split(string(buf.Bytes()), "\n")
}
+func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
+ opts := ForgetOptions{}
+ OK(t, runForget(opts, gopts, args))
+}
+
+func testRunPrune(t testing.TB, gopts GlobalOptions) {
+ OK(t, runPrune(gopts))
+}
+
func TestBackup(t *testing.T) {
withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
datafile := filepath.Join("testdata", "backup-data.tar.gz")
@@ -730,6 +740,30 @@ func TestRestoreFilter(t *testing.T) {
})
}
+func TestRestore(t *testing.T) {
+ withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+ testRunInit(t, gopts)
+
+ for i := 0; i < 10; i++ {
+ p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
+ OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ OK(t, appendRandomData(p, uint(mrand.Intn(5<<21))))
+ }
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, []string{env.testdata}, opts, gopts)
+ testRunCheck(t, gopts)
+
+ // Restore latest without any filters
+ restoredir := filepath.Join(env.base, "restore")
+ testRunRestoreLatest(t, gopts, restoredir, nil, "")
+
+ Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))),
+ "directories are not equal")
+ })
+}
+
func TestRestoreLatest(t *testing.T) {
withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
@@ -947,3 +981,33 @@ func TestCheckRestoreNoLock(t *testing.T) {
testRunRestore(t, gopts, filepath.Join(env.base, "restore"), snapshotIDs[0])
})
}
+
+func TestPrune(t *testing.T) {
+ withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ fd, err := os.Open(datafile)
+ if os.IsNotExist(errors.Cause(err)) {
+ t.Skipf("unable to find data file %q, skipping", datafile)
+ return
+ }
+ OK(t, err)
+ OK(t, fd.Close())
+
+ testRunInit(t, gopts)
+
+ SetupTarTestFixture(t, env.testdata, datafile)
+ opts := BackupOptions{}
+
+ testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "1")}, opts, gopts)
+ testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "2")}, opts, gopts)
+ testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "3")}, opts, gopts)
+
+ snapshotIDs := testRunList(t, "snapshots", gopts)
+ Assert(t, len(snapshotIDs) == 3,
+ "expected one snapshot, got %v", snapshotIDs)
+
+ testRunForget(t, gopts, snapshotIDs[0].String())
+ testRunPrune(t, gopts)
+ testRunCheck(t, gopts)
+ })
+}
diff --git a/src/cmds/restic/main.go b/src/cmds/restic/main.go
index fed711b..03f7f90 100644
--- a/src/cmds/restic/main.go
+++ b/src/cmds/restic/main.go
@@ -5,7 +5,6 @@ import (
"os"
"restic"
"restic/debug"
- "runtime"
"github.com/spf13/cobra"
@@ -22,17 +21,15 @@ directories in an encrypted repository stored on different backends.
`,
SilenceErrors: true,
SilenceUsage: true,
-}
-func init() {
- // set GOMAXPROCS to number of CPUs
- if runtime.Version() < "go1.5" {
- gomaxprocs := os.Getenv("GOMAXPROCS")
- debug.Log("read GOMAXPROCS from env variable, value: %s", gomaxprocs)
- if gomaxprocs == "" {
- runtime.GOMAXPROCS(runtime.NumCPU())
- }
- }
+ // run the debug functions for all subcommands (if build tag "debug" is
+ // enabled)
+ PersistentPreRunE: func(*cobra.Command, []string) error {
+ return runDebug()
+ },
+ PersistentPostRun: func(*cobra.Command, []string) {
+ shutdownDebug()
+ },
}
func main() {
diff --git a/src/restic/archiver/archive_reader_test.go b/src/restic/archiver/archive_reader_test.go
index f2de248..c24a0be 100644
--- a/src/restic/archiver/archive_reader_test.go
+++ b/src/restic/archiver/archive_reader_test.go
@@ -44,7 +44,7 @@ func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name
t.Fatal(err)
}
- buf := make([]byte, int(size))
+ buf := restic.NewBlobBuffer(int(size))
n := loadBlob(t, repo, id, buf)
if n != len(buf) {
t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n)
diff --git a/src/restic/archiver/archiver_duplication_test.go b/src/restic/archiver/archiver_duplication_test.go
index aadfc59..c7de1cc 100644
--- a/src/restic/archiver/archiver_duplication_test.go
+++ b/src/restic/archiver/archiver_duplication_test.go
@@ -39,15 +39,15 @@ func randomID() restic.ID {
func forgetfulBackend() restic.Backend {
be := &mock.Backend{}
- be.TestFn = func(t restic.FileType, name string) (bool, error) {
+ be.TestFn = func(h restic.Handle) (bool, error) {
return false, nil
}
- be.LoadFn = func(h restic.Handle, p []byte, off int64) (int, error) {
- return 0, errors.New("not found")
+ be.LoadFn = func(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ return nil, errors.New("not found")
}
- be.SaveFn = func(h restic.Handle, p []byte) error {
+ be.SaveFn = func(h restic.Handle, rd io.Reader) error {
return nil
}
@@ -55,7 +55,7 @@ func forgetfulBackend() restic.Backend {
return restic.FileInfo{}, errors.New("not found")
}
- be.RemoveFn = func(t restic.FileType, name string) error {
+ be.RemoveFn = func(h restic.Handle) error {
return nil
}
@@ -142,6 +142,11 @@ func testArchiverDuplication(t *testing.T) {
close(done)
wg.Wait()
+
+ err = repo.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
}
func TestArchiverDuplication(t *testing.T) {
diff --git a/src/restic/backend.go b/src/restic/backend.go
index 37a8404..4f776b1 100644
--- a/src/restic/backend.go
+++ b/src/restic/backend.go
@@ -1,5 +1,7 @@
package restic
+import "io"
+
// Backend is used to store and access data.
type Backend interface {
// Location returns a string that describes the type and location of the
@@ -7,22 +9,22 @@ type Backend interface {
Location() string
// Test a boolean value whether a File with the name and type exists.
- Test(t FileType, name string) (bool, error)
+ Test(h Handle) (bool, error)
// Remove removes a File with type t and name.
- Remove(t FileType, name string) error
+ Remove(h Handle) error
// Close the backend
Close() error
- // Load returns the data stored in the backend for h at the given offset
- // and saves it in p. Load has the same semantics as io.ReaderAt, except
- // that a negative offset is also allowed. In this case it references a
- // position relative to the end of the file (similar to Seek()).
- Load(h Handle, p []byte, off int64) (int, error)
-
// Save stores the data in the backend under the given handle.
- Save(h Handle, p []byte) error
+ Save(h Handle, rd io.Reader) error
+
+ // Load returns a reader that yields the contents of the file at h at the
+ // given offset. If length is larger than zero, only a portion of the file
+ // is returned. rd must be closed after use. If an error is returned, the
+ // ReadCloser must be nil.
+ Load(h Handle, length int, offset int64) (io.ReadCloser, error)
// Stat returns information about the File identified by h.
Stat(h Handle) (FileInfo, error)
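
To make the new contract concrete, here is a minimal, hedged sketch of a caller using the updated interface: Load now returns an io.ReadCloser that must be closed by the caller, and Save consumes an io.Reader. Only the Backend interface shown above is assumed; the helper names readFile and writeFile are made up for this example.

```go
package example

import (
	"io"
	"io/ioutil"

	"restic"
)

// readFile is a hypothetical helper that loads the complete contents of the
// file identified by h. Passing length 0 requests the whole file.
func readFile(be restic.Backend, h restic.Handle) ([]byte, error) {
	rd, err := be.Load(h, 0, 0)
	if err != nil {
		return nil, err
	}
	// The reader returned by Load must be closed after use.
	defer rd.Close()

	return ioutil.ReadAll(rd)
}

// writeFile is the counterpart for Save, which now takes an io.Reader
// instead of a byte slice.
func writeFile(be restic.Backend, h restic.Handle, rd io.Reader) error {
	return be.Save(h, rd)
}
```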
diff --git a/src/restic/backend/local/backend_test.go b/src/restic/backend/local/backend_test.go
index 8954dc8..8607f01 100644
--- a/src/restic/backend/local/backend_test.go
+++ b/src/restic/backend/local/backend_test.go
@@ -51,13 +51,6 @@ func TestLocalBackendLoad(t *testing.T) {
test.TestLoad(t)
}
-func TestLocalBackendLoadNegativeOffset(t *testing.T) {
- if SkipMessage != "" {
- t.Skip(SkipMessage)
- }
- test.TestLoadNegativeOffset(t)
-}
-
func TestLocalBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
diff --git a/src/restic/backend/local/local.go b/src/restic/backend/local/local.go
index e09650e..03d812d 100644
--- a/src/restic/backend/local/local.go
+++ b/src/restic/backend/local/local.go
@@ -101,58 +101,18 @@ func dirname(base string, t restic.FileType, name string) string {
return filepath.Join(base, n)
}
-// Load returns the data stored in the backend for h at the given offset and
-// saves it in p. Load has the same semantics as io.ReaderAt, with one
-// exception: when off is lower than zero, it is treated as an offset relative
-// to the end of the file.
-func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
- debug.Log("Load %v, length %v at %v", h, len(p), off)
- if err := h.Valid(); err != nil {
- return 0, err
- }
-
- f, err := fs.Open(filename(b.p, h.Type, h.Name))
- if err != nil {
- return 0, errors.Wrap(err, "Open")
- }
-
- defer func() {
- e := f.Close()
- if err == nil {
- err = errors.Wrap(e, "Close")
- }
- }()
-
- switch {
- case off > 0:
- _, err = f.Seek(off, 0)
- case off < 0:
- _, err = f.Seek(off, 2)
- }
-
- if err != nil {
- return 0, errors.Wrap(err, "Seek")
- }
-
- return io.ReadFull(f, p)
-}
-
-// writeToTempfile saves p into a tempfile in tempdir.
-func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
+// copyToTempfile saves p into a tempfile in tempdir.
+func copyToTempfile(tempdir string, rd io.Reader) (filename string, err error) {
tmpfile, err := ioutil.TempFile(tempdir, "temp-")
if err != nil {
return "", errors.Wrap(err, "TempFile")
}
- n, err := tmpfile.Write(p)
+ _, err = io.Copy(tmpfile, rd)
if err != nil {
return "", errors.Wrap(err, "Write")
}
- if n != len(p) {
- return "", errors.New("not all bytes writen")
- }
-
if err = tmpfile.Sync(); err != nil {
return "", errors.Wrap(err, "Syncn")
}
@@ -166,14 +126,14 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
}
// Save stores data in the backend at the handle.
-func (b *Local) Save(h restic.Handle, p []byte) (err error) {
- debug.Log("Save %v, length %v", h, len(p))
+func (b *Local) Save(h restic.Handle, rd io.Reader) (err error) {
+ debug.Log("Save %v", h)
if err := h.Valid(); err != nil {
return err
}
- tmpfile, err := writeToTempfile(filepath.Join(b.p, backend.Paths.Temp), p)
- debug.Log("saved %v (%d bytes) to %v", h, len(p), tmpfile)
+ tmpfile, err := copyToTempfile(filepath.Join(b.p, backend.Paths.Temp), rd)
+ debug.Log("saved %v to %v", h, tmpfile)
if err != nil {
return err
}
@@ -210,6 +170,39 @@ func (b *Local) Save(h restic.Handle, p []byte) (err error) {
return setNewFileMode(filename, fi)
}
+// Load returns a reader that yields the contents of the file at h at the
+// given offset. If length is nonzero, only a portion of the file is
+// returned. rd must be closed after use.
+func (b *Local) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ debug.Log("Load %v, length %v, offset %v", h, length, offset)
+ if err := h.Valid(); err != nil {
+ return nil, err
+ }
+
+ if offset < 0 {
+ return nil, errors.New("offset is negative")
+ }
+
+ f, err := os.Open(filename(b.p, h.Type, h.Name))
+ if err != nil {
+ return nil, err
+ }
+
+ if offset > 0 {
+ _, err = f.Seek(offset, 0)
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ }
+
+ if length > 0 {
+ return backend.LimitReadCloser(f, int64(length)), nil
+ }
+
+ return f, nil
+}
+
// Stat returns information about a blob.
func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("Stat %v", h)
@@ -226,9 +219,9 @@ func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
}
// Test returns true if a blob of the given type and name exists in the backend.
-func (b *Local) Test(t restic.FileType, name string) (bool, error) {
- debug.Log("Test %v %v", t, name)
- _, err := fs.Stat(filename(b.p, t, name))
+func (b *Local) Test(h restic.Handle) (bool, error) {
+ debug.Log("Test %v", h)
+ _, err := fs.Stat(filename(b.p, h.Type, h.Name))
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
return false, nil
@@ -240,9 +233,9 @@ func (b *Local) Test(t restic.FileType, name string) (bool, error) {
}
// Remove removes the blob with the given name and type.
-func (b *Local) Remove(t restic.FileType, name string) error {
- debug.Log("Remove %v %v", t, name)
- fn := filename(b.p, t, name)
+func (b *Local) Remove(h restic.Handle) error {
+ debug.Log("Remove %v", h)
+ fn := filename(b.p, h.Type, h.Name)
// reset read-only flag
err := fs.Chmod(fn, 0666)
diff --git a/src/restic/backend/mem/backend_test.go b/src/restic/backend/mem/backend_test.go
index 6bf1958..13e95f1 100644
--- a/src/restic/backend/mem/backend_test.go
+++ b/src/restic/backend/mem/backend_test.go
@@ -51,13 +51,6 @@ func TestMemBackendLoad(t *testing.T) {
test.TestLoad(t)
}
-func TestMemBackendLoadNegativeOffset(t *testing.T) {
- if SkipMessage != "" {
- t.Skip(SkipMessage)
- }
- test.TestLoadNegativeOffset(t)
-}
-
func TestMemBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
diff --git a/src/restic/backend/mem/mem_backend.go b/src/restic/backend/mem/mem_backend.go
index a40ad99..3e96f6a 100644
--- a/src/restic/backend/mem/mem_backend.go
+++ b/src/restic/backend/mem/mem_backend.go
@@ -1,21 +1,19 @@
package mem
import (
+ "bytes"
"io"
+ "io/ioutil"
"restic"
"sync"
+ "restic/backend"
"restic/errors"
"restic/debug"
)
-type entry struct {
- Type restic.FileType
- Name string
-}
-
-type memMap map[entry][]byte
+type memMap map[restic.Handle][]byte
// make sure that MemoryBackend implements backend.Backend
var _ restic.Backend = &MemoryBackend{}
@@ -39,23 +37,23 @@ func New() *MemoryBackend {
}
// Test returns whether a file exists.
-func (be *MemoryBackend) Test(t restic.FileType, name string) (bool, error) {
+func (be *MemoryBackend) Test(h restic.Handle) (bool, error) {
be.m.Lock()
defer be.m.Unlock()
- debug.Log("test %v %v", t, name)
+ debug.Log("Test %v", h)
- if _, ok := be.data[entry{t, name}]; ok {
+ if _, ok := be.data[h]; ok {
return true, nil
}
return false, nil
}
-// Load reads data from the backend.
-func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
+// Save adds new Data to the backend.
+func (be *MemoryBackend) Save(h restic.Handle, rd io.Reader) error {
if err := h.Valid(); err != nil {
- return 0, err
+ return err
}
be.m.Lock()
@@ -65,37 +63,27 @@ func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error)
h.Name = ""
}
- debug.Log("get %v offset %v len %v", h, off, len(p))
-
- if _, ok := be.data[entry{h.Type, h.Name}]; !ok {
- return 0, errors.New("no such data")
+ if _, ok := be.data[h]; ok {
+ return errors.New("file already exists")
}
- buf := be.data[entry{h.Type, h.Name}]
- switch {
- case off > int64(len(buf)):
- return 0, errors.New("offset beyond end of file")
- case off < -int64(len(buf)):
- off = 0
- case off < 0:
- off = int64(len(buf)) + off
+ buf, err := ioutil.ReadAll(rd)
+ if err != nil {
+ return err
}
- buf = buf[off:]
-
- n := copy(p, buf)
+ be.data[h] = buf
+ debug.Log("saved %v bytes at %v", len(buf), h)
- if len(p) > len(buf) {
- return n, io.ErrUnexpectedEOF
- }
-
- return n, nil
+ return nil
}
-// Save adds new Data to the backend.
-func (be *MemoryBackend) Save(h restic.Handle, p []byte) error {
+// Load returns a reader that yields the contents of the file at h at the
+// given offset. If length is nonzero, only a portion of the file is
+// returned. rd must be closed after use.
+func (be *MemoryBackend) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
if err := h.Valid(); err != nil {
- return err
+ return nil, err
}
be.m.Lock()
@@ -105,16 +93,27 @@ func (be *MemoryBackend) Save(h restic.Handle, p []byte) error {
h.Name = ""
}
- if _, ok := be.data[entry{h.Type, h.Name}]; ok {
- return errors.New("file already exists")
+ debug.Log("Load %v offset %v len %v", h, offset, length)
+
+ if offset < 0 {
+ return nil, errors.New("offset is negative")
+ }
+
+ if _, ok := be.data[h]; !ok {
+ return nil, errors.New("no such data")
}
- debug.Log("save %v bytes at %v", len(p), h)
- buf := make([]byte, len(p))
- copy(buf, p)
- be.data[entry{h.Type, h.Name}] = buf
+ buf := be.data[h]
+ if offset > int64(len(buf)) {
+ return nil, errors.New("offset beyond end of file")
+ }
- return nil
+ buf = buf[offset:]
+ if length > 0 && len(buf) > length {
+ buf = buf[:length]
+ }
+
+ return backend.Closer{Reader: bytes.NewReader(buf)}, nil
}
// Stat returns information about a file in the backend.
@@ -132,7 +131,7 @@ func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("stat %v", h)
- e, ok := be.data[entry{h.Type, h.Name}]
+ e, ok := be.data[h]
if !ok {
return restic.FileInfo{}, errors.New("no such data")
}
@@ -141,17 +140,17 @@ func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
}
// Remove deletes a file from the backend.
-func (be *MemoryBackend) Remove(t restic.FileType, name string) error {
+func (be *MemoryBackend) Remove(h restic.Handle) error {
be.m.Lock()
defer be.m.Unlock()
- debug.Log("get %v %v", t, name)
+ debug.Log("Remove %v", h)
- if _, ok := be.data[entry{t, name}]; !ok {
+ if _, ok := be.data[h]; !ok {
return errors.New("no such data")
}
- delete(be.data, entry{t, name})
+ delete(be.data, h)
return nil
}
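
For reference, a small hedged sketch of how a test might exercise the rewritten in-memory backend with the reader-based API; the handle name and file contents are made up for this illustration.

```go
package mem_test

import (
	"bytes"
	"io/ioutil"
	"testing"

	"restic"
	"restic/backend/mem"
)

func TestMemRoundTrip(t *testing.T) {
	be := mem.New()

	// Hypothetical handle and contents, used only for this example.
	h := restic.Handle{Type: restic.DataFile, Name: "aa11"}

	if err := be.Save(h, bytes.NewReader([]byte("hello restic"))); err != nil {
		t.Fatal(err)
	}

	// Read from offset 6 to the end of the file (length 0 means "the rest").
	rd, err := be.Load(h, 0, 6)
	if err != nil {
		t.Fatal(err)
	}
	defer rd.Close()

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		t.Fatal(err)
	}
	if string(buf) != "restic" {
		t.Fatalf("unexpected contents: %q", buf)
	}
}
```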
diff --git a/src/restic/backend/rest/backend_test.go b/src/restic/backend/rest/backend_test.go
index 9605396..4274bfc 100644
--- a/src/restic/backend/rest/backend_test.go
+++ b/src/restic/backend/rest/backend_test.go
@@ -51,13 +51,6 @@ func TestRestBackendLoad(t *testing.T) {
test.TestLoad(t)
}
-func TestRestBackendLoadNegativeOffset(t *testing.T) {
- if SkipMessage != "" {
- t.Skip(SkipMessage)
- }
- test.TestLoadNegativeOffset(t)
-}
-
func TestRestBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
diff --git a/src/restic/backend/rest/rest.go b/src/restic/backend/rest/rest.go
index f01854f..7121de9 100644
--- a/src/restic/backend/rest/rest.go
+++ b/src/restic/backend/rest/rest.go
@@ -1,7 +1,6 @@
package rest
import (
- "bytes"
"encoding/json"
"fmt"
"io"
@@ -12,6 +11,7 @@ import (
"restic"
"strings"
+ "restic/debug"
"restic/errors"
"restic/backend"
@@ -19,6 +19,9 @@ import (
const connLimit = 10
+// make sure the rest backend implements restic.Backend
+var _ restic.Backend = &restBackend{}
+
// restPath returns the path to the given resource.
func restPath(url *url.URL, h restic.Handle) string {
u := *url
@@ -71,34 +74,18 @@ func (b *restBackend) Location() string {
return b.url.String()
}
-// Load returns the data stored in the backend for h at the given offset
-// and saves it in p. Load has the same semantics as io.ReaderAt.
-func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
+// Save stores data in the backend at the handle.
+func (b *restBackend) Save(h restic.Handle, rd io.Reader) (err error) {
if err := h.Valid(); err != nil {
- return 0, err
+ return err
}
- // invert offset
- if off < 0 {
- info, err := b.Stat(h)
- if err != nil {
- return 0, errors.Wrap(err, "Stat")
- }
-
- if -off > info.Size {
- off = 0
- } else {
- off = info.Size + off
- }
- }
+ // make sure that client.Post() cannot close the reader by wrapping it in
+ // backend.Closer, which has a noop method.
+ rd = backend.Closer{Reader: rd}
- req, err := http.NewRequest("GET", restPath(b.url, h), nil)
- if err != nil {
- return 0, errors.Wrap(err, "http.NewRequest")
- }
- req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))))
<-b.connChan
- resp, err := b.client.Do(req)
+ resp, err := b.client.Post(restPath(b.url, h), "binary/octet-stream", rd)
b.connChan <- struct{}{}
if resp != nil {
@@ -113,45 +100,64 @@ func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err err
}
if err != nil {
- return 0, errors.Wrap(err, "client.Do")
+ return errors.Wrap(err, "client.Post")
}
- if resp.StatusCode != 200 && resp.StatusCode != 206 {
- return 0, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
+
+ if resp.StatusCode != 200 {
+ return errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
}
- return io.ReadFull(resp.Body, p)
+ return nil
}
-// Save stores data in the backend at the handle.
-func (b *restBackend) Save(h restic.Handle, p []byte) (err error) {
+// Load returns a reader that yields the contents of the file at h at the
+// given offset. If length is nonzero, only a portion of the file is
+// returned. rd must be closed after use.
+func (b *restBackend) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
- return err
+ return nil, err
}
- <-b.connChan
- resp, err := b.client.Post(restPath(b.url, h), "binary/octet-stream", bytes.NewReader(p))
- b.connChan <- struct{}{}
+ if offset < 0 {
+ return nil, errors.New("offset is negative")
+ }
- if resp != nil {
- defer func() {
- io.Copy(ioutil.Discard, resp.Body)
- e := resp.Body.Close()
+ if length < 0 {
+ return nil, errors.Errorf("invalid length %d", length)
+ }
- if err == nil {
- err = errors.Wrap(e, "Close")
- }
- }()
+ req, err := http.NewRequest("GET", restPath(b.url, h), nil)
+ if err != nil {
+ return nil, errors.Wrap(err, "http.NewRequest")
+ }
+
+ byteRange := fmt.Sprintf("bytes=%d-", offset)
+ if length > 0 {
+ byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
}
+ req.Header.Add("Range", byteRange)
+ debug.Log("Load(%v) send range %v", h, byteRange)
+
+ <-b.connChan
+ resp, err := b.client.Do(req)
+ b.connChan <- struct{}{}
if err != nil {
- return errors.Wrap(err, "client.Post")
+ if resp != nil {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }
+ return nil, errors.Wrap(err, "client.Do")
}
- if resp.StatusCode != 200 {
- return errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
+ if resp.StatusCode != 200 && resp.StatusCode != 206 {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ return nil, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
}
- return nil
+ return resp.Body, nil
}
// Stat returns information about a blob.
@@ -188,8 +194,8 @@ func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
}
// Test returns true if a blob of the given type and name exists in the backend.
-func (b *restBackend) Test(t restic.FileType, name string) (bool, error) {
- _, err := b.Stat(restic.Handle{Type: t, Name: name})
+func (b *restBackend) Test(h restic.Handle) (bool, error) {
+ _, err := b.Stat(h)
if err != nil {
return false, nil
}
@@ -198,8 +204,7 @@ func (b *restBackend) Test(t restic.FileType, name string) (bool, error) {
}
// Remove removes the blob with the given name and type.
-func (b *restBackend) Remove(t restic.FileType, name string) error {
- h := restic.Handle{Type: t, Name: name}
+func (b *restBackend) Remove(h restic.Handle) error {
if err := h.Valid(); err != nil {
return err
}
diff --git a/src/restic/backend/rest/rest_test.go b/src/restic/backend/rest/rest_test.go
index 2e7095b..af7154e 100644
--- a/src/restic/backend/rest/rest_test.go
+++ b/src/restic/backend/rest/rest_test.go
@@ -37,7 +37,7 @@ func init() {
return nil, err
}
- exists, err := be.Test(restic.ConfigFile, "")
+ exists, err := be.Test(restic.Handle{Type: restic.ConfigFile, Name: ""})
if err != nil {
return nil, err
}
diff --git a/src/restic/backend/s3/backend_test.go b/src/restic/backend/s3/backend_test.go
index 9fb4dd3..82eca26 100644
--- a/src/restic/backend/s3/backend_test.go
+++ b/src/restic/backend/s3/backend_test.go
@@ -51,13 +51,6 @@ func TestS3BackendLoad(t *testing.T) {
test.TestLoad(t)
}
-func TestS3BackendLoadNegativeOffset(t *testing.T) {
- if SkipMessage != "" {
- t.Skip(SkipMessage)
- }
- test.TestLoadNegativeOffset(t)
-}
-
func TestS3BackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
diff --git a/src/restic/backend/s3/s3.go b/src/restic/backend/s3/s3.go
index 56a6833..18f84ae 100644
--- a/src/restic/backend/s3/s3.go
+++ b/src/restic/backend/s3/s3.go
@@ -7,6 +7,7 @@ import (
"restic"
"strings"
+ "restic/backend"
"restic/errors"
"github.com/minio/minio-go"
@@ -54,11 +55,11 @@ func Open(cfg Config) (restic.Backend, error) {
return be, nil
}
-func (be *s3) s3path(t restic.FileType, name string) string {
- if t == restic.ConfigFile {
- return path.Join(be.prefix, string(t))
+func (be *s3) s3path(h restic.Handle) string {
+ if h.Type == restic.ConfigFile {
+ return path.Join(be.prefix, string(h.Type))
}
- return path.Join(be.prefix, string(t), name)
+ return path.Join(be.prefix, string(h.Type), h.Name)
}
func (be *s3) createConnections() {
@@ -73,114 +74,119 @@ func (be *s3) Location() string {
return be.bucketname
}
-// Load returns the data stored in the backend for h at the given offset
-// and saves it in p. Load has the same semantics as io.ReaderAt.
-func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
- var obj *minio.Object
+// Save stores data in the backend at the handle.
+func (be *s3) Save(h restic.Handle, rd io.Reader) (err error) {
+ if err := h.Valid(); err != nil {
+ return err
+ }
- debug.Log("%v, offset %v, len %v", h, off, len(p))
- objName := be.s3path(h.Type, h.Name)
+ debug.Log("Save %v", h)
+
+ objName := be.s3path(h)
+
+ // Check key does not already exist
+ _, err = be.client.StatObject(be.bucketname, objName)
+ if err == nil {
+ debug.Log("%v already exists", h)
+ return errors.New("key already exists")
+ }
<-be.connChan
defer func() {
be.connChan <- struct{}{}
}()
- obj, err = be.client.GetObject(be.bucketname, objName)
- if err != nil {
- debug.Log(" err %v", err)
- return 0, errors.Wrap(err, "client.GetObject")
- }
+ debug.Log("PutObject(%v, %v)",
+ be.bucketname, objName)
+ n, err := be.client.PutObject(be.bucketname, objName, rd, "binary/octet-stream")
+ debug.Log("%v -> %v bytes, err %#v", objName, n, err)
- // make sure that the object is closed properly.
- defer func() {
- e := obj.Close()
- if err == nil {
- err = errors.Wrap(e, "Close")
- }
- }()
+ return errors.Wrap(err, "client.PutObject")
+}
- info, err := obj.Stat()
- if err != nil {
- return 0, errors.Wrap(err, "obj.Stat")
+// Load returns a reader that yields the contents of the file at h at the
+// given offset. If length is nonzero, only a portion of the file is
+// returned. rd must be closed after use.
+func (be *s3) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ debug.Log("Load %v, length %v, offset %v", h, length, offset)
+ if err := h.Valid(); err != nil {
+ return nil, err
}
- // handle negative offsets
- if off < 0 {
- // if the negative offset is larger than the object itself, read from
- // the beginning.
- if -off > info.Size {
- off = 0
- } else {
- // otherwise compute the offset from the end of the file.
- off = info.Size + off
- }
+ if offset < 0 {
+ return nil, errors.New("offset is negative")
}
- // return an error if the offset is beyond the end of the file
- if off > info.Size {
- return 0, errors.Wrap(io.EOF, "")
+ if length < 0 {
+ return nil, errors.Errorf("invalid length %d", length)
}
- var nextError error
+ var obj *minio.Object
- // manually create an io.ErrUnexpectedEOF
- if off+int64(len(p)) > info.Size {
- newlen := info.Size - off
- p = p[:newlen]
+ objName := be.s3path(h)
- nextError = io.ErrUnexpectedEOF
+ <-be.connChan
+ defer func() {
+ be.connChan <- struct{}{}
+ }()
- debug.Log(" capped buffer to %v byte", len(p))
+ obj, err := be.client.GetObject(be.bucketname, objName)
+ if err != nil {
+ debug.Log(" err %v", err)
+ return nil, errors.Wrap(err, "client.GetObject")
}
- n, err = obj.ReadAt(p, off)
- if int64(n) == info.Size-off && errors.Cause(err) == io.EOF {
- err = nil
- }
+ // if we're going to read the whole object, just pass it on.
+ if length == 0 {
+ debug.Log("Load %v: pass on object", h)
+ _, err = obj.Seek(offset, 0)
+ if err != nil {
+ _ = obj.Close()
+ return nil, errors.Wrap(err, "obj.Seek")
+ }
- if err == nil {
- err = nextError
+ return obj, nil
}
- return n, err
-}
-
-// Save stores data in the backend at the handle.
-func (be s3) Save(h restic.Handle, p []byte) (err error) {
- if err := h.Valid(); err != nil {
- return err
+ // otherwise use a buffer with ReadAt
+ info, err := obj.Stat()
+ if err != nil {
+ _ = obj.Close()
+ return nil, errors.Wrap(err, "obj.Stat")
}
- debug.Log("%v with %d bytes", h, len(p))
-
- objName := be.s3path(h.Type, h.Name)
+ if offset > info.Size {
+ _ = obj.Close()
+ return nil, errors.Errorf("offset larger than file size")
+ }
- // Check key does not already exist
- _, err = be.client.StatObject(be.bucketname, objName)
- if err == nil {
- debug.Log("%v already exists", h)
- return errors.New("key already exists")
+ l := int64(length)
+ if offset+l > info.Size {
+ l = info.Size - offset
}
- <-be.connChan
- defer func() {
- be.connChan <- struct{}{}
- }()
+ buf := make([]byte, l)
+ n, err := obj.ReadAt(buf, offset)
+ debug.Log("Load %v: use buffer with ReadAt: %v, %v", h, n, err)
+ if err == io.EOF {
+ debug.Log("Load %v: shorten buffer %v -> %v", h, len(buf), n)
+ buf = buf[:n]
+ err = nil
+ }
- debug.Log("PutObject(%v, %v, %v, %v)",
- be.bucketname, objName, int64(len(p)), "binary/octet-stream")
- n, err := be.client.PutObject(be.bucketname, objName, bytes.NewReader(p), "binary/octet-stream")
- debug.Log("%v -> %v bytes, err %#v", objName, n, err)
+ if err != nil {
+ _ = obj.Close()
+ return nil, errors.Wrap(err, "obj.ReadAt")
+ }
- return errors.Wrap(err, "client.PutObject")
+ return backend.Closer{Reader: bytes.NewReader(buf)}, nil
}
// Stat returns information about a blob.
-func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
+func (be *s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("%v", h)
- objName := be.s3path(h.Type, h.Name)
+ objName := be.s3path(h)
var obj *minio.Object
obj, err = be.client.GetObject(be.bucketname, objName)
@@ -207,9 +213,9 @@ func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
}
// Test returns true if a blob of the given type and name exists in the backend.
-func (be *s3) Test(t restic.FileType, name string) (bool, error) {
+func (be *s3) Test(h restic.Handle) (bool, error) {
found := false
- objName := be.s3path(t, name)
+ objName := be.s3path(h)
_, err := be.client.StatObject(be.bucketname, objName)
if err == nil {
found = true
@@ -220,10 +226,10 @@ func (be *s3) Test(t restic.FileType, name string) (bool, error) {
}
// Remove removes the blob with the given name and type.
-func (be *s3) Remove(t restic.FileType, name string) error {
- objName := be.s3path(t, name)
+func (be *s3) Remove(h restic.Handle) error {
+ objName := be.s3path(h)
err := be.client.RemoveObject(be.bucketname, objName)
- debug.Log("%v %v -> err %v", t, name, err)
+ debug.Log("Remove(%v) -> err %v", h, err)
return errors.Wrap(err, "client.RemoveObject")
}
@@ -234,7 +240,7 @@ func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("listing %v", t)
ch := make(chan string)
- prefix := be.s3path(t, "") + "/"
+ prefix := be.s3path(restic.Handle{Type: t}) + "/"
listresp := be.client.ListObjects(be.bucketname, prefix, true, done)
@@ -262,7 +268,7 @@ func (be *s3) removeKeys(t restic.FileType) error {
done := make(chan struct{})
defer close(done)
for key := range be.List(restic.DataFile, done) {
- err := be.Remove(restic.DataFile, key)
+ err := be.Remove(restic.Handle{Type: restic.DataFile, Name: key})
if err != nil {
return err
}
@@ -287,7 +293,7 @@ func (be *s3) Delete() error {
}
}
- return be.Remove(restic.ConfigFile, "")
+ return be.Remove(restic.Handle{Type: restic.ConfigFile})
}
// Close does nothing
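For reference, a minimal sketch of how callers are expected to use the reworked Load(handle, length, offset) signature; the helper name loadPartial, the backend be and the handle h are illustrative and not part of this patch (imports of "io/ioutil" and "restic" assumed):

    // loadPartial reads up to 100 bytes starting at offset 50.
    func loadPartial(be restic.Backend, h restic.Handle) ([]byte, error) {
        // length == 0 would return a reader over the whole file instead
        rd, err := be.Load(h, 100, 50)
        if err != nil {
            return nil, err
        }
        // the caller owns the returned io.ReadCloser and must close it
        defer rd.Close()
        return ioutil.ReadAll(rd)
    }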
diff --git a/src/restic/backend/s3/s3_test.go b/src/restic/backend/s3/s3_test.go
index 355352f..3bee8c8 100644
--- a/src/restic/backend/s3/s3_test.go
+++ b/src/restic/backend/s3/s3_test.go
@@ -44,7 +44,7 @@ func init() {
return nil, err
}
- exists, err := be.Test(restic.ConfigFile, "")
+ exists, err := be.Test(restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, err
}
diff --git a/src/restic/backend/sftp/backend_test.go b/src/restic/backend/sftp/backend_test.go
index c28dd8c..a812f8c 100644
--- a/src/restic/backend/sftp/backend_test.go
+++ b/src/restic/backend/sftp/backend_test.go
@@ -51,13 +51,6 @@ func TestSftpBackendLoad(t *testing.T) {
test.TestLoad(t)
}
-func TestSftpBackendLoadNegativeOffset(t *testing.T) {
- if SkipMessage != "" {
- t.Skip(SkipMessage)
- }
- test.TestLoadNegativeOffset(t)
-}
-
func TestSftpBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
diff --git a/src/restic/backend/sftp/sftp.go b/src/restic/backend/sftp/sftp.go
index 9555544..65e6f7f 100644
--- a/src/restic/backend/sftp/sftp.go
+++ b/src/restic/backend/sftp/sftp.go
@@ -259,11 +259,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
}
// Rename temp file to final name according to type and name.
-func (r *SFTP) renameFile(oldname string, t restic.FileType, name string) error {
- filename := r.filename(t, name)
+func (r *SFTP) renameFile(oldname string, h restic.Handle) error {
+ filename := r.filename(h)
// create directories if necessary
- if t == restic.DataFile {
+ if h.Type == restic.DataFile {
err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir)
if err != nil {
return err
@@ -297,22 +297,22 @@ func Join(parts ...string) string {
}
// Construct path for given restic.Type and name.
-func (r *SFTP) filename(t restic.FileType, name string) string {
- if t == restic.ConfigFile {
+func (r *SFTP) filename(h restic.Handle) string {
+ if h.Type == restic.ConfigFile {
return Join(r.p, "config")
}
- return Join(r.dirname(t, name), name)
+ return Join(r.dirname(h), h.Name)
}
// Construct directory for given backend.Type.
-func (r *SFTP) dirname(t restic.FileType, name string) string {
+func (r *SFTP) dirname(h restic.Handle) string {
var n string
- switch t {
+ switch h.Type {
case restic.DataFile:
n = backend.Paths.Data
- if len(name) > 2 {
- n = Join(n, name[:2])
+ if len(h.Name) > 2 {
+ n = Join(n, h.Name[:2])
}
case restic.SnapshotFile:
n = backend.Paths.Snapshots
@@ -326,85 +326,76 @@ func (r *SFTP) dirname(t restic.FileType, name string) string {
return Join(r.p, n)
}
-// Load returns the data stored in the backend for h at the given offset
-// and saves it in p. Load has the same semantics as io.ReaderAt.
-func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
- debug.Log("load %v, %d bytes, offset %v", h, len(p), off)
+// Save stores data in the backend at the handle.
+func (r *SFTP) Save(h restic.Handle, rd io.Reader) (err error) {
+ debug.Log("save to %v", h)
if err := r.clientError(); err != nil {
- return 0, err
+ return err
}
if err := h.Valid(); err != nil {
- return 0, err
+ return err
}
- f, err := r.c.Open(r.filename(h.Type, h.Name))
+ filename, tmpfile, err := r.tempFile()
if err != nil {
- return 0, errors.Wrap(err, "Open")
+ return err
}
- defer func() {
- e := f.Close()
- if err == nil {
- err = errors.Wrap(e, "Close")
- }
- }()
-
- switch {
- case off > 0:
- _, err = f.Seek(off, 0)
- case off < 0:
- _, err = f.Seek(off, 2)
+ n, err := io.Copy(tmpfile, rd)
+ if err != nil {
+ return errors.Wrap(err, "Write")
}
+ debug.Log("saved %v (%d bytes) to %v", h, n, filename)
+
+ err = tmpfile.Close()
if err != nil {
- return 0, errors.Wrap(err, "Seek")
+ return errors.Wrap(err, "Close")
}
- return io.ReadFull(f, p)
+ err = r.renameFile(filename, h)
+ debug.Log("save %v: rename %v: %v",
+ h, path.Base(filename), err)
+ return err
}
-// Save stores data in the backend at the handle.
-func (r *SFTP) Save(h restic.Handle, p []byte) (err error) {
- debug.Log("save %v bytes to %v", h, len(p))
- if err := r.clientError(); err != nil {
- return err
- }
-
+// Load returns a reader that yields the contents of the file at h at the
+// given offset. If length is nonzero, only a portion of the file is
+// returned. The caller must close the returned reader after use.
+func (r *SFTP) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
- return err
+ return nil, err
}
- filename, tmpfile, err := r.tempFile()
- if err != nil {
- return err
+ if offset < 0 {
+ return nil, errors.New("offset is negative")
}
- debug.Log("save %v (%d bytes) to %v", h, len(p), filename)
-
- n, err := tmpfile.Write(p)
+ f, err := r.c.Open(r.filename(h))
if err != nil {
- return errors.Wrap(err, "Write")
+ return nil, err
}
- if n != len(p) {
- return errors.New("not all bytes writen")
+ if offset > 0 {
+ _, err = f.Seek(offset, 0)
+ if err != nil {
+ _ = f.Close()
+ return nil, err
+ }
}
- err = tmpfile.Close()
- if err != nil {
- return errors.Wrap(err, "Close")
+ if length > 0 {
+ return backend.LimitReadCloser(f, int64(length)), nil
}
- err = r.renameFile(filename, h.Type, h.Name)
- debug.Log("save %v: rename %v: %v",
- h, path.Base(filename), err)
- return err
+ return f, nil
}
// Stat returns information about a blob.
func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) {
- debug.Log("stat %v", h)
+ debug.Log("Stat(%v)", h)
if err := r.clientError(); err != nil {
return restic.FileInfo{}, err
}
@@ -413,7 +404,7 @@ func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) {
return restic.FileInfo{}, err
}
- fi, err := r.c.Lstat(r.filename(h.Type, h.Name))
+ fi, err := r.c.Lstat(r.filename(h))
if err != nil {
return restic.FileInfo{}, errors.Wrap(err, "Lstat")
}
@@ -422,13 +413,13 @@ func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) {
}
// Test returns true if a blob of the given type and name exists in the backend.
-func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
- debug.Log("type %v, name %v", t, name)
+func (r *SFTP) Test(h restic.Handle) (bool, error) {
+ debug.Log("Test(%v)", h)
if err := r.clientError(); err != nil {
return false, err
}
- _, err := r.c.Lstat(r.filename(t, name))
+ _, err := r.c.Lstat(r.filename(h))
if os.IsNotExist(errors.Cause(err)) {
return false, nil
}
@@ -441,13 +432,13 @@ func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
}
// Remove removes the content stored at name.
-func (r *SFTP) Remove(t restic.FileType, name string) error {
- debug.Log("type %v, name %v", t, name)
+func (r *SFTP) Remove(h restic.Handle) error {
+ debug.Log("Remove(%v)", h)
if err := r.clientError(); err != nil {
return err
}
- return r.c.Remove(r.filename(t, name))
+ return r.c.Remove(r.filename(h))
}
// List returns a channel that yields all names of blobs of type t. A
@@ -462,7 +453,7 @@ func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string {
if t == restic.DataFile {
// read first level
- basedir := r.dirname(t, "")
+ basedir := r.dirname(restic.Handle{Type: t})
list1, err := r.c.ReadDir(basedir)
if err != nil {
@@ -495,7 +486,7 @@ func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string {
}
}
} else {
- entries, err := r.c.ReadDir(r.dirname(t, ""))
+ entries, err := r.c.ReadDir(r.dirname(restic.Handle{Type: t}))
if err != nil {
return
}
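Similarly, a hedged sketch of the streaming Save(handle, io.Reader) convention introduced above; saveFile, be and id are made-up names (imports of "os" and "restic" assumed):

    // saveFile streams a local file into the backend without buffering it
    // fully in memory.
    func saveFile(be restic.Backend, id restic.ID, path string) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        // Save consumes the reader but must not close it (the test suite's
        // errorCloser enforces this), so the caller closes it here.
        defer f.Close()

        h := restic.Handle{Type: restic.DataFile, Name: id.String()}
        return be.Save(h, f)
    }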
diff --git a/src/restic/backend/test/backend_test.go b/src/restic/backend/test/backend_test.go
index c577092..b495ce6 100644
--- a/src/restic/backend/test/backend_test.go
+++ b/src/restic/backend/test/backend_test.go
@@ -51,13 +51,6 @@ func TestTestBackendLoad(t *testing.T) {
test.TestLoad(t)
}
-func TestTestBackendLoadNegativeOffset(t *testing.T) {
- if SkipMessage != "" {
- t.Skip(SkipMessage)
- }
- test.TestLoadNegativeOffset(t)
-}
-
func TestTestBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go
index c79d450..a6105f9 100644
--- a/src/restic/backend/test/tests.go
+++ b/src/restic/backend/test/tests.go
@@ -4,13 +4,16 @@ import (
"bytes"
"fmt"
"io"
+ "io/ioutil"
"math/rand"
+ "os"
"reflect"
"restic"
+ "restic/errors"
"sort"
+ "strings"
"testing"
- "restic/errors"
"restic/test"
"restic/backend"
@@ -127,7 +130,7 @@ func TestCreateWithConfig(t testing.TB) {
}
// remove config
- err = b.Remove(restic.ConfigFile, "")
+ err = b.Remove(restic.Handle{Type: restic.ConfigFile, Name: ""})
if err != nil {
t.Fatalf("unexpected error removing config: %v", err)
}
@@ -152,12 +155,12 @@ func TestConfig(t testing.TB) {
var testString = "Config"
// create config and read it back
- _, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil)
+ _, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile})
if err == nil {
t.Fatalf("did not get expected error for non-existing config")
}
- err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString))
+ err = b.Save(restic.Handle{Type: restic.ConfigFile}, strings.NewReader(testString))
if err != nil {
t.Fatalf("Save() error: %v", err)
}
@@ -166,7 +169,7 @@ func TestConfig(t testing.TB) {
// same config
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
h := restic.Handle{Type: restic.ConfigFile, Name: name}
- buf, err := backend.LoadAll(b, h, nil)
+ buf, err := backend.LoadAll(b, h)
if err != nil {
t.Fatalf("unable to read config with name %q: %v", name, err)
}
@@ -182,12 +185,12 @@ func TestLoad(t testing.TB) {
b := open(t)
defer close(t)
- _, err := b.Load(restic.Handle{}, nil, 0)
+ _, err := b.Load(restic.Handle{}, 0, 0)
if err == nil {
t.Fatalf("Load() did not return an error for invalid handle")
}
- _, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0)
+ _, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, 0, 0)
if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob")
}
@@ -198,11 +201,20 @@ func TestLoad(t testing.TB) {
id := restic.Hash(data)
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
- err = b.Save(handle, data)
+ err = b.Save(handle, bytes.NewReader(data))
if err != nil {
t.Fatalf("Save() error: %v", err)
}
+ rd, err := b.Load(handle, 100, -1)
+ if err == nil {
+ t.Fatalf("Load() returned no error for negative offset!")
+ }
+
+ if rd != nil {
+ t.Fatalf("Load() returned a non-nil reader for negative offset!")
+ }
+
for i := 0; i < 50; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)
@@ -215,157 +227,60 @@ func TestLoad(t testing.TB) {
d = d[:0]
}
- if l > 0 && l < len(d) {
- d = d[:l]
+ getlen := l
+ if l >= len(d) && rand.Float32() >= 0.5 {
+ getlen = 0
}
- buf := make([]byte, l)
- n, err := b.Load(handle, buf, int64(o))
-
- // if we requested data beyond the end of the file, require
- // ErrUnexpectedEOF error
- if l > len(d) {
- if errors.Cause(err) != io.ErrUnexpectedEOF {
- t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
- }
- err = nil
- buf = buf[:len(d)]
+ if l > 0 && l < len(d) {
+ d = d[:l]
}
+ rd, err := b.Load(handle, getlen, int64(o))
if err != nil {
- t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
+ t.Errorf("Load(%d, %d) returned unexpected error: %v", l, o, err)
continue
}
- if n != len(buf) {
- t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
- len(buf), int64(o), len(buf), n)
+ buf, err := ioutil.ReadAll(rd)
+ if err != nil {
+ t.Errorf("Load(%d, %d) ReadAll() returned unexpected error: %v", l, o, err)
continue
}
- buf = buf[:n]
- if !bytes.Equal(buf, d) {
- t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
+ if l <= len(d) && len(buf) != l {
+ t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, l, len(buf))
continue
}
- }
-
- // test with negative offset
- for i := 0; i < 50; i++ {
- l := rand.Intn(length + 2000)
- o := rand.Intn(length + 2000)
- d := data
- if o < len(d) {
- d = d[len(d)-o:]
- } else {
- o = 0
- }
-
- if l > 0 && l < len(d) {
- d = d[:l]
- }
-
- buf := make([]byte, l)
- n, err := b.Load(handle, buf, -int64(o))
-
- // if we requested data beyond the end of the file, require
- // ErrUnexpectedEOF error
- if l > len(d) {
- if errors.Cause(err) != io.ErrUnexpectedEOF {
- t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
- continue
- }
- err = nil
- buf = buf[:len(d)]
- }
-
- if err != nil {
- t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
- continue
- }
-
- if n != len(buf) {
- t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
- len(buf), int64(o), len(buf), n)
+ if l > len(d) && len(buf) != len(d) {
+ t.Errorf("Load(%d, %d) wrong number of bytes read for overlong read: want %d, got %d", l, o, l, len(buf))
continue
}
- buf = buf[:n]
if !bytes.Equal(buf, d) {
- t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
+ t.Errorf("Load(%d, %d) returned wrong bytes", l, o)
continue
}
- }
-
- // load with a too-large buffer, this should return io.ErrUnexpectedEOF
- buf := make([]byte, length+100)
- n, err := b.Load(handle, buf, 0)
- if n != length {
- t.Errorf("wrong length for larger buffer returned, want %d, got %d", length, n)
- }
-
- if errors.Cause(err) != io.ErrUnexpectedEOF {
- t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
- }
-
- test.OK(t, b.Remove(restic.DataFile, id.String()))
-}
-
-// TestLoadNegativeOffset tests the backend's Load function with negative offsets.
-func TestLoadNegativeOffset(t testing.TB) {
- b := open(t)
- defer close(t)
-
- length := rand.Intn(1<<24) + 2000
-
- data := test.Random(23, length)
- id := restic.Hash(data)
-
- handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
- err := b.Save(handle, data)
- if err != nil {
- t.Fatalf("Save() error: %v", err)
- }
-
- // test normal reads
- for i := 0; i < 50; i++ {
- l := rand.Intn(length + 2000)
- o := -rand.Intn(length + 2000)
-
- buf := make([]byte, l)
- n, err := b.Load(handle, buf, int64(o))
-
- // if we requested data beyond the end of the file, require
- // ErrUnexpectedEOF error
- if len(buf) > -o {
- if errors.Cause(err) != io.ErrUnexpectedEOF {
- t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), o)
- continue
- }
- err = nil
- buf = buf[:-o]
- }
+ err = rd.Close()
if err != nil {
- t.Errorf("Load(%d, %d) returned error: %v", len(buf), o, err)
- continue
- }
-
- if n != len(buf) {
- t.Errorf("Load(%d, %d) returned short read, only got %d bytes", len(buf), o, n)
+ t.Errorf("Load(%d, %d) rd.Close() returned unexpected error: %v", l, o, err)
continue
}
+ }
- p := len(data) + o
- if !bytes.Equal(buf, data[p:p+len(buf)]) {
- t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), o)
- continue
- }
+ test.OK(t, b.Remove(handle))
+}
- }
+type errorCloser struct {
+ io.Reader
+ t testing.TB
+}
- test.OK(t, b.Remove(restic.DataFile, id.String()))
+func (ec errorCloser) Close() error {
+ ec.t.Error("forbidden method close was called")
+ return errors.New("forbidden method close was called")
}
// TestSave tests saving data in the backend.
@@ -384,10 +299,10 @@ func TestSave(t testing.TB) {
Type: restic.DataFile,
Name: fmt.Sprintf("%s-%d", id, i),
}
- err := b.Save(h, data)
+ err := b.Save(h, bytes.NewReader(data))
test.OK(t, err)
- buf, err := backend.LoadAll(b, h, nil)
+ buf, err := backend.LoadAll(b, h)
test.OK(t, err)
if len(buf) != len(data) {
t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
@@ -404,11 +319,51 @@ func TestSave(t testing.TB) {
t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size)
}
- err = b.Remove(h.Type, h.Name)
+ err = b.Remove(h)
if err != nil {
t.Fatalf("error removing item: %v", err)
}
}
+
+ // test saving from a tempfile
+ tmpfile, err := ioutil.TempFile("", "restic-backend-save-test-")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ length := rand.Intn(1<<23) + 200000
+ data := test.Random(23, length)
+ copy(id[:], data)
+
+ if _, err = tmpfile.Write(data); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = tmpfile.Seek(0, 0); err != nil {
+ t.Fatal(err)
+ }
+
+ h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+
+ // wrap the tempfile in an errorCloser, so we can detect if the backend
+ // closes the reader
+ err = b.Save(h, errorCloser{t: t, Reader: tmpfile})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err = tmpfile.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = os.Remove(tmpfile.Name()); err != nil {
+ t.Fatal(err)
+ }
+
+ err = b.Remove(h)
+ if err != nil {
+ t.Fatalf("error removing item: %v", err)
+ }
}
var filenameTests = []struct {
@@ -430,13 +385,13 @@ func TestSaveFilenames(t testing.TB) {
for i, test := range filenameTests {
h := restic.Handle{Name: test.name, Type: restic.DataFile}
- err := b.Save(h, []byte(test.data))
+ err := b.Save(h, strings.NewReader(test.data))
if err != nil {
t.Errorf("test %d failed: Save() returned %v", i, err)
continue
}
- buf, err := backend.LoadAll(b, h, nil)
+ buf, err := backend.LoadAll(b, h)
if err != nil {
t.Errorf("test %d failed: Load() returned %v", i, err)
continue
@@ -446,7 +401,7 @@ func TestSaveFilenames(t testing.TB) {
t.Errorf("test %d: returned wrong bytes", i)
}
- err = b.Remove(h.Type, h.Name)
+ err = b.Remove(h)
if err != nil {
t.Errorf("test %d failed: Remove() returned %v", i, err)
continue
@@ -464,10 +419,12 @@ var testStrings = []struct {
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}
-func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) {
+func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) restic.Handle {
id := restic.Hash(data)
- err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data)
+ h := restic.Handle{Name: id.String(), Type: tpe}
+ err := b.Save(h, bytes.NewReader(data))
test.OK(t, err)
+ return h
}
// TestBackend tests all functions of the backend.
@@ -485,21 +442,21 @@ func TestBackend(t testing.TB) {
test.OK(t, err)
// test if blob is already in repository
- ret, err := b.Test(tpe, id.String())
+ h := restic.Handle{Type: tpe, Name: id.String()}
+ ret, err := b.Test(h)
test.OK(t, err)
test.Assert(t, !ret, "blob was found to exist before creating")
// try to stat a not existing blob
- h := restic.Handle{Type: tpe, Name: id.String()}
_, err = b.Stat(h)
test.Assert(t, err != nil, "blob data could be extracted before creation")
// try to read not existing blob
- _, err = b.Load(h, nil, 0)
+ _, err = b.Load(h, 0, 0)
test.Assert(t, err != nil, "blob reader could be obtained before creation")
// try to get string out, should fail
- ret, err = b.Test(tpe, id.String())
+ ret, err = b.Test(h)
test.OK(t, err)
test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
}
@@ -510,7 +467,7 @@ func TestBackend(t testing.TB) {
// test Load()
h := restic.Handle{Type: tpe, Name: ts.id}
- buf, err := backend.LoadAll(b, h, nil)
+ buf, err := backend.LoadAll(b, h)
test.OK(t, err)
test.Equals(t, ts.data, string(buf))
@@ -520,9 +477,18 @@ func TestBackend(t testing.TB) {
length := end - start
buf2 := make([]byte, length)
- n, err := b.Load(h, buf2, int64(start))
+ rd, err := b.Load(h, len(buf2), int64(start))
test.OK(t, err)
- test.Equals(t, length, n)
+ n, err := io.ReadFull(rd, buf2)
+ test.OK(t, err)
+ test.Equals(t, len(buf2), n)
+
+ remaining, err := io.Copy(ioutil.Discard, rd)
+ test.OK(t, err)
+ test.Equals(t, int64(0), remaining)
+
+ test.OK(t, rd.Close())
+
test.Equals(t, ts.data[start:end], string(buf2))
}
@@ -530,20 +496,21 @@ func TestBackend(t testing.TB) {
ts := testStrings[0]
// create blob
- err := b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data))
+ err := b.Save(restic.Handle{Type: tpe, Name: ts.id}, strings.NewReader(ts.data))
test.Assert(t, err != nil, "expected error, got %v", err)
// remove and recreate
- err = b.Remove(tpe, ts.id)
+ h := restic.Handle{Type: tpe, Name: ts.id}
+ err = b.Remove(h)
test.OK(t, err)
// test that the blob is gone
- ok, err := b.Test(tpe, ts.id)
+ ok, err := b.Test(h)
test.OK(t, err)
test.Assert(t, ok == false, "removed blob still present")
// create blob
- err = b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data))
+ err = b.Save(h, strings.NewReader(ts.data))
test.OK(t, err)
// list items
@@ -578,12 +545,14 @@ func TestBackend(t testing.TB) {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
- found, err := b.Test(tpe, id.String())
+ h := restic.Handle{Type: tpe, Name: id.String()}
+
+ found, err := b.Test(h)
test.OK(t, err)
- test.OK(t, b.Remove(tpe, id.String()))
+ test.OK(t, b.Remove(h))
- found, err = b.Test(tpe, id.String())
+ found, err = b.Test(h)
test.OK(t, err)
test.Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
}
diff --git a/src/restic/backend/utils.go b/src/restic/backend/utils.go
index 82a8995..27d2b9a 100644
--- a/src/restic/backend/utils.go
+++ b/src/restic/backend/utils.go
@@ -2,29 +2,51 @@ package backend
import (
"io"
+ "io/ioutil"
"restic"
-
- "restic/errors"
)
-// LoadAll reads all data stored in the backend for the handle. The buffer buf
-// is resized to accomodate all data in the blob. Errors returned by be.Load()
-// are passed on, except io.ErrUnexpectedEOF is silenced and nil returned
-// instead, since it means this function is working properly.
-func LoadAll(be restic.Backend, h restic.Handle, buf []byte) ([]byte, error) {
- fi, err := be.Stat(h)
+// LoadAll reads all data stored in the backend for the handle.
+func LoadAll(be restic.Backend, h restic.Handle) (buf []byte, err error) {
+ rd, err := be.Load(h, 0, 0)
if err != nil {
- return nil, errors.Wrap(err, "Stat")
+ return nil, err
}
- if fi.Size > int64(len(buf)) {
- buf = make([]byte, int(fi.Size))
- }
+ defer func() {
+ io.Copy(ioutil.Discard, rd)
+ e := rd.Close()
+ if err == nil {
+ err = e
+ }
+ }()
- n, err := be.Load(h, buf, 0)
- if errors.Cause(err) == io.ErrUnexpectedEOF {
- err = nil
- }
- buf = buf[:n]
- return buf, err
+ return ioutil.ReadAll(rd)
+}
+
+// Closer wraps an io.Reader and adds a Close() method that does nothing.
+type Closer struct {
+ io.Reader
+}
+
+// Close is a no-op.
+func (c Closer) Close() error {
+ return nil
+}
+
+// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method.
+type LimitedReadCloser struct {
+ io.ReadCloser
+ io.Reader
+}
+
+// Read reads data from the limited reader.
+func (l *LimitedReadCloser) Read(p []byte) (int, error) {
+ return l.Reader.Read(p)
+}
+
+// LimitReadCloser returns a new reader that wraps r in an io.LimitReader, but also
+// exposes the Close() method.
+func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser {
+ return &LimitedReadCloser{ReadCloser: r, Reader: io.LimitReader(r, n)}
}
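A short usage sketch of the simplified LoadAll: it now allocates and returns its own buffer, so the buf parameter is gone. The helper name loadConfig is illustrative (imports of "restic" and "restic/backend" assumed):

    func loadConfig(be restic.Backend) ([]byte, error) {
        // reads the whole config file; the reader obtained via Load(h, 0, 0)
        // is drained and closed internally by LoadAll
        return backend.LoadAll(be, restic.Handle{Type: restic.ConfigFile})
    }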
diff --git a/src/restic/backend/utils_test.go b/src/restic/backend/utils_test.go
index 59eed70..2996cf4 100644
--- a/src/restic/backend/utils_test.go
+++ b/src/restic/backend/utils_test.go
@@ -21,10 +21,10 @@ func TestLoadAll(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := restic.Hash(data)
- err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
+ err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, bytes.NewReader(data))
OK(t, err)
- buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil)
+ buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()})
OK(t, err)
if len(buf) != len(data) {
@@ -46,11 +46,11 @@ func TestLoadSmallBuffer(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := restic.Hash(data)
- err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
+ err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, bytes.NewReader(data))
OK(t, err)
buf := make([]byte, len(data)-23)
- buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
+ buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()})
OK(t, err)
if len(buf) != len(data) {
@@ -72,11 +72,11 @@ func TestLoadLargeBuffer(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := restic.Hash(data)
- err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
+ err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, bytes.NewReader(data))
OK(t, err)
buf := make([]byte, len(data)+100)
- buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
+ buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()})
OK(t, err)
if len(buf) != len(data) {
diff --git a/src/restic/buffer.go b/src/restic/buffer.go
new file mode 100644
index 0000000..d822fce
--- /dev/null
+++ b/src/restic/buffer.go
@@ -0,0 +1,21 @@
+package restic
+
+import "restic/crypto"
+
+// NewBlobBuffer returns a buffer that is large enough to hold a blob of size
+// plaintext bytes, including the crypto overhead.
+func NewBlobBuffer(size int) []byte {
+ return make([]byte, size, size+crypto.Extension)
+}
+
+// PlaintextLength returns the plaintext length of a blob with ciphertextSize
+// bytes.
+func PlaintextLength(ciphertextSize int) int {
+ return ciphertextSize - crypto.Extension
+}
+
+// CiphertextLength returns the encrypted length of a blob with plaintextSize
+// bytes.
+func CiphertextLength(plaintextSize int) int {
+ return plaintextSize + crypto.Extension
+}
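A small worked example of the new buffer helpers, assuming a 4 KiB plaintext blob; the numbers only illustrate the arithmetic:

    buf := restic.NewBlobBuffer(4096)   // len 4096, cap 4096 + crypto.Extension
    ct := restic.CiphertextLength(4096) // plaintext size plus crypto overhead
    pt := restic.PlaintextLength(ct)    // back to 4096
    _, _, _ = buf, ct, pt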
diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go
index 600ec2e..7b37f7d 100644
--- a/src/restic/checker/checker.go
+++ b/src/restic/checker/checker.go
@@ -187,7 +187,8 @@ func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<
defer wg.Done()
for id := range inChan {
- ok, err := repo.Backend().Test(restic.DataFile, id.String())
+ h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+ ok, err := repo.Backend().Test(h)
if err != nil {
err = PackError{ID: id, Err: err}
} else {
@@ -658,7 +659,7 @@ func (c *Checker) CountPacks() uint64 {
func checkPack(r restic.Repository, id restic.ID) error {
debug.Log("checking pack %v", id.Str())
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
- buf, err := backend.LoadAll(r.Backend(), h, nil)
+ buf, err := backend.LoadAll(r.Backend(), h)
if err != nil {
return err
}
diff --git a/src/restic/checker/checker_test.go b/src/restic/checker/checker_test.go
index 26528d2..d41f34b 100644
--- a/src/restic/checker/checker_test.go
+++ b/src/restic/checker/checker_test.go
@@ -1,6 +1,7 @@
package checker_test
import (
+ "io"
"math/rand"
"path/filepath"
"sort"
@@ -8,7 +9,6 @@ import (
"restic"
"restic/archiver"
- "restic/backend/mem"
"restic/checker"
"restic/repository"
"restic/test"
@@ -73,8 +73,11 @@ func TestMissingPack(t *testing.T) {
repo := repository.TestOpenLocal(t, repodir)
- packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"
- test.OK(t, repo.Backend().Remove(restic.DataFile, packID))
+ packHandle := restic.Handle{
+ Type: restic.DataFile,
+ Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6",
+ }
+ test.OK(t, repo.Backend().Remove(packHandle))
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
@@ -92,7 +95,7 @@ func TestMissingPack(t *testing.T) {
"expected exactly one error, got %v", len(errs))
if err, ok := errs[0].(checker.PackError); ok {
- test.Equals(t, packID, err.ID.String())
+ test.Equals(t, packHandle.Name, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
@@ -105,9 +108,12 @@ func TestUnreferencedPack(t *testing.T) {
repo := repository.TestOpenLocal(t, repodir)
// index 3f1a only references pack 60e0
- indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44"
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
- test.OK(t, repo.Backend().Remove(restic.IndexFile, indexID))
+ indexHandle := restic.Handle{
+ Type: restic.IndexFile,
+ Name: "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44",
+ }
+ test.OK(t, repo.Backend().Remove(indexHandle))
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
@@ -137,8 +143,11 @@ func TestUnreferencedBlobs(t *testing.T) {
repo := repository.TestOpenLocal(t, repodir)
- snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
- test.OK(t, repo.Backend().Remove(restic.SnapshotFile, snID))
+ snapshotHandle := restic.Handle{
+ Type: restic.SnapshotFile,
+ Name: "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02",
+ }
+ test.OK(t, repo.Backend().Remove(snapshotHandle))
unusedBlobsBySnapshot := restic.IDs{
restic.TestParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
@@ -208,15 +217,35 @@ type errorBackend struct {
ProduceErrors bool
}
-func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
- n, err := b.Backend.Load(h, p, off)
+func (b errorBackend) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ rd, err := b.Backend.Load(h, length, offset)
+ if err != nil {
+ return rd, err
+ }
if b.ProduceErrors {
- induceError(p)
+ return errorReadCloser{rd}, err
+ }
+
+ return rd, nil
+}
+
+type errorReadCloser struct {
+ io.ReadCloser
+}
+
+func (erd errorReadCloser) Read(p []byte) (int, error) {
+ n, err := erd.ReadCloser.Read(p)
+ if n > 0 {
+ induceError(p[:n])
}
return n, err
}
+func (erd errorReadCloser) Close() error {
+ return erd.ReadCloser.Close()
+}
+
// induceError flips a bit in the slice.
func induceError(data []byte) {
if rand.Float32() < 0.2 {
@@ -228,19 +257,15 @@ func induceError(data []byte) {
}
func TestCheckerModifiedData(t *testing.T) {
- be := mem.New()
-
- repository.TestUseLowSecurityKDFParameters(t)
-
- repo := repository.New(be)
- test.OK(t, repo.Init(test.TestPassword))
+ repo, cleanup := repository.TestRepository(t)
+ defer cleanup()
arch := archiver.New(repo)
_, id, err := arch.Snapshot(nil, []string{"."}, nil, nil)
test.OK(t, err)
t.Logf("archived as %v", id.Str())
- beError := &errorBackend{Backend: be}
+ beError := &errorBackend{Backend: repo.Backend()}
checkRepo := repository.New(beError)
test.OK(t, checkRepo.SearchKey(test.TestPassword, 5))
@@ -266,7 +291,7 @@ func TestCheckerModifiedData(t *testing.T) {
}
for _, err := range checkData(chkr) {
- t.Logf("struct error: %v", err)
+ t.Logf("data error: %v", err)
errFound = true
}
diff --git a/src/restic/fuse/file.go b/src/restic/fuse/file.go
index 0327059..6bfa2e4 100644
--- a/src/restic/fuse/file.go
+++ b/src/restic/fuse/file.go
@@ -4,8 +4,6 @@
package fuse
import (
- "sync"
-
"restic/errors"
"restic"
@@ -35,29 +33,23 @@ type file struct {
node *restic.Node
ownerIsRoot bool
- sizes []uint
+ sizes []int
blobs [][]byte
}
const defaultBlobSize = 128 * 1024
-var blobPool = sync.Pool{
- New: func() interface{} {
- return make([]byte, defaultBlobSize)
- },
-}
-
func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error) {
debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content))
var bytes uint64
- sizes := make([]uint, len(node.Content))
+ sizes := make([]int, len(node.Content))
for i, id := range node.Content {
size, err := repo.LookupBlobSize(id, restic.DataBlob)
if err != nil {
return nil, err
}
- sizes[i] = size
+ sizes[i] = int(size)
bytes += uint64(size)
}
@@ -99,16 +91,12 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) {
return f.blobs[i], nil
}
- buf := blobPool.Get().([]byte)
- buf = buf[:cap(buf)]
-
- if uint(len(buf)) < f.sizes[i] {
- if len(buf) > defaultBlobSize {
- blobPool.Put(buf)
- }
- buf = make([]byte, f.sizes[i])
+ // release earlier blobs
+ for j := 0; j < i; j++ {
+ f.blobs[j] = nil
}
+ buf := restic.NewBlobBuffer(f.sizes[i])
n, err := f.repo.LoadBlob(restic.DataBlob, f.node.Content[i], buf)
if err != nil {
debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
@@ -169,10 +157,7 @@ func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR
func (f *file) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
for i := range f.blobs {
- if f.blobs[i] != nil {
- blobPool.Put(f.blobs[i])
- f.blobs[i] = nil
- }
+ f.blobs[i] = nil
}
return nil
}
diff --git a/src/restic/fuse/file_test.go b/src/restic/fuse/file_test.go
index 090e432..9b2e098 100644
--- a/src/restic/fuse/file_test.go
+++ b/src/restic/fuse/file_test.go
@@ -9,7 +9,9 @@ import (
"testing"
"time"
- "restic/errors"
+ "golang.org/x/net/context"
+
+ "restic/repository"
"bazil.org/fuse"
@@ -17,108 +19,96 @@ import (
. "restic/test"
)
-type MockRepo struct {
- blobs map[restic.ID][]byte
-}
-
-func NewMockRepo(content map[restic.ID][]byte) *MockRepo {
- return &MockRepo{blobs: content}
-}
+func testRead(t testing.TB, f *file, offset, length int, data []byte) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
-func (m *MockRepo) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) {
- buf, ok := m.blobs[id]
- if !ok {
- return 0, errors.New("blob not found")
+ req := &fuse.ReadRequest{
+ Offset: int64(offset),
+ Size: length,
}
-
- return uint(len(buf)), nil
-}
-
-func (m *MockRepo) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) {
- size, err := m.LookupBlobSize(id, t)
- if err != nil {
- return 0, err
+ resp := &fuse.ReadResponse{
+ Data: data,
}
+ OK(t, f.Read(ctx, req, resp))
+}
- if uint(len(buf)) < size {
- return 0, errors.New("buffer too small")
+func firstSnapshotID(t testing.TB, repo restic.Repository) (first restic.ID) {
+ done := make(chan struct{})
+ defer close(done)
+ for id := range repo.List(restic.SnapshotFile, done) {
+ if first.IsNull() {
+ first = id
+ }
}
-
- buf = buf[:size]
- copy(buf, m.blobs[id])
- return int(size), nil
+ return first
}
-type MockContext struct{}
-
-func (m MockContext) Deadline() (time.Time, bool) { return time.Now(), false }
-func (m MockContext) Done() <-chan struct{} { return nil }
-func (m MockContext) Err() error { return nil }
-func (m MockContext) Value(key interface{}) interface{} { return nil }
-
-var testContent = genTestContent()
-var testContentLengths = []uint{
- 4646 * 1024,
- 655 * 1024,
- 378 * 1024,
- 8108 * 1024,
- 558 * 1024,
+func loadFirstSnapshot(t testing.TB, repo restic.Repository) *restic.Snapshot {
+ id := firstSnapshotID(t, repo)
+ sn, err := restic.LoadSnapshot(repo, id)
+ OK(t, err)
+ return sn
}
-var testMaxFileSize uint
-func genTestContent() map[restic.ID][]byte {
- m := make(map[restic.ID][]byte)
-
- for _, length := range testContentLengths {
- buf := Random(int(length), int(length))
- id := restic.Hash(buf)
- m[id] = buf
- testMaxFileSize += length
- }
-
- return m
+func loadTree(t testing.TB, repo restic.Repository, id restic.ID) *restic.Tree {
+ tree, err := repo.LoadTree(id)
+ OK(t, err)
+ return tree
}
-const maxBufSize = 20 * 1024 * 1024
+func TestFuseFile(t *testing.T) {
+ repo, cleanup := repository.TestRepository(t)
+ defer cleanup()
-func testRead(t *testing.T, f *file, offset, length int, data []byte) {
- ctx := MockContext{}
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
- req := &fuse.ReadRequest{
- Offset: int64(offset),
- Size: length,
- }
- resp := &fuse.ReadResponse{
- Data: make([]byte, length),
- }
- OK(t, f.Read(ctx, req, resp))
-}
+ timestamp, err := time.Parse(time.RFC3339, "2017-01-24T10:42:56+01:00")
+ OK(t, err)
+ restic.TestCreateSnapshot(t, repo, timestamp, 2, 0.1)
-var offsetReadsTests = []struct {
- offset, length int
-}{
- {0, 5 * 1024 * 1024},
- {4000 * 1024, 1000 * 1024},
-}
+ sn := loadFirstSnapshot(t, repo)
+ tree := loadTree(t, repo, *sn.Tree)
-func TestFuseFile(t *testing.T) {
- repo := NewMockRepo(testContent)
- ctx := MockContext{}
+ var content restic.IDs
+ for _, node := range tree.Nodes {
+ content = append(content, node.Content...)
+ }
+ t.Logf("tree loaded, content: %v", content)
+
+ var (
+ filesize uint64
+ memfile []byte
+ )
+ for _, id := range content {
+ size, err := repo.LookupBlobSize(id, restic.DataBlob)
+ OK(t, err)
+ filesize += uint64(size)
+
+ buf := restic.NewBlobBuffer(int(size))
+ n, err := repo.LoadBlob(restic.DataBlob, id, buf)
+ OK(t, err)
+
+ if uint(n) != size {
+ t.Fatalf("not enough bytes read for id %v: want %v, got %v", id.Str(), size, n)
+ }
- memfile := make([]byte, 0, maxBufSize)
+ if uint(len(buf)) != size {
+ t.Fatalf("buffer has wrong length for id %v: want %v, got %v", id.Str(), size, len(buf))
+ }
- var ids restic.IDs
- for id, buf := range repo.blobs {
- ids = append(ids, id)
memfile = append(memfile, buf...)
}
+ t.Logf("filesize is %v, memfile has size %v", filesize, len(memfile))
+
node := &restic.Node{
Name: "foo",
Inode: 23,
Mode: 0742,
- Size: 42,
- Content: ids,
+ Size: filesize,
+ Content: content,
}
f, err := newFile(repo, node, false)
OK(t, err)
@@ -131,28 +121,19 @@ func TestFuseFile(t *testing.T) {
Equals(t, node.Size, attr.Size)
Equals(t, (node.Size/uint64(attr.BlockSize))+1, attr.Blocks)
- for i, test := range offsetReadsTests {
- b := memfile[test.offset : test.offset+test.length]
- buf := make([]byte, test.length)
- testRead(t, f, test.offset, test.length, buf)
- if !bytes.Equal(b, buf) {
- t.Errorf("test %d failed, wrong data returned", i)
- }
- }
-
for i := 0; i < 200; i++ {
- length := rand.Intn(int(testMaxFileSize) / 2)
- offset := rand.Intn(int(testMaxFileSize))
- if length+offset > int(testMaxFileSize) {
- diff := length + offset - int(testMaxFileSize)
- length -= diff
- }
+ offset := rand.Intn(int(filesize))
+ length := rand.Intn(int(filesize)-offset) + 100
b := memfile[offset : offset+length]
+
buf := make([]byte, length)
+
testRead(t, f, offset, length, buf)
if !bytes.Equal(b, buf) {
- t.Errorf("test %d failed (offset %d, length %d), wrong data returned", i, offset, length)
+ t.Errorf("test %d failed, wrong data returned (offset %v, length %v)", i, offset, length)
}
}
+
+ OK(t, f.Release(ctx, nil))
}
diff --git a/src/restic/hashing/reader.go b/src/restic/hashing/reader.go
new file mode 100644
index 0000000..a499f4a
--- /dev/null
+++ b/src/restic/hashing/reader.go
@@ -0,0 +1,29 @@
+package hashing
+
+import (
+ "hash"
+ "io"
+)
+
+// Reader hashes all data read from the underlying reader.
+type Reader struct {
+ r io.Reader
+ h hash.Hash
+}
+
+// NewReader returns a new Reader that uses the hash h.
+func NewReader(r io.Reader, h hash.Hash) *Reader {
+ return &Reader{
+ h: h,
+ r: io.TeeReader(r, h),
+ }
+}
+
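+// Read reads data from the underlying reader, adding it to the hash.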
+func (h *Reader) Read(p []byte) (int, error) {
+ return h.r.Read(p)
+}
+
+// Sum returns the hash of the data read so far.
+func (h *Reader) Sum(d []byte) []byte {
+ return h.h.Sum(d)
+}
diff --git a/src/restic/hashing/reader_test.go b/src/restic/hashing/reader_test.go
new file mode 100644
index 0000000..d17f264
--- /dev/null
+++ b/src/restic/hashing/reader_test.go
@@ -0,0 +1,73 @@
+package hashing
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestReader(t *testing.T) {
+ tests := []int{5, 23, 2<<18 + 23, 1 << 20}
+
+ for _, size := range tests {
+ data := make([]byte, size)
+ _, err := io.ReadFull(rand.Reader, data)
+ if err != nil {
+ t.Fatalf("ReadFull: %v", err)
+ }
+
+ expectedHash := sha256.Sum256(data)
+
+ rd := NewReader(bytes.NewReader(data), sha256.New())
+ n, err := io.Copy(ioutil.Discard, rd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != int64(size) {
+ t.Errorf("Reader: invalid number of bytes written: got %d, expected %d",
+ n, size)
+ }
+
+ resultingHash := rd.Sum(nil)
+
+ if !bytes.Equal(expectedHash[:], resultingHash) {
+ t.Errorf("Reader: hashes do not match: expected %02x, got %02x",
+ expectedHash, resultingHash)
+ }
+ }
+}
+
+func BenchmarkReader(b *testing.B) {
+ buf := make([]byte, 1<<22)
+ _, err := io.ReadFull(rand.Reader, buf)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ expectedHash := sha256.Sum256(buf)
+
+ b.SetBytes(int64(len(buf)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ rd := NewReader(bytes.NewReader(buf), sha256.New())
+ n, err := io.Copy(ioutil.Discard, rd)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ if n != int64(len(buf)) {
+ b.Errorf("Reader: invalid number of bytes written: got %d, expected %d",
+ n, len(buf))
+ }
+
+ resultingHash := rd.Sum(nil)
+ if !bytes.Equal(expectedHash[:], resultingHash) {
+ b.Errorf("Reader: hashes do not match: expected %02x, got %02x",
+ expectedHash, resultingHash)
+ }
+ }
+}
diff --git a/src/restic/hashing/writer.go b/src/restic/hashing/writer.go
new file mode 100644
index 0000000..2940a62
--- /dev/null
+++ b/src/restic/hashing/writer.go
@@ -0,0 +1,31 @@
+package hashing
+
+import (
+ "hash"
+ "io"
+)
+
+// Writer transparently hashes all data while writing it to the underlying writer.
+type Writer struct {
+ w io.Writer
+ h hash.Hash
+}
+
+// NewWriter wraps the writer w and feeds all data written to the hash h.
+func NewWriter(w io.Writer, h hash.Hash) *Writer {
+ return &Writer{
+ h: h,
+ w: io.MultiWriter(w, h),
+ }
+}
+
+// Write wraps the write method of the underlying writer and also hashes all data.
+func (h *Writer) Write(p []byte) (int, error) {
+ return h.w.Write(p)
+}
+
+// Sum returns the hash of all data written so far.
+func (h *Writer) Sum(d []byte) []byte {
+ return h.h.Sum(d)
+}
diff --git a/src/restic/hashing/writer_test.go b/src/restic/hashing/writer_test.go
new file mode 100644
index 0000000..46999f2
--- /dev/null
+++ b/src/restic/hashing/writer_test.go
@@ -0,0 +1,74 @@
+package hashing
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestWriter(t *testing.T) {
+ tests := []int{5, 23, 2<<18 + 23, 1 << 20}
+
+ for _, size := range tests {
+ data := make([]byte, size)
+ _, err := io.ReadFull(rand.Reader, data)
+ if err != nil {
+ t.Fatalf("ReadFull: %v", err)
+ }
+
+ expectedHash := sha256.Sum256(data)
+
+ wr := NewWriter(ioutil.Discard, sha256.New())
+
+ n, err := io.Copy(wr, bytes.NewReader(data))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != int64(size) {
+ t.Errorf("Writer: invalid number of bytes written: got %d, expected %d",
+ n, size)
+ }
+
+ resultingHash := wr.Sum(nil)
+
+ if !bytes.Equal(expectedHash[:], resultingHash) {
+ t.Errorf("Writer: hashes do not match: expected %02x, got %02x",
+ expectedHash, resultingHash)
+ }
+ }
+}
+
+func BenchmarkWriter(b *testing.B) {
+ buf := make([]byte, 1<<22)
+ _, err := io.ReadFull(rand.Reader, buf)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ expectedHash := sha256.Sum256(buf)
+
+ b.SetBytes(int64(len(buf)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ wr := NewWriter(ioutil.Discard, sha256.New())
+ n, err := io.Copy(wr, bytes.NewReader(buf))
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ if n != int64(len(buf)) {
+ b.Errorf("Writer: invalid number of bytes written: got %d, expected %d",
+ n, len(buf))
+ }
+
+ resultingHash := wr.Sum(nil)
+ if !bytes.Equal(expectedHash[:], resultingHash) {
+ b.Errorf("Writer: hashes do not match: expected %02x, got %02x",
+ expectedHash, resultingHash)
+ }
+ }
+}
diff --git a/src/restic/id.go b/src/restic/id.go
index 08cb6f6..c64508a 100644
--- a/src/restic/id.go
+++ b/src/restic/id.go
@@ -114,3 +114,13 @@ func (id *ID) UnmarshalJSON(b []byte) error {
return nil
}
+
+// IDFromHash returns the ID for the hash.
+func IDFromHash(hash []byte) (id ID) {
+ if len(hash) != idSize {
+ panic("invalid hash type, not enough/too many bytes")
+ }
+
+ copy(id[:], hash)
+ return id
+}
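A hedged sketch of how IDFromHash can be combined with the new hashing.Writer to compute an ID while streaming data; hashAndCopy is a made-up helper (imports of "crypto/sha256", "io", "restic" and "restic/hashing" assumed):

    func hashAndCopy(dst io.Writer, src io.Reader) (restic.ID, error) {
        hw := hashing.NewWriter(dst, sha256.New())
        if _, err := io.Copy(hw, src); err != nil {
            return restic.ID{}, err
        }
        // a sha256 sum is exactly idSize bytes, so IDFromHash does not panic
        return restic.IDFromHash(hw.Sum(nil)), nil
    }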
diff --git a/src/restic/index/index.go b/src/restic/index/index.go
index 4481d0d..f1c41b7 100644
--- a/src/restic/index/index.go
+++ b/src/restic/index/index.go
@@ -14,27 +14,20 @@ import (
// Pack contains information about the contents of a pack.
type Pack struct {
+ ID restic.ID
Size int64
Entries []restic.Blob
}
-// Blob contains information about a blob.
-type Blob struct {
- Size int64
- Packs restic.IDSet
-}
-
// Index contains information about blobs and packs stored in a repo.
type Index struct {
Packs map[restic.ID]Pack
- Blobs map[restic.BlobHandle]Blob
IndexIDs restic.IDSet
}
func newIndex() *Index {
return &Index{
Packs: make(map[restic.ID]Pack),
- Blobs: make(map[restic.BlobHandle]Blob),
IndexIDs: restic.NewIDSet(),
}
}
@@ -69,9 +62,6 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) {
if err != nil {
return nil, err
}
-
- p := Pack{Entries: j.Entries(), Size: j.Size()}
- idx.Packs[packID] = p
}
return idx, nil
@@ -179,19 +169,7 @@ func (idx *Index) AddPack(id restic.ID, size int64, entries []restic.Blob) error
return errors.Errorf("pack %v already present in the index", id.Str())
}
- idx.Packs[id] = Pack{Size: size, Entries: entries}
-
- for _, entry := range entries {
- h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
- if _, ok := idx.Blobs[h]; !ok {
- idx.Blobs[h] = Blob{
- Size: int64(entry.Length),
- Packs: restic.NewIDSet(),
- }
- }
-
- idx.Blobs[h].Packs.Insert(id)
- }
+ idx.Packs[id] = Pack{ID: id, Size: size, Entries: entries}
return nil
}
@@ -202,15 +180,6 @@ func (idx *Index) RemovePack(id restic.ID) error {
return errors.Errorf("pack %v not found in the index", id.Str())
}
- for _, blob := range idx.Packs[id].Entries {
- h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
- idx.Blobs[h].Packs.Delete(id)
-
- if len(idx.Blobs[h].Packs) == 0 {
- delete(idx.Blobs, h)
- }
- }
-
delete(idx.Packs, id)
return nil
@@ -239,14 +208,11 @@ func (idx *Index) DuplicateBlobs() (dups restic.BlobSet) {
func (idx *Index) PacksForBlobs(blobs restic.BlobSet) (packs restic.IDSet) {
packs = restic.NewIDSet()
- for h := range blobs {
- blob, ok := idx.Blobs[h]
- if !ok {
- continue
- }
-
- for id := range blob.Packs {
- packs.Insert(id)
+ for id, p := range idx.Packs {
+ for _, entry := range p.Entries {
+ if blobs.Has(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) {
+ packs.Insert(id)
+ }
}
}
@@ -264,33 +230,22 @@ type Location struct {
var ErrBlobNotFound = errors.New("blob not found in index")
// FindBlob returns a list of packs and positions the blob can be found in.
-func (idx *Index) FindBlob(h restic.BlobHandle) ([]Location, error) {
- blob, ok := idx.Blobs[h]
- if !ok {
- return nil, ErrBlobNotFound
- }
-
- result := make([]Location, 0, len(blob.Packs))
- for packID := range blob.Packs {
- pack, ok := idx.Packs[packID]
- if !ok {
- return nil, errors.Errorf("pack %v not found in index", packID.Str())
- }
-
- for _, entry := range pack.Entries {
- if entry.Type != h.Type {
- continue
- }
-
- if !entry.ID.Equal(h.ID) {
- continue
+func (idx *Index) FindBlob(h restic.BlobHandle) (result []Location, err error) {
+ for id, p := range idx.Packs {
+ for _, entry := range p.Entries {
+ if entry.ID.Equal(h.ID) && entry.Type == h.Type {
+ result = append(result, Location{
+ PackID: id,
+ Blob: entry,
+ })
}
-
- loc := Location{PackID: packID, Blob: entry}
- result = append(result, loc)
}
}
+ if len(result) == 0 {
+ return nil, ErrBlobNotFound
+ }
+
return result, nil
}
diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go
index 7905f73..1984c2c 100644
--- a/src/restic/index/index_test.go
+++ b/src/restic/index/index_test.go
@@ -3,7 +3,9 @@ package index
import (
"math/rand"
"restic"
+ "restic/checker"
"restic/repository"
+ "restic/test"
"testing"
"time"
)
@@ -25,9 +27,14 @@ func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Reposito
func validateIndex(t testing.TB, repo restic.Repository, idx *Index) {
for id := range repo.List(restic.DataFile, nil) {
- if _, ok := idx.Packs[id]; !ok {
+ p, ok := idx.Packs[id]
+ if !ok {
t.Errorf("pack %v missing from index", id.Str())
}
+
+ if !p.ID.Equal(id) {
+ t.Errorf("pack %v has invalid ID: want %v, got %v", id.Str(), id, p.ID)
+ }
}
}
@@ -135,6 +142,40 @@ func BenchmarkIndexNew(b *testing.B) {
if idx == nil {
b.Fatalf("New() returned nil index")
}
+ b.Logf("idx %v packs", len(idx.Packs))
+ }
+}
+
+func BenchmarkIndexSave(b *testing.B) {
+ repo, cleanup := repository.TestRepository(b)
+ defer cleanup()
+
+ idx, err := New(repo, nil)
+ test.OK(b, err)
+
+ for i := 0; i < 8000; i++ {
+ entries := make([]restic.Blob, 0, 200)
+ for j := 0; j < cap(entries); j++ {
+ entries = append(entries, restic.Blob{
+ ID: restic.NewRandomID(),
+ Length: 1000,
+ Offset: 5,
+ Type: restic.DataBlob,
+ })
+ }
+
+ idx.AddPack(restic.NewRandomID(), 10000, entries)
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ id, err := idx.Save(repo, nil)
+ if err != nil {
+ b.Fatalf("New() returned error %v", err)
+ }
+
+ b.Logf("saved as %v", id.Str())
}
}
@@ -151,7 +192,7 @@ func TestIndexDuplicateBlobs(t *testing.T) {
if len(dups) == 0 {
t.Errorf("no duplicate blobs found")
}
- t.Logf("%d packs, %d unique blobs", len(idx.Packs), len(idx.Blobs))
+ t.Logf("%d packs, %d duplicate blobs", len(idx.Packs), len(dups))
packs := idx.PacksForBlobs(dups)
if len(packs) == 0 {
@@ -169,7 +210,7 @@ func loadIndex(t testing.TB, repo restic.Repository) *Index {
return idx
}
-func TestIndexSave(t *testing.T) {
+func TestSave(t *testing.T) {
repo, cleanup := createFilledRepo(t, 3, 0)
defer cleanup()
@@ -193,7 +234,8 @@ func TestIndexSave(t *testing.T) {
for id := range idx.IndexIDs {
t.Logf("remove index %v", id.Str())
- err = repo.Backend().Remove(restic.IndexFile, id.String())
+ h := restic.Handle{Type: restic.IndexFile, Name: id.String()}
+ err = repo.Backend().Remove(h)
if err != nil {
t.Errorf("error removing index %v: %v", id, err)
}
@@ -219,6 +261,42 @@ func TestIndexSave(t *testing.T) {
}
}
+func TestIndexSave(t *testing.T) {
+ repo, cleanup := createFilledRepo(t, 3, 0)
+ defer cleanup()
+
+ idx := loadIndex(t, repo)
+
+ id, err := idx.Save(repo, idx.IndexIDs.List())
+ if err != nil {
+ t.Fatalf("unable to save new index: %v", err)
+ }
+
+ t.Logf("new index saved as %v", id.Str())
+
+ for id := range idx.IndexIDs {
+ t.Logf("remove index %v", id.Str())
+ h := restic.Handle{Type: restic.IndexFile, Name: id.String()}
+ err = repo.Backend().Remove(h)
+ if err != nil {
+ t.Errorf("error removing index %v: %v", id, err)
+ }
+ }
+
+ idx2 := loadIndex(t, repo)
+ t.Logf("load new index with %d packs", len(idx2.Packs))
+
+ checker := checker.New(repo)
+ hints, errs := checker.LoadIndex()
+ for _, h := range hints {
+ t.Logf("hint: %v\n", h)
+ }
+
+ for _, err := range errs {
+ t.Errorf("checker found error: %v", err)
+ }
+}
+
func TestIndexAddRemovePack(t *testing.T) {
repo, cleanup := createFilledRepo(t, 3, 0)
defer cleanup()
@@ -249,12 +327,7 @@ func TestIndexAddRemovePack(t *testing.T) {
if err == nil {
t.Errorf("removed blob %v found in index", h)
}
-
- if _, ok := idx.Blobs[h]; ok {
- t.Errorf("removed blob %v found in index.Blobs", h)
- }
}
-
}
// example index serialization from doc/Design.md
diff --git a/src/restic/lock.go b/src/restic/lock.go
index e5f4a4b..8fec753 100644
--- a/src/restic/lock.go
+++ b/src/restic/lock.go
@@ -186,7 +186,7 @@ func (l *Lock) Unlock() error {
return nil
}
- return l.repo.Backend().Remove(LockFile, l.lockID.String())
+ return l.repo.Backend().Remove(Handle{Type: LockFile, Name: l.lockID.String()})
}
var staleTimeout = 30 * time.Minute
@@ -234,7 +234,7 @@ func (l *Lock) Refresh() error {
return err
}
- err = l.repo.Backend().Remove(LockFile, l.lockID.String())
+ err = l.repo.Backend().Remove(Handle{Type: LockFile, Name: l.lockID.String()})
if err != nil {
return err
}
@@ -289,7 +289,7 @@ func RemoveStaleLocks(repo Repository) error {
}
if lock.Stale() {
- return repo.Backend().Remove(LockFile, id.String())
+ return repo.Backend().Remove(Handle{Type: LockFile, Name: id.String()})
}
return nil
@@ -299,6 +299,6 @@ func RemoveStaleLocks(repo Repository) error {
// RemoveAllLocks removes all locks forcefully.
func RemoveAllLocks(repo Repository) error {
return eachLock(repo, func(id ID, lock *Lock, err error) error {
- return repo.Backend().Remove(LockFile, id.String())
+ return repo.Backend().Remove(Handle{Type: LockFile, Name: id.String()})
})
}
diff --git a/src/restic/lock_test.go b/src/restic/lock_test.go
index a6854db..b8288d6 100644
--- a/src/restic/lock_test.go
+++ b/src/restic/lock_test.go
@@ -102,7 +102,8 @@ func createFakeLock(repo restic.Repository, t time.Time, pid int) (restic.ID, er
}
func removeLock(repo restic.Repository, id restic.ID) error {
- return repo.Backend().Remove(restic.LockFile, id.String())
+ h := restic.Handle{Type: restic.LockFile, Name: id.String()}
+ return repo.Backend().Remove(h)
}
var staleLockTests = []struct {
@@ -162,7 +163,8 @@ func TestLockStale(t *testing.T) {
}
func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool {
- exists, err := repo.Backend().Test(restic.LockFile, id.String())
+ h := restic.Handle{Type: restic.LockFile, Name: id.String()}
+ exists, err := repo.Backend().Test(h)
OK(t, err)
return exists
diff --git a/src/restic/mock/backend.go b/src/restic/mock/backend.go
index 5aadc84..704e871 100644
--- a/src/restic/mock/backend.go
+++ b/src/restic/mock/backend.go
@@ -1,6 +1,7 @@
package mock
import (
+ "io"
"restic"
"restic/errors"
@@ -9,12 +10,12 @@ import (
// Backend implements a mock backend.
type Backend struct {
CloseFn func() error
- LoadFn func(h restic.Handle, p []byte, off int64) (int, error)
- SaveFn func(h restic.Handle, p []byte) error
+ SaveFn func(h restic.Handle, rd io.Reader) error
+ LoadFn func(h restic.Handle, length int, offset int64) (io.ReadCloser, error)
StatFn func(h restic.Handle) (restic.FileInfo, error)
ListFn func(restic.FileType, <-chan struct{}) <-chan string
- RemoveFn func(restic.FileType, string) error
- TestFn func(restic.FileType, string) (bool, error)
+ RemoveFn func(h restic.Handle) error
+ TestFn func(h restic.Handle) (bool, error)
DeleteFn func() error
LocationFn func() string
}
@@ -37,22 +38,22 @@ func (m *Backend) Location() string {
return m.LocationFn()
}
-// Load loads data from the backend.
-func (m *Backend) Load(h restic.Handle, p []byte, off int64) (int, error) {
- if m.LoadFn == nil {
- return 0, errors.New("not implemented")
+// Save data in the backend.
+func (m *Backend) Save(h restic.Handle, rd io.Reader) error {
+ if m.SaveFn == nil {
+ return errors.New("not implemented")
}
- return m.LoadFn(h, p, off)
+ return m.SaveFn(h, rd)
}
-// Save data in the backend.
-func (m *Backend) Save(h restic.Handle, p []byte) error {
- if m.SaveFn == nil {
- return errors.New("not implemented")
+// Load loads data from the backend.
+func (m *Backend) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ if m.LoadFn == nil {
+ return nil, errors.New("not implemented")
}
- return m.SaveFn(h, p)
+ return m.LoadFn(h, length, offset)
}
// Stat an object in the backend.
@@ -76,21 +77,21 @@ func (m *Backend) List(t restic.FileType, done <-chan struct{}) <-chan string {
}
// Remove data from the backend.
-func (m *Backend) Remove(t restic.FileType, name string) error {
+func (m *Backend) Remove(h restic.Handle) error {
if m.RemoveFn == nil {
return errors.New("not implemented")
}
- return m.RemoveFn(t, name)
+ return m.RemoveFn(h)
}
// Test for the existence of a specific item.
-func (m *Backend) Test(t restic.FileType, name string) (bool, error) {
+func (m *Backend) Test(h restic.Handle) (bool, error) {
if m.TestFn == nil {
return false, errors.New("not implemented")
}
- return m.TestFn(t, name)
+ return m.TestFn(h)
}
// Delete all data.
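
With the function-field signatures above changed to Save(h, io.Reader) and Handle-based Remove/Test, a test can stub out a backend by filling in only the fields it needs; BenchmarkPackerManager further down in this diff uses exactly this pattern. A small sketch (the constructor name is invented for illustration):

    package mockexample // hypothetical

    import (
        "io"
        "io/ioutil"

        "restic"
        "restic/mock"
    )

    // newDiscardingBackend returns a mock backend whose Save drains and drops
    // the data; every other method keeps its "not implemented" default.
    func newDiscardingBackend() *mock.Backend {
        return &mock.Backend{
            SaveFn: func(h restic.Handle, rd io.Reader) error {
                _, err := io.Copy(ioutil.Discard, rd)
                return err
            },
        }
    }
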
diff --git a/src/restic/node.go b/src/restic/node.go
index e172151..bf41f42 100644
--- a/src/restic/node.go
+++ b/src/restic/node.go
@@ -208,7 +208,7 @@ func (node Node) createFileAt(path string, repo Repository) error {
buf = buf[:cap(buf)]
if uint(len(buf)) < size {
- buf = make([]byte, size)
+ buf = NewBlobBuffer(int(size))
}
n, err := repo.LoadBlob(DataBlob, id, buf)
diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go
index 17f79b0..6666d88 100644
--- a/src/restic/pack/pack.go
+++ b/src/restic/pack/pack.go
@@ -85,15 +85,15 @@ func (p *Packer) Finalize() (uint, error) {
return 0, errors.Wrap(err, "Write")
}
- hdrBytes := bytesHeader + crypto.Extension
- if uint(n) != hdrBytes {
+ hdrBytes := restic.CiphertextLength(int(bytesHeader))
+ if n != hdrBytes {
return 0, errors.New("wrong number of bytes written")
}
- bytesWritten += hdrBytes
+ bytesWritten += uint(hdrBytes)
// write length
- err = binary.Write(p.wr, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension))
+ err = binary.Write(p.wr, binary.LittleEndian, uint32(restic.CiphertextLength(len(p.blobs)*int(entrySize))))
if err != nil {
return 0, errors.Wrap(err, "binary.Write")
}
@@ -233,6 +233,8 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err
hdrRd := bytes.NewReader(buf)
+ entries = make([]restic.Blob, 0, uint(n)/entrySize)
+
pos := uint(0)
for {
e := headerEntry{}
diff --git a/src/restic/pack/pack_test.go b/src/restic/pack/pack_test.go
index f90d1a4..39cdbba 100644
--- a/src/restic/pack/pack_test.go
+++ b/src/restic/pack/pack_test.go
@@ -54,10 +54,9 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSi
}
// header length
written += binary.Size(uint32(0))
- // header
- written += len(bufs) * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{}))
- // header crypto
- written += crypto.Extension
+ // header + header crypto
+ headerSize := len(bufs) * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{}))
+ written += restic.CiphertextLength(headerSize)
// check length
Equals(t, uint(written), packSize)
@@ -127,7 +126,7 @@ func TestUnpackReadSeeker(t *testing.T) {
id := restic.Hash(packData)
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
- OK(t, b.Save(handle, packData))
+ OK(t, b.Save(handle, bytes.NewReader(packData)))
verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
}
@@ -140,6 +139,6 @@ func TestShortPack(t *testing.T) {
id := restic.Hash(packData)
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
- OK(t, b.Save(handle, packData))
+ OK(t, b.Save(handle, bytes.NewReader(packData)))
verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
}
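
Several hunks in this diff replace manual "+ crypto.Extension" arithmetic with restic.CiphertextLength, restic.PlaintextLength and restic.NewBlobBuffer. Their definitions are not part of this excerpt; judging from the expressions they replace, they presumably behave roughly like the sketch below, where crypto.Extension is the per-blob encryption overhead:

    // Assumed semantics, inferred from the replaced expressions; the real
    // definitions are added elsewhere in this commit (src/restic/buffer.go).
    package restic

    import "restic/crypto"

    // CiphertextLength returns the encrypted size of a plaintext of the given length.
    func CiphertextLength(size int) int { return size + crypto.Extension }

    // PlaintextLength returns the plaintext size of a ciphertext of the given length.
    func PlaintextLength(size int) int { return size - crypto.Extension }

    // NewBlobBuffer returns a buffer of length size whose capacity is large
    // enough to also hold the encrypted form of the blob.
    func NewBlobBuffer(size int) []byte {
        return make([]byte, size, CiphertextLength(size))
    }
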
diff --git a/src/restic/readerat.go b/src/restic/readerat.go
index 7d36b33..a579744 100644
--- a/src/restic/readerat.go
+++ b/src/restic/readerat.go
@@ -2,6 +2,7 @@ package restic
import (
"io"
+ "restic/debug"
)
type backendReaderAt struct {
@@ -9,11 +10,30 @@ type backendReaderAt struct {
h Handle
}
-func (brd backendReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
- return brd.be.Load(brd.h, p, off)
+func (brd backendReaderAt) ReadAt(p []byte, offset int64) (n int, err error) {
+ return ReadAt(brd.be, brd.h, offset, p)
}
// ReaderAt returns an io.ReaderAt for a file in the backend.
func ReaderAt(be Backend, h Handle) io.ReaderAt {
return backendReaderAt{be: be, h: h}
}
+
+// ReadAt reads from the backend handle h at the given position.
+func ReadAt(be Backend, h Handle, offset int64, p []byte) (n int, err error) {
+ debug.Log("ReadAt(%v) at %v, len %v", h, offset, len(p))
+ rd, err := be.Load(h, len(p), offset)
+ if err != nil {
+ return 0, err
+ }
+
+ n, err = io.ReadFull(rd, p)
+ e := rd.Close()
+ if err == nil {
+ err = e
+ }
+
+ debug.Log("ReadAt(%v) ReadFull returned %v bytes", h, n)
+
+ return n, err
+}
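
ReaderAt and the exported ReadAt helper above hide the ReadCloser returned by the new Backend.Load from callers that just want a byte range. A usage sketch (function and parameter names are placeholders):

    package readexample // hypothetical

    import "restic"

    // readRange reads length bytes at offset from a data file in the backend,
    // letting restic.ReadAt handle Load, ReadFull and Close.
    func readRange(be restic.Backend, packID restic.ID, offset int64, length int) ([]byte, error) {
        h := restic.Handle{Type: restic.DataFile, Name: packID.String()}

        buf := make([]byte, length)
        n, err := restic.ReadAt(be, h, offset, buf)
        if err != nil {
            return nil, err
        }

        return buf[:n], nil
    }
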
diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go
index 1ca9525..4257c7d 100644
--- a/src/restic/repository/index.go
+++ b/src/restic/repository/index.go
@@ -1,7 +1,6 @@
package repository
import (
- "bytes"
"encoding/json"
"io"
"restic"
@@ -10,7 +9,6 @@ import (
"restic/errors"
- "restic/crypto"
"restic/debug"
)
@@ -177,15 +175,15 @@ func (idx *Index) Has(id restic.ID, tpe restic.BlobType) bool {
return false
}
-// LookupSize returns the length of the cleartext content behind the
-// given id
-func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (cleartextLength uint, err error) {
+// LookupSize returns the length of the plaintext content of the blob with the
+// given id.
+func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (plaintextLength uint, err error) {
blobs, err := idx.Lookup(id, tpe)
if err != nil {
return 0, err
}
- return blobs[0].Length - crypto.Extension, nil
+ return uint(restic.PlaintextLength(int(blobs[0].Length))), nil
}
// Supersedes returns the list of indexes this index supersedes, if any.
@@ -452,12 +450,11 @@ func isErrOldIndex(err error) bool {
var ErrOldIndexFormat = errors.New("index has old format")
// DecodeIndex loads and unserializes an index from rd.
-func DecodeIndex(rd io.Reader) (idx *Index, err error) {
+func DecodeIndex(buf []byte) (idx *Index, err error) {
debug.Log("Start decoding index")
- idxJSON := jsonIndex{}
+ idxJSON := &jsonIndex{}
- dec := json.NewDecoder(rd)
- err = dec.Decode(&idxJSON)
+ err = json.Unmarshal(buf, idxJSON)
if err != nil {
debug.Log("Error %v", err)
@@ -491,12 +488,11 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
}
// DecodeOldIndex loads and unserializes an index in the old format from rd.
-func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
+func DecodeOldIndex(buf []byte) (idx *Index, err error) {
debug.Log("Start decoding old index")
list := []*packJSON{}
- dec := json.NewDecoder(rd)
- err = dec.Decode(&list)
+ err = json.Unmarshal(buf, &list)
if err != nil {
debug.Log("Error %#v", err)
return nil, errors.Wrap(err, "Decode")
@@ -523,7 +519,7 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
}
// LoadIndexWithDecoder loads the index and decodes it with fn.
-func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
+func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func([]byte) (*Index, error)) (idx *Index, err error) {
debug.Log("Loading index %v", id.Str())
buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
@@ -531,7 +527,7 @@ func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reade
return nil, err
}
- idx, err = fn(bytes.NewReader(buf))
+ idx, err = fn(buf)
if err != nil {
debug.Log("error while decoding index %v: %v", id, err)
return nil, err
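
DecodeIndex and DecodeOldIndex now decode from a byte slice via json.Unmarshal instead of streaming from an io.Reader, and LoadIndexWithDecoder passes the decrypted buffer straight through. A hedged sketch of how the two decoders are presumably selected; the actual LoadIndex lives elsewhere in the repository package and may handle the old-format fallback differently:

    // Sketch only, inside package repository; error handling around
    // ErrOldIndexFormat may differ in the real implementation.
    func loadIndex(repo restic.Repository, id restic.ID) (*Index, error) {
        idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex)
        if errors.Cause(err) == ErrOldIndexFormat {
            return LoadIndexWithDecoder(repo, id, DecodeOldIndex)
        }
        return idx, err
    }
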
diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go
index 592ac04..fcfc302 100644
--- a/src/restic/repository/index_rebuild.go
+++ b/src/restic/repository/index_rebuild.go
@@ -55,7 +55,8 @@ func RebuildIndex(repo restic.Repository) error {
debug.Log("new index saved as %v", id.Str())
for indexID := range oldIndexes {
- err := repo.Backend().Remove(restic.IndexFile, indexID.String())
+ h := restic.Handle{Type: restic.IndexFile, Name: indexID.String()}
+ err := repo.Backend().Remove(h)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", indexID.Str(), err)
}
diff --git a/src/restic/repository/index_test.go b/src/restic/repository/index_test.go
index 986f9ef..892b2b4 100644
--- a/src/restic/repository/index_test.go
+++ b/src/restic/repository/index_test.go
@@ -54,7 +54,7 @@ func TestIndexSerialize(t *testing.T) {
err := idx.Encode(wr)
OK(t, err)
- idx2, err := repository.DecodeIndex(wr)
+ idx2, err := repository.DecodeIndex(wr.Bytes())
OK(t, err)
Assert(t, idx2 != nil,
"nil returned for decoded index")
@@ -136,7 +136,7 @@ func TestIndexSerialize(t *testing.T) {
Assert(t, id2.Equal(id),
"wrong ID returned: want %v, got %v", id, id2)
- idx3, err := repository.DecodeIndex(wr3)
+ idx3, err := repository.DecodeIndex(wr3.Bytes())
OK(t, err)
Assert(t, idx3 != nil,
"nil returned for decoded index")
@@ -288,7 +288,7 @@ var exampleLookupTest = struct {
func TestIndexUnserialize(t *testing.T) {
oldIdx := restic.IDs{restic.TestParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")}
- idx, err := repository.DecodeIndex(bytes.NewReader(docExample))
+ idx, err := repository.DecodeIndex(docExample)
OK(t, err)
for _, test := range exampleTests {
@@ -326,8 +326,17 @@ func TestIndexUnserialize(t *testing.T) {
}
}
+func BenchmarkDecodeIndex(b *testing.B) {
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, err := repository.DecodeIndex(docExample)
+ OK(b, err)
+ }
+}
+
func TestIndexUnserializeOld(t *testing.T) {
- idx, err := repository.DecodeOldIndex(bytes.NewReader(docOldExample))
+ idx, err := repository.DecodeOldIndex(docOldExample)
OK(t, err)
for _, test := range exampleTests {
diff --git a/src/restic/repository/key.go b/src/restic/repository/key.go
index 3deeb9c..7ce9757 100644
--- a/src/restic/repository/key.go
+++ b/src/restic/repository/key.go
@@ -1,6 +1,7 @@
package repository
import (
+ "bytes"
"encoding/json"
"fmt"
"os"
@@ -146,7 +147,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
// LoadKey loads a key from the backend.
func LoadKey(s *Repository, name string) (k *Key, err error) {
h := restic.Handle{Type: restic.KeyFile, Name: name}
- data, err := backend.LoadAll(s.be, h, nil)
+ data, err := backend.LoadAll(s.be, h)
if err != nil {
return nil, err
}
@@ -232,7 +233,7 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
Name: restic.Hash(buf).String(),
}
- err = s.be.Save(h, buf)
+ err = s.be.Save(h, bytes.NewReader(buf))
if err != nil {
return nil, err
}
diff --git a/src/restic/repository/master_index.go b/src/restic/repository/master_index.go
index 165bf60..ebd2cbe 100644
--- a/src/restic/repository/master_index.go
+++ b/src/restic/repository/master_index.go
@@ -30,8 +30,7 @@ func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic
for _, idx := range mi.idx {
blobs, err = idx.Lookup(id, tpe)
if err == nil {
- debug.Log("MasterIndex.Lookup",
- "found id %v: %v", id.Str(), blobs)
+ debug.Log("found id %v: %v", id.Str(), blobs)
return
}
}
@@ -46,9 +45,8 @@ func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, erro
defer mi.idxMutex.RUnlock()
for _, idx := range mi.idx {
- length, err := idx.LookupSize(id, tpe)
- if err == nil {
- return length, nil
+ if idx.Has(id, tpe) {
+ return idx.LookupSize(id, tpe)
}
}
diff --git a/src/restic/repository/packer_manager.go b/src/restic/repository/packer_manager.go
index c686eff..e3f49f3 100644
--- a/src/restic/repository/packer_manager.go
+++ b/src/restic/repository/packer_manager.go
@@ -1,6 +1,7 @@
package repository
import (
+ "crypto/sha256"
"io"
"io/ioutil"
"os"
@@ -8,6 +9,7 @@ import (
"sync"
"restic/errors"
+ "restic/hashing"
"restic/crypto"
"restic/debug"
@@ -17,15 +19,22 @@ import (
// Saver implements saving data in a backend.
type Saver interface {
- Save(h restic.Handle, jp []byte) error
+ Save(restic.Handle, io.Reader) error
+}
+
+// Packer holds a pack.Packer together with a hash writer.
+type Packer struct {
+ *pack.Packer
+ hw *hashing.Writer
+ tmpfile *os.File
}
// packerManager keeps a list of open packs and creates new on demand.
type packerManager struct {
- be Saver
- key *crypto.Key
- pm sync.Mutex
- packs []*pack.Packer
+ be Saver
+ key *crypto.Key
+ pm sync.Mutex
+ packers []*Packer
pool sync.Pool
}
@@ -50,18 +59,18 @@ func newPackerManager(be Saver, key *crypto.Key) *packerManager {
// findPacker returns a packer for a new blob of size bytes. Either a new one is
// created or one is returned that already has some blobs.
-func (r *packerManager) findPacker(size uint) (packer *pack.Packer, err error) {
+func (r *packerManager) findPacker(size uint) (packer *Packer, err error) {
r.pm.Lock()
defer r.pm.Unlock()
// search for a suitable packer
- if len(r.packs) > 0 {
+ if len(r.packers) > 0 {
debug.Log("searching packer for %d bytes\n", size)
- for i, p := range r.packs {
- if p.Size()+size < maxPackSize {
+ for i, p := range r.packers {
+ if p.Packer.Size()+size < maxPackSize {
debug.Log("found packer %v", p)
// remove from list
- r.packs = append(r.packs[:i], r.packs[i+1:]...)
+ r.packers = append(r.packers[:i], r.packers[i+1:]...)
return p, nil
}
}
@@ -74,50 +83,43 @@ func (r *packerManager) findPacker(size uint) (packer *pack.Packer, err error) {
return nil, errors.Wrap(err, "ioutil.TempFile")
}
- return pack.NewPacker(r.key, tmpfile), nil
+ hw := hashing.NewWriter(tmpfile, sha256.New())
+ p := pack.NewPacker(r.key, hw)
+ packer = &Packer{
+ Packer: p,
+ hw: hw,
+ tmpfile: tmpfile,
+ }
+
+ return packer, nil
}
// insertPacker appends p to s.packs.
-func (r *packerManager) insertPacker(p *pack.Packer) {
+func (r *packerManager) insertPacker(p *Packer) {
r.pm.Lock()
defer r.pm.Unlock()
- r.packs = append(r.packs, p)
- debug.Log("%d packers\n", len(r.packs))
+ r.packers = append(r.packers, p)
+ debug.Log("%d packers\n", len(r.packers))
}
// savePacker stores p in the backend.
-func (r *Repository) savePacker(p *pack.Packer) error {
- debug.Log("save packer with %d blobs\n", p.Count())
- n, err := p.Finalize()
+func (r *Repository) savePacker(p *Packer) error {
+ debug.Log("save packer with %d blobs\n", p.Packer.Count())
+ _, err := p.Packer.Finalize()
if err != nil {
return err
}
- tmpfile := p.Writer().(*os.File)
- f, err := fs.Open(tmpfile.Name())
- if err != nil {
- return errors.Wrap(err, "Open")
- }
-
- data := make([]byte, n)
- m, err := io.ReadFull(f, data)
+ _, err = p.tmpfile.Seek(0, 0)
if err != nil {
- return errors.Wrap(err, "ReadFul")
+ return errors.Wrap(err, "Seek")
}
- if uint(m) != n {
- return errors.Errorf("read wrong number of bytes from %v: want %v, got %v", tmpfile.Name(), n, m)
- }
-
- if err = f.Close(); err != nil {
- return errors.Wrap(err, "Close")
- }
-
- id := restic.Hash(data)
+ id := restic.IDFromHash(p.hw.Sum(nil))
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
- err = r.be.Save(h, data)
+ err = r.be.Save(h, p.tmpfile)
if err != nil {
debug.Log("Save(%v) error: %v", h, err)
return err
@@ -125,13 +127,18 @@ func (r *Repository) savePacker(p *pack.Packer) error {
debug.Log("saved as %v", h)
- err = fs.Remove(tmpfile.Name())
+ err = p.tmpfile.Close()
+ if err != nil {
+ return errors.Wrap(err, "close tempfile")
+ }
+
+ err = fs.Remove(p.tmpfile.Name())
if err != nil {
return errors.Wrap(err, "Remove")
}
// update blobs in the index
- for _, b := range p.Blobs() {
+ for _, b := range p.Packer.Blobs() {
debug.Log(" updating blob %v to pack %v", b.ID.Str(), id.Str())
r.idx.Store(restic.PackedBlob{
Blob: restic.Blob{
@@ -152,5 +159,5 @@ func (r *packerManager) countPacker() int {
r.pm.Lock()
defer r.pm.Unlock()
- return len(r.packs)
+ return len(r.packers)
}
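
The new Packer struct pairs a pack.Packer with a hashing.Writer wrapped around the temp file, so savePacker can derive the pack ID from hw.Sum(nil) without re-reading the file. The hashing package itself is added elsewhere in this commit; from its use here it presumably looks roughly like this:

    // Assumed shape of restic/hashing.Writer, inferred from its use above.
    package hashing

    import (
        "hash"
        "io"
    )

    // Writer feeds everything written through it into h before passing it on to w.
    type Writer struct {
        w io.Writer
        h hash.Hash
    }

    func NewWriter(w io.Writer, h hash.Hash) *Writer {
        return &Writer{w: w, h: h}
    }

    func (w *Writer) Write(p []byte) (int, error) {
        n, err := w.w.Write(p)
        w.h.Write(p[:n])
        return n, err
    }

    // Sum returns the hash of all data written so far.
    func (w *Writer) Sum(d []byte) []byte {
        return w.h.Sum(d)
    }
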
diff --git a/src/restic/repository/packer_manager_test.go b/src/restic/repository/packer_manager_test.go
index bf62584..37718a5 100644
--- a/src/restic/repository/packer_manager_test.go
+++ b/src/restic/repository/packer_manager_test.go
@@ -7,6 +7,7 @@ import (
"restic"
"restic/backend/mem"
"restic/crypto"
+ "restic/mock"
"testing"
)
@@ -46,32 +47,19 @@ func randomID(rd io.Reader) restic.ID {
const maxBlobSize = 1 << 20
-func saveFile(t testing.TB, be Saver, filename string, n int) {
- f, err := os.Open(filename)
- if err != nil {
- t.Fatal(err)
- }
-
- data := make([]byte, n)
- m, err := io.ReadFull(f, data)
-
- if m != n {
- t.Fatalf("read wrong number of bytes from %v: want %v, got %v", filename, m, n)
- }
+func saveFile(t testing.TB, be Saver, f *os.File, id restic.ID) {
+ h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+ t.Logf("save file %v", h)
- if err = f.Close(); err != nil {
+ if err := be.Save(h, f); err != nil {
t.Fatal(err)
}
- h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()}
-
- err = be.Save(h, data)
- if err != nil {
+ if err := f.Close(); err != nil {
t.Fatal(err)
}
- err = os.Remove(filename)
- if err != nil {
+ if err := os.Remove(f.Name()); err != nil {
t.Fatal(err)
}
}
@@ -105,13 +93,17 @@ func fillPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager, buf [
continue
}
- bytesWritten, err := packer.Finalize()
+ _, err = packer.Finalize()
if err != nil {
t.Fatal(err)
}
- tmpfile := packer.Writer().(*os.File)
- saveFile(t, be, tmpfile.Name(), int(bytesWritten))
+ if _, err = packer.tmpfile.Seek(0, 0); err != nil {
+ t.Fatal(err)
+ }
+
+ packID := restic.IDFromHash(packer.hw.Sum(nil))
+ saveFile(t, be, packer.tmpfile, packID)
}
return bytes
@@ -119,27 +111,21 @@ func fillPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager, buf [
func flushRemainingPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager) (bytes int) {
if pm.countPacker() > 0 {
- for _, packer := range pm.packs {
+ for _, packer := range pm.packers {
n, err := packer.Finalize()
if err != nil {
t.Fatal(err)
}
bytes += int(n)
- tmpfile := packer.Writer().(*os.File)
- saveFile(t, be, tmpfile.Name(), bytes)
+ packID := restic.IDFromHash(packer.hw.Sum(nil))
+ saveFile(t, be, packer.tmpfile, packID)
}
}
return bytes
}
-type fakeBackend struct{}
-
-func (f *fakeBackend) Save(h restic.Handle, data []byte) error {
- return nil
-}
-
func TestPackerManager(t *testing.T) {
rnd := newRandReader(rand.NewSource(23))
@@ -157,17 +143,18 @@ func TestPackerManager(t *testing.T) {
func BenchmarkPackerManager(t *testing.B) {
rnd := newRandReader(rand.NewSource(23))
- be := &fakeBackend{}
- pm := newPackerManager(be, crypto.NewRandomKey())
+ be := &mock.Backend{
+ SaveFn: func(restic.Handle, io.Reader) error { return nil },
+ }
blobBuf := make([]byte, maxBlobSize)
t.ResetTimer()
- bytes := 0
for i := 0; i < t.N; i++ {
+ bytes := 0
+ pm := newPackerManager(be, crypto.NewRandomKey())
bytes += fillPacks(t, rnd, be, pm, blobBuf)
+ bytes += flushRemainingPacks(t, rnd, be, pm)
+ t.Logf("saved %d bytes", bytes)
}
-
- bytes += flushRemainingPacks(t, rnd, be, pm)
- t.Logf("saved %d bytes", bytes)
}
diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go
index 0a82a23..59bc70b 100644
--- a/src/restic/repository/repack.go
+++ b/src/restic/repository/repack.go
@@ -1,11 +1,14 @@
package repository
import (
- "bytes"
+ "crypto/sha256"
"io"
+ "io/ioutil"
+ "os"
"restic"
"restic/crypto"
"restic/debug"
+ "restic/hashing"
"restic/pack"
"restic/errors"
@@ -18,30 +21,47 @@ import (
func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) {
debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))
- buf := make([]byte, 0, maxPackSize)
for packID := range packs {
- // load the complete pack
+ // load the complete pack into a temp file
h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
- l, err := repo.Backend().Load(h, buf[:cap(buf)], 0)
- if errors.Cause(err) == io.ErrUnexpectedEOF {
- err = nil
- buf = buf[:l]
+ tempfile, err := ioutil.TempFile("", "restic-temp-repack-")
+ if err != nil {
+ return errors.Wrap(err, "TempFile")
}
+ beRd, err := repo.Backend().Load(h, 0, 0)
if err != nil {
return err
}
- debug.Log("pack %v loaded (%d bytes)", packID.Str(), len(buf))
+ defer beRd.Close()
- blobs, err := pack.List(repo.Key(), bytes.NewReader(buf), int64(len(buf)))
+ hrd := hashing.NewReader(beRd, sha256.New())
+ packLength, err := io.Copy(tempfile, hrd)
+ if err != nil {
+ return errors.Wrap(err, "Copy")
+ }
+
+ hash := restic.IDFromHash(hrd.Sum(nil))
+ debug.Log("pack %v loaded (%d bytes), hash %v", packID.Str(), packLength, hash.Str())
+
+ if !packID.Equal(hash) {
+ return errors.Errorf("hash does not match id: want %v, got %v", packID, hash)
+ }
+
+ _, err = tempfile.Seek(0, 0)
+ if err != nil {
+ return errors.Wrap(err, "Seek")
+ }
+
+ blobs, err := pack.List(repo.Key(), tempfile, packLength)
if err != nil {
return err
}
debug.Log("processing pack %v, blobs: %v", packID.Str(), len(blobs))
- var plaintext []byte
+ var buf []byte
for _, entry := range blobs {
h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
if !keepBlobs.Has(h) {
@@ -50,21 +70,36 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet
debug.Log(" process blob %v", h)
- ciphertext := buf[entry.Offset : entry.Offset+entry.Length]
- plaintext = plaintext[:len(plaintext)]
- if len(plaintext) < len(ciphertext) {
- plaintext = make([]byte, len(ciphertext))
+ buf = buf[:len(buf)]
+ if uint(len(buf)) < entry.Length {
+ buf = make([]byte, entry.Length)
+ }
+ buf = buf[:entry.Length]
+
+ n, err := tempfile.ReadAt(buf, int64(entry.Offset))
+ if err != nil {
+ return errors.Wrap(err, "ReadAt")
}
- debug.Log(" ciphertext %d, plaintext %d", len(plaintext), len(ciphertext))
+ if n != len(buf) {
+ return errors.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v",
+ h, tempfile.Name(), len(buf), n)
+ }
- n, err := crypto.Decrypt(repo.Key(), plaintext, ciphertext)
+ n, err = crypto.Decrypt(repo.Key(), buf, buf)
if err != nil {
return err
}
- plaintext = plaintext[:n]
- _, err = repo.SaveBlob(entry.Type, plaintext, entry.ID)
+ buf = buf[:n]
+
+ id := restic.Hash(buf)
+ if !id.Equal(entry.ID) {
+ return errors.Errorf("read blob %v from %v: wrong data returned, hash is %v",
+ h, tempfile.Name(), id)
+ }
+
+ _, err = repo.SaveBlob(entry.Type, buf, entry.ID)
if err != nil {
return err
}
@@ -73,6 +108,14 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet
keepBlobs.Delete(h)
}
+
+ if err = tempfile.Close(); err != nil {
+ return errors.Wrap(err, "Close")
+ }
+
+ if err = os.Remove(tempfile.Name()); err != nil {
+ return errors.Wrap(err, "Remove")
+ }
}
if err := repo.Flush(); err != nil {
@@ -80,7 +123,8 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet
}
for packID := range packs {
- err := repo.Backend().Remove(restic.DataFile, packID.String())
+ h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
+ err := repo.Backend().Remove(h)
if err != nil {
debug.Log("error removing pack %v: %v", packID.Str(), err)
return err
diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go
index 809afb1..45046da 100644
--- a/src/restic/repository/repository.go
+++ b/src/restic/repository/repository.go
@@ -54,7 +54,7 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er
debug.Log("load %v with id %v", t, id.Str())
h := restic.Handle{Type: t, Name: id.String()}
- buf, err := backend.LoadAll(r.be, h, nil)
+ buf, err := backend.LoadAll(r.be, h)
if err != nil {
debug.Log("error loading %v: %v", id.Str(), err)
return nil, err
@@ -64,33 +64,20 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er
return nil, errors.New("invalid data returned")
}
- plain := make([]byte, len(buf))
-
// decrypt
- n, err := r.decryptTo(plain, buf)
+ n, err := r.decryptTo(buf, buf)
if err != nil {
return nil, err
}
- return plain[:n], nil
+ return buf[:n], nil
}
// loadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) {
- debug.Log("load %v with id %v (buf %p, len %d)", t, id.Str(), plaintextBuf, len(plaintextBuf))
-
- // lookup plaintext size of blob
- size, err := r.idx.LookupSize(id, t)
- if err != nil {
- return 0, err
- }
-
- // make sure the plaintext buffer is large enough, extend otherwise
- if len(plaintextBuf) < int(size) {
- return 0, errors.Errorf("buffer is too small: %d < %d", len(plaintextBuf), size)
- }
+ debug.Log("load %v with id %v (buf len %v, cap %d)", t, id.Str(), len(plaintextBuf), cap(plaintextBuf))
// lookup packs
blobs, err := r.idx.Lookup(id, t)
@@ -109,8 +96,14 @@ func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
// load blob from pack
h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}
- ciphertextBuf := make([]byte, blob.Length)
- n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
+
+ if uint(cap(plaintextBuf)) < blob.Length {
+ return 0, errors.Errorf("buffer is too small: %v < %v", cap(plaintextBuf), blob.Length)
+ }
+
+ plaintextBuf = plaintextBuf[:blob.Length]
+
+ n, err := restic.ReadAt(r.be, h, int64(blob.Offset), plaintextBuf)
if err != nil {
debug.Log("error loading blob %v: %v", blob, err)
lastError = err
@@ -125,7 +118,7 @@ func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
}
// decrypt
- n, err = r.decryptTo(plaintextBuf, ciphertextBuf)
+ n, err = r.decryptTo(plaintextBuf, plaintextBuf)
if err != nil {
lastError = errors.Errorf("decrypting blob %v failed: %v", id, err)
continue
@@ -224,7 +217,7 @@ func (r *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (rest
// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, err error) {
- ciphertext := make([]byte, len(p)+crypto.Extension)
+ ciphertext := restic.NewBlobBuffer(len(p))
ciphertext, err = r.Encrypt(ciphertext, p)
if err != nil {
return restic.ID{}, err
@@ -233,7 +226,7 @@ func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, er
id = restic.Hash(ciphertext)
h := restic.Handle{Type: t, Name: id.String()}
- err = r.be.Save(h, ciphertext)
+ err = r.be.Save(h, bytes.NewReader(ciphertext))
if err != nil {
debug.Log("error saving blob %v: %v", h, err)
return restic.ID{}, err
@@ -248,15 +241,15 @@ func (r *Repository) Flush() error {
r.pm.Lock()
defer r.pm.Unlock()
- debug.Log("manually flushing %d packs", len(r.packs))
+ debug.Log("manually flushing %d packs", len(r.packerManager.packers))
- for _, p := range r.packs {
+ for _, p := range r.packerManager.packers {
err := r.savePacker(p)
if err != nil {
return err
}
}
- r.packs = r.packs[:0]
+ r.packerManager.packers = r.packerManager.packers[:0]
return nil
}
@@ -387,7 +380,7 @@ func (r *Repository) SearchKey(password string, maxKeys int) error {
// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(password string) error {
- has, err := r.be.Test(restic.ConfigFile, "")
+ has, err := r.be.Test(restic.Handle{Type: restic.ConfigFile})
if err != nil {
return err
}
@@ -528,16 +521,18 @@ func (r *Repository) Close() error {
return r.be.Close()
}
-// LoadBlob loads a blob of type t from the repository to the buffer.
+// LoadBlob loads a blob of type t from the repository to the buffer. buf must
+// be large enough to hold the encrypted blob, since it is used as scratch
+// space.
func (r *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) {
- debug.Log("load blob %v into buf %p", id.Str(), buf)
+ debug.Log("load blob %v into buf (len %v, cap %v)", id.Str(), len(buf), cap(buf))
size, err := r.idx.LookupSize(id, t)
if err != nil {
return 0, err
}
- if len(buf) < int(size) {
- return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", len(buf), size)
+ if cap(buf) < restic.CiphertextLength(int(size)) {
+ return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", cap(buf), restic.CiphertextLength(int(size)))
}
n, err := r.loadBlob(id, t, buf)
@@ -571,7 +566,7 @@ func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) {
}
debug.Log("size is %d, create buffer", size)
- buf := make([]byte, size)
+ buf := restic.NewBlobBuffer(int(size))
n, err := r.loadBlob(id, restic.TreeBlob, buf)
if err != nil {
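
LoadBlob now documents that buf is also used as scratch space for the ciphertext, so its capacity must be at least restic.CiphertextLength of the plaintext size; restic.NewBlobBuffer allocates a buffer that satisfies this. A sketch of the calling pattern, mirroring the LoadTree hunk above and the TestLoadBlob test added below (the wrapper is invented for illustration and assumes the plaintext size is already known):

    package loadexample // hypothetical

    import "restic"

    // loadDataBlob loads the blob id, whose plaintext size is already known,
    // into a buffer with enough spare capacity for the ciphertext.
    func loadDataBlob(repo restic.Repository, id restic.ID, size uint) ([]byte, error) {
        buf := restic.NewBlobBuffer(int(size))

        n, err := repo.LoadBlob(restic.DataBlob, id, buf)
        if err != nil {
            return nil, err
        }

        return buf[:n], nil
    }
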
diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go
index 5934d47..6ee99f2 100644
--- a/src/restic/repository/repository_test.go
+++ b/src/restic/repository/repository_test.go
@@ -2,12 +2,12 @@ package repository_test
import (
"bytes"
- "crypto/rand"
"crypto/sha256"
"io"
- mrand "math/rand"
+ "math/rand"
"path/filepath"
"testing"
+ "time"
"restic"
"restic/archiver"
@@ -17,13 +17,15 @@ import (
var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
+var rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
+
func TestSave(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
for _, size := range testSizes {
data := make([]byte, size)
- _, err := io.ReadFull(rand.Reader, data)
+ _, err := io.ReadFull(rnd, data)
OK(t, err)
id := restic.Hash(data)
@@ -38,7 +40,7 @@ func TestSave(t *testing.T) {
// OK(t, repo.SaveIndex())
// read back
- buf := make([]byte, size)
+ buf := restic.NewBlobBuffer(size)
n, err := repo.LoadBlob(restic.DataBlob, id, buf)
OK(t, err)
Equals(t, len(buf), n)
@@ -59,7 +61,7 @@ func TestSaveFrom(t *testing.T) {
for _, size := range testSizes {
data := make([]byte, size)
- _, err := io.ReadFull(rand.Reader, data)
+ _, err := io.ReadFull(rnd, data)
OK(t, err)
id := restic.Hash(data)
@@ -72,7 +74,7 @@ func TestSaveFrom(t *testing.T) {
OK(t, repo.Flush())
// read back
- buf := make([]byte, size)
+ buf := restic.NewBlobBuffer(size)
n, err := repo.LoadBlob(restic.DataBlob, id, buf)
OK(t, err)
Equals(t, len(buf), n)
@@ -94,7 +96,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) {
size := 4 << 20 // 4MiB
data := make([]byte, size)
- _, err := io.ReadFull(rand.Reader, data)
+ _, err := io.ReadFull(rnd, data)
OK(t, err)
id := restic.ID(sha256.Sum256(data))
@@ -145,6 +147,113 @@ func BenchmarkLoadTree(t *testing.B) {
}
}
+func TestLoadBlob(t *testing.T) {
+ repo, cleanup := repository.TestRepository(t)
+ defer cleanup()
+
+ length := 1000000
+ buf := restic.NewBlobBuffer(length)
+ _, err := io.ReadFull(rnd, buf)
+ OK(t, err)
+
+ id, err := repo.SaveBlob(restic.DataBlob, buf, restic.ID{})
+ OK(t, err)
+ OK(t, repo.Flush())
+
+ // first, test with buffers that are too small
+ for _, testlength := range []int{length - 20, length, restic.CiphertextLength(length) - 1} {
+ buf = make([]byte, 0, testlength)
+ n, err := repo.LoadBlob(restic.DataBlob, id, buf)
+ if err == nil {
+ t.Errorf("LoadBlob() did not return an error for a buffer that is too small to hold the blob")
+ continue
+ }
+
+ if n != 0 {
+ t.Errorf("LoadBlob() returned an error and n > 0")
+ continue
+ }
+ }
+
+ // then use buffers that are large enough
+ base := restic.CiphertextLength(length)
+ for _, testlength := range []int{base, base + 7, base + 15, base + 1000} {
+ buf = make([]byte, 0, testlength)
+ n, err := repo.LoadBlob(restic.DataBlob, id, buf)
+ if err != nil {
+ t.Errorf("LoadBlob() returned an error for buffer size %v: %v", testlength, err)
+ continue
+ }
+
+ if n != length {
+ t.Errorf("LoadBlob() returned the wrong number of bytes: want %v, got %v", length, n)
+ continue
+ }
+ }
+}
+
+func BenchmarkLoadBlob(b *testing.B) {
+ repo, cleanup := repository.TestRepository(b)
+ defer cleanup()
+
+ length := 1000000
+ buf := restic.NewBlobBuffer(length)
+ _, err := io.ReadFull(rnd, buf)
+ OK(b, err)
+
+ id, err := repo.SaveBlob(restic.DataBlob, buf, restic.ID{})
+ OK(b, err)
+ OK(b, repo.Flush())
+
+ b.ResetTimer()
+ b.SetBytes(int64(length))
+
+ for i := 0; i < b.N; i++ {
+ n, err := repo.LoadBlob(restic.DataBlob, id, buf)
+ OK(b, err)
+ if n != length {
+ b.Errorf("wanted %d bytes, got %d", length, n)
+ }
+
+ id2 := restic.Hash(buf[:n])
+ if !id.Equal(id2) {
+ b.Errorf("wrong data returned, wanted %v, got %v", id.Str(), id2.Str())
+ }
+ }
+}
+
+func BenchmarkLoadAndDecrypt(b *testing.B) {
+ repo, cleanup := repository.TestRepository(b)
+ defer cleanup()
+
+ length := 1000000
+ buf := restic.NewBlobBuffer(length)
+ _, err := io.ReadFull(rnd, buf)
+ OK(b, err)
+
+ dataID := restic.Hash(buf)
+
+ storageID, err := repo.SaveUnpacked(restic.DataFile, buf)
+ OK(b, err)
+ // OK(b, repo.Flush())
+
+ b.ResetTimer()
+ b.SetBytes(int64(length))
+
+ for i := 0; i < b.N; i++ {
+ data, err := repo.LoadAndDecrypt(restic.DataFile, storageID)
+ OK(b, err)
+ if len(data) != length {
+ b.Errorf("wanted %d bytes, got %d", length, len(data))
+ }
+
+ id2 := restic.Hash(data)
+ if !dataID.Equal(id2) {
+ b.Errorf("wrong data returned, wanted %v, got %v", storageID.Str(), id2.Str())
+ }
+ }
+}
+
func TestLoadJSONUnpacked(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
@@ -182,25 +291,48 @@ func TestRepositoryLoadIndex(t *testing.T) {
}
func BenchmarkLoadIndex(b *testing.B) {
- repodir, cleanup := Env(b, repoFixture)
+ repository.TestUseLowSecurityKDFParameters(b)
+
+ repo, cleanup := repository.TestRepository(b)
defer cleanup()
- repo := repository.TestOpenLocal(b, repodir)
+ idx := repository.NewIndex()
+
+ for i := 0; i < 5000; i++ {
+ idx.Store(restic.PackedBlob{
+ Blob: restic.Blob{
+ Type: restic.DataBlob,
+ Length: 1234,
+ ID: restic.NewRandomID(),
+ Offset: 1235,
+ },
+ PackID: restic.NewRandomID(),
+ })
+ }
+
+ id, err := repository.SaveIndex(repo, idx)
+ OK(b, err)
+
+ b.Logf("index saved as %v (%v entries)", id.Str(), idx.Count(restic.DataBlob))
+ fi, err := repo.Backend().Stat(restic.Handle{Type: restic.IndexFile, Name: id.String()})
+ OK(b, err)
+ b.Logf("filesize is %v", fi.Size)
+
b.ResetTimer()
for i := 0; i < b.N; i++ {
- repo.SetIndex(repository.NewMasterIndex())
- OK(b, repo.LoadIndex())
+ _, err := repository.LoadIndex(repo, id)
+ OK(b, err)
}
}
// saveRandomDataBlobs generates random data blobs and saves them to the repository.
func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) {
for i := 0; i < num; i++ {
- size := mrand.Int() % sizeMax
+ size := rand.Int() % sizeMax
buf := make([]byte, size)
- _, err := io.ReadFull(rand.Reader, buf)
+ _, err := io.ReadFull(rnd, buf)
OK(t, err)
_, err = repo.SaveBlob(restic.DataBlob, buf, restic.ID{})
diff --git a/src/restic/test/helpers.go b/src/restic/test/helpers.go
index 072cc4d..4e19000 100644
--- a/src/restic/test/helpers.go
+++ b/src/restic/test/helpers.go
@@ -79,7 +79,7 @@ func Random(seed, count int) []byte {
for j := range data {
cur := i + j
- if len(p) >= cur {
+ if cur >= len(p) {
break
}
p[cur] = data[j]
diff --git a/src/restic/testing.go b/src/restic/testing.go
index 49a8489..719ff33 100644
--- a/src/restic/testing.go
+++ b/src/restic/testing.go
@@ -23,16 +23,26 @@ type fakeFileSystem struct {
repo Repository
knownBlobs IDSet
duplication float32
+ buf []byte
+ chunker *chunker.Chunker
}
// saveFile reads from rd and saves the blobs in the repository. The list of
// IDs is returned.
-func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
- blobs = IDs{}
- ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial)
+func (fs *fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
+ if fs.buf == nil {
+ fs.buf = make([]byte, chunker.MaxSize)
+ }
+ if fs.chunker == nil {
+ fs.chunker = chunker.New(rd, fs.repo.Config().ChunkerPolynomial)
+ } else {
+ fs.chunker.Reset(rd, fs.repo.Config().ChunkerPolynomial)
+ }
+
+ blobs = IDs{}
for {
- chunk, err := ch.Next(getBuf())
+ chunk, err := fs.chunker.Next(fs.buf)
if errors.Cause(err) == io.EOF {
break
}
@@ -50,7 +60,6 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
fs.knownBlobs.Insert(id)
}
- freeBuf(chunk.Data)
blobs = append(blobs, id)
}
@@ -64,7 +73,7 @@ const (
maxNodes = 32
)
-func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, []byte, ID) {
+func (fs *fakeFileSystem) treeIsKnown(tree *Tree) (bool, []byte, ID) {
data, err := json.Marshal(tree)
if err != nil {
fs.t.Fatalf("json.Marshal(tree) returned error: %v", err)
@@ -76,7 +85,7 @@ func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, []byte, ID) {
return fs.blobIsKnown(id, TreeBlob), data, id
}
-func (fs fakeFileSystem) blobIsKnown(id ID, t BlobType) bool {
+func (fs *fakeFileSystem) blobIsKnown(id ID, t BlobType) bool {
if rand.Float32() < fs.duplication {
return false
}
@@ -94,7 +103,7 @@ func (fs fakeFileSystem) blobIsKnown(id ID, t BlobType) bool {
}
// saveTree saves a tree of fake files in the repo and returns the ID.
-func (fs fakeFileSystem) saveTree(seed int64, depth int) ID {
+func (fs *fakeFileSystem) saveTree(seed int64, depth int) ID {
rnd := rand.NewSource(seed)
numNodes := int(rnd.Int63() % maxNodes)
diff --git a/src/restic/testing_test.go b/src/restic/testing_test.go
index 1258bf2..86b18a0 100644
--- a/src/restic/testing_test.go
+++ b/src/restic/testing_test.go
@@ -47,3 +47,14 @@ func TestCreateSnapshot(t *testing.T) {
checker.TestCheckRepo(t, repo)
}
+
+func BenchmarkTestCreateSnapshot(t *testing.B) {
+ repo, cleanup := repository.TestRepository(t)
+ defer cleanup()
+
+ t.ResetTimer()
+
+ for i := 0; i < t.N; i++ {
+ restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0)
+ }
+}
diff --git a/vendor/manifest b/vendor/manifest
index 7deaab8..1d128b7 100644
--- a/vendor/manifest
+++ b/vendor/manifest
@@ -38,6 +38,12 @@
"branch": "master"
},
{
+ "importpath": "github.com/pkg/profile",
+ "repository": "https://github.com/pkg/profile",
+ "revision": "1c16f117a3ab788fdf0e334e623b8bccf5679866",
+ "branch": "HEAD"
+ },
+ {
"importpath": "github.com/pkg/sftp",
"repository": "https://github.com/pkg/sftp",
"revision": "8197a2e580736b78d704be0fc47b2324c0591a32",