mirror of https://github.com/garethgeorge/backrest.git
synced 2025-10-30 12:17:03 +00:00
feat: allow direct downloading files and folders through backrest webui without restoring first (#921)
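The webui's snapshot file browser gains a "Download" action: the frontend calls GetDownloadURL to obtain a signed link, and a new /download/ HTTP handler serves the content. Snapshot entries are streamed straight out of the repository via restic dump (tar on non-Windows hosts, zip on Windows), while restore operations are served from the restore target directory, packaged on the fly as a .tar.gz.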
@@ -167,7 +167,7 @@ func main() {
 	syncStateHandlerPath, syncStateHandler := v1connect.NewBackrestSyncStateServiceHandler(syncapi.NewBackrestSyncStateHandler(syncMgr))
 	mux.Handle(syncStateHandlerPath, auth.RequireAuthentication(syncStateHandler, authenticator))
 	mux.Handle("/", webui.Handler())
-	mux.Handle("/download/", http.StripPrefix("/download", api.NewDownloadHandler(log)))
+	mux.Handle("/download/", http.StripPrefix("/download", api.NewDownloadHandler(log, orchestrator)))
 	mux.Handle("/metrics", auth.RequireAuthentication(metric.GetRegistry().Handler(), authenticator))

 	// Serve the HTTP gateway
@@ -743,10 +743,14 @@ func (s *BackrestHandler) GetDownloadURL(ctx context.Context, req *connect.Reque
 	if err != nil {
 		return nil, fmt.Errorf("failed to get operation %v: %w", req.Msg.Value, err)
 	}
-	_, ok := op.Op.(*v1.Operation_OperationRestore)
-	if !ok {
-		return nil, fmt.Errorf("operation %v is not a restore operation", req.Msg.Value)
+
+	switch op.Op.(type) {
+	case *v1.Operation_OperationIndexSnapshot:
+	case *v1.Operation_OperationRestore:
+	default:
+		return nil, fmt.Errorf("operation %v is not a restore or snapshot operation", req.Msg.Value)
 	}

 	signature, err := signInt64(op.Id) // the signature authenticates the download URL. Note that the shared URL will be valid for any downloader.
 	if err != nil {
 		return nil, fmt.Errorf("failed to generate signature: %w", err)
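The bodies of signInt64 and checkDownloadURLSignature are not shown in this diff; judging by the crypto/hmac and encoding/hex imports below, the signature is an HMAC over the operation ID. A minimal standalone sketch of that pattern follows (the key handling and the signID/verifyID names are assumptions, not backrest's actual code):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// urlSigningKey stands in for whatever per-install secret the server derives; assumed here.
var urlSigningKey = []byte("example-secret")

// signID returns a hex-encoded HMAC-SHA256 over the big-endian encoding of id.
func signID(id int64) string {
	mac := hmac.New(sha256.New, urlSigningKey)
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], uint64(id))
	mac.Write(buf[:])
	return hex.EncodeToString(mac.Sum(nil))
}

// verifyID recomputes the signature and compares it in constant time.
func verifyID(id int64, signature string) bool {
	return hmac.Equal([]byte(signID(id)), []byte(signature))
}

func main() {
	sig := signID(42)
	fmt.Println(sig, verifyID(42, sig), verifyID(43, sig))
}
```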
@@ -2,24 +2,28 @@ package api

 import (
 	"archive/tar"
+	"bytes"
 	"compress/gzip"
 	"crypto/hmac"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
 	"os"
 	"path/filepath"
+	"runtime"
 	"strconv"
 	"strings"
 	"time"

 	v1 "github.com/garethgeorge/backrest/gen/go/v1"
 	"github.com/garethgeorge/backrest/internal/oplog"
+	"github.com/garethgeorge/backrest/internal/orchestrator"
 	"go.uber.org/zap"
 )

-func NewDownloadHandler(oplog *oplog.OpLog) http.Handler {
+func NewDownloadHandler(oplog *oplog.OpLog, orchestrator *orchestrator.Orchestrator) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		p := r.URL.Path[1:]
@@ -39,39 +43,107 @@ func NewDownloadHandler(oplog *oplog.OpLog) http.Handler {
 			http.Error(w, "restore not found", http.StatusNotFound)
 			return
 		}
-		restoreOp, ok := op.Op.(*v1.Operation_OperationRestore)
-		if !ok {
+
+		switch typedOp := op.Op.(type) {
+		case *v1.Operation_OperationIndexSnapshot:
+			handleIndexSnapshotDownload(w, r, orchestrator, op, typedOp, filePath)
+		case *v1.Operation_OperationRestore:
+			handleRestoreDownload(w, r, typedOp, filePath)
+		default:
 			http.Error(w, "restore not found", http.StatusNotFound)
 			return
 		}
-		targetPath := restoreOp.OperationRestore.GetTarget()
-		if targetPath == "" {
-			http.Error(w, "restore target not found", http.StatusNotFound)
-			return
-		}
-		fullPath := filepath.Join(targetPath, filePath)
-
-		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=archive-%v.tar.gz", time.Now().Format("2006-01-02-15-04-05")))
-		w.Header().Set("Content-Type", "application/gzip")
-		w.Header().Set("Content-Transfer-Encoding", "binary")
-
-		gzw, err := gzip.NewWriterLevel(w, gzip.BestSpeed)
-		if err != nil {
-			zap.S().Errorf("error creating gzip writer: %v", err)
-			http.Error(w, "error creating gzip writer", http.StatusInternalServerError)
-			return
-		}
-		if err := tarDirectory(gzw, fullPath); err != nil {
-			zap.S().Errorf("error creating tar archive: %v", err)
-			http.Error(w, "error creating tar archive", http.StatusInternalServerError)
-			return
-		}
-		if err := gzw.Close(); err != nil {
-			http.Error(w, "error creating tar archive", http.StatusInternalServerError)
-		}
 	})
 }

+func handleIndexSnapshotDownload(w http.ResponseWriter, r *http.Request, orchestrator *orchestrator.Orchestrator, op *v1.Operation, indexOp *v1.Operation_OperationIndexSnapshot, filePath string) {
+	repoCfg, err := orchestrator.GetRepo(op.RepoId)
+	if err != nil {
+		http.Error(w, "error getting repo", http.StatusInternalServerError)
+		return
+	}
+
+	if repoCfg.Guid != op.RepoGuid {
+		http.Error(w, "repo GUID does not match", http.StatusNotFound)
+		return
+	}
+
+	repo, err := orchestrator.GetRepoOrchestrator(op.RepoId)
+	if err != nil {
+		http.Error(w, "error getting repo", http.StatusInternalServerError)
+		return
+	}
+
+	dumpErrCh := make(chan error, 1)
+	piper, pipew := io.Pipe()
+
+	go func() {
+		dumpErrCh <- repo.Dump(r.Context(), indexOp.OperationIndexSnapshot.Snapshot.GetId(), filePath, pipew)
+		pipew.Close()
+	}()
+
+	firstBytesBuffer := bytes.NewBuffer(nil)
+	_, err = io.CopyN(firstBytesBuffer, piper, 32*1024)
+	if err != nil && !errors.Is(err, io.EOF) {
+		zap.S().Errorf("error copying snapshot: %v", err)
+		http.Error(w, fmt.Sprintf("error copying snapshot: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	select {
+	case dumpErr := <-dumpErrCh:
+		if dumpErr != nil {
+			zap.S().Errorf("error dumping snapshot: %v", dumpErr)
+			http.Error(w, fmt.Sprintf("error dumping snapshot: %v", dumpErr), http.StatusInternalServerError)
+			return
+		}
+	default:
+	}
+
+	if runtime.GOOS != "windows" && IsTarArchive(bytes.NewReader(firstBytesBuffer.Bytes())) && filepath.Ext(filePath) != ".tar" {
+		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%v.tar", filePath))
+	} else if runtime.GOOS == "windows" && IsZipArchive(bytes.NewReader(firstBytesBuffer.Bytes())) && filepath.Ext(filePath) != ".zip" {
+		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%v.zip", filePath))
+	} else {
+		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%v", filePath))
+	}
+	w.Header().Set("Content-Transfer-Encoding", "binary")
+	w.WriteHeader(http.StatusOK)
+
+	if _, err := io.Copy(w, firstBytesBuffer); err != nil {
+		zap.S().Errorf("error copying snapshot: %v", err)
+		return
+	}
+	if _, err := io.Copy(w, piper); err != nil {
+		zap.S().Errorf("error copying snapshot: %v", err)
+	}
+}
+
+func handleRestoreDownload(w http.ResponseWriter, r *http.Request, op *v1.Operation_OperationRestore, filePath string) {
+	targetPath := op.OperationRestore.GetTarget()
+	if targetPath == "" {
+		http.Error(w, "restore target not found", http.StatusNotFound)
+		return
+	}
+	fullPath := filepath.Join(targetPath, filePath)
+
+	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=archive-%v.tar.gz", time.Now().Format("2006-01-02-15-04-05")))
+	w.Header().Set("Content-Type", "application/gzip")
+	w.Header().Set("Content-Transfer-Encoding", "binary")
+
+	gzw, err := gzip.NewWriterLevel(w, gzip.BestSpeed)
+	if err != nil {
+		zap.S().Errorf("error creating gzip writer: %v", err)
+		http.Error(w, "error creating gzip writer", http.StatusInternalServerError)
+		return
+	}
+	defer gzw.Close()
+
+	if err := tarDirectory(gzw, fullPath); err != nil {
+		zap.S().Errorf("error creating tar archive: %v", err)
+		http.Error(w, "error creating tar archive", http.StatusInternalServerError)
+	}
+}
+
 func parseDownloadPath(p string) (int64, string, string, error) {
 	sep := strings.Index(p, "/")
 	if sep == -1 {
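handleIndexSnapshotDownload above buffers the first 32 KiB coming out of the restic dump pipe before touching the response: that prefix is enough to sniff the archive type (and to surface an early dump failure) while headers can still be set, after which the buffered prefix and the remainder of the pipe are streamed. A minimal sketch of that peek-then-stream pattern in isolation (function names and the plain io.Reader source are illustrative, not backrest code):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"
)

// peekThenStream buffers up to 32 KiB from src so the caller could inspect it
// (e.g. check archive magic bytes) before any of the body is written to dst,
// then flushes the prefix and streams the rest.
func peekThenStream(src io.Reader, dst io.Writer) error {
	head := bytes.NewBuffer(nil)
	if _, err := io.CopyN(head, src, 32*1024); err != nil && !errors.Is(err, io.EOF) {
		return err
	}
	// ... inspect head.Bytes() here and set response headers accordingly ...
	if _, err := io.Copy(dst, head); err != nil {
		return err
	}
	_, err := io.Copy(dst, src)
	return err
}

func main() {
	var out bytes.Buffer
	if err := peekThenStream(strings.NewReader("hello world"), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}
```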
@@ -105,40 +177,67 @@ func checkDownloadURLSignature(id int64, signature string) (bool, error) {
 }

 func tarDirectory(w io.Writer, dirpath string) error {
-	t := tar.NewWriter(w)
-	if err := filepath.Walk(dirpath, func(path string, info os.FileInfo, err error) error {
+	tw := tar.NewWriter(w)
+	defer tw.Close()
+
+	return filepath.Walk(dirpath, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}
 		if info.IsDir() {
 			return nil
 		}
-		stat, err := os.Stat(path)
+
+		// Create a new tar header
+		header, err := tar.FileInfoHeader(info, info.Name())
 		if err != nil {
-			return fmt.Errorf("stat %v: %w", path, err)
+			return fmt.Errorf("creating tar header for %s: %w", path, err)
 		}
-		file, err := os.OpenFile(path, os.O_RDONLY, 0)
+
+		// Update the name to be relative to the directory we're archiving
+		relPath, err := filepath.Rel(dirpath, path)
 		if err != nil {
-			return fmt.Errorf("open %v: %w", path, err)
+			return fmt.Errorf("getting relative path for %s: %w", path, err)
 		}
+		header.Name = relPath
+
+		// Write the header
+		if err := tw.WriteHeader(header); err != nil {
+			return fmt.Errorf("writing tar header for %s: %w", path, err)
+		}
+
+		// Open the file
+		file, err := os.Open(path)
+		if err != nil {
+			return fmt.Errorf("opening file %s: %w", path, err)
+		}
 		defer file.Close()

-		if err := t.WriteHeader(&tar.Header{
-			Name:    path[len(dirpath)+1:],
-			Size:    stat.Size(),
-			Mode:    int64(stat.Mode()),
-			ModTime: stat.ModTime(),
-		}); err != nil {
-			return err
-		}
-		if n, err := io.CopyN(t, file, stat.Size()); err != nil {
-			zap.L().Warn("error copying file to tar archive", zap.String("path", path), zap.Error(err))
-		} else if n != stat.Size() {
-			zap.L().Warn("error copying file to tar archive: short write", zap.String("path", path))
+		// Copy the file contents
+		if _, err := io.Copy(tw, file); err != nil {
+			return fmt.Errorf("copying file %s to tar archive: %w", path, err)
 		}
+
 		return nil
-	}); err != nil {
-		return err
-	}
-	return t.Flush()
+	})
 }
+
+func IsTarArchive(r io.Reader) bool {
+	if r == nil {
+		return false
+	}
+
+	tr := tar.NewReader(r)
+	_, err := tr.Next()
+	return err == nil
+}
+
+func IsZipArchive(r io.Reader) bool {
+	// Use magic number
+	var b [4]byte
+	_, err := r.Read(b[:])
+	if err != nil {
+		return false
+	}
+	return bytes.Equal([]byte{0x50, 0x4B, 0x03, 0x04}, b[:])
+}
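IsTarArchive and IsZipArchive above are the two probes the snapshot download path runs against the buffered prefix: a tar stream is recognized by successfully parsing its first header, a zip stream by the PK\x03\x04 magic bytes. An illustrative combined sniffer built on the same checks (not part of this commit):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

var zipMagic = []byte{0x50, 0x4B, 0x03, 0x04}

// sniffArchive reports whether a buffered prefix looks like a zip, a tar, or neither.
func sniffArchive(prefix []byte) string {
	if bytes.HasPrefix(prefix, zipMagic) {
		return "zip"
	}
	if _, err := tar.NewReader(bytes.NewReader(prefix)).Next(); err == nil {
		return "tar"
	}
	return "unknown"
}

func main() {
	// Build a tiny in-memory tar archive to exercise the tar branch.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "hello.txt", Size: 5, Mode: 0644})
	io.WriteString(tw, "hello")
	tw.Close()

	fmt.Println(sniffArchive(buf.Bytes()))             // tar
	fmt.Println(sniffArchive(zipMagic))                // zip
	fmt.Println(sniffArchive([]byte("plain content"))) // unknown
}
```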
@@ -336,6 +336,17 @@ func (r *RepoOrchestrator) Restore(ctx context.Context, snapshotId string, snaps
 	return protoutil.RestoreProgressEntryToProto(summary), nil
 }

+func (r *RepoOrchestrator) Dump(ctx context.Context, snapshotId string, snapshotPath string, output io.Writer) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	ctx, flush := forwardResticLogs(ctx)
+	defer flush()
+
+	r.logger(ctx).Debug("dump snapshot", zap.String("snapshot", snapshotId), zap.String("path", snapshotPath))
+
+	return r.repo.Dump(ctx, snapshotId, snapshotPath, output)
+}
+
 // UnlockIfAutoEnabled unlocks the repo if the auto unlock feature is enabled.
 func (r *RepoOrchestrator) UnlockIfAutoEnabled(ctx context.Context) error {
 	if !r.repoConfig.AutoUnlock {
@@ -9,6 +9,7 @@ import (
 	"io"
 	"os"
 	"os/exec"
+	"runtime"
 	"slices"
 	"strings"
 	"sync"
@@ -372,6 +373,29 @@ func (r *Repo) ForgetSnapshot(ctx context.Context, snapshotId string, opts ...Ge
 	return nil
 }

+func (r *Repo) Dump(ctx context.Context, snapshotID string, file string, dumpOutput io.Writer, opts ...GenericOption) error {
+	args := []string{"dump", snapshotID, file}
+	if runtime.GOOS == "windows" {
+		args = append(args, "--archive", "zip")
+	} else {
+		args = append(args, "--archive", "tar")
+	}
+	cmd := r.commandWithContext(ctx, args, opts...)
+	logWriter := LoggerFromContext(ctx)
+	if logWriter == nil {
+		logWriter = io.Discard
+	}
+	errorCollector := errorMessageCollector{}
+
+	// Dump writes binary output to stdout, we should only ever capture and print stderr
+	r.handleOutput(cmd, withStdOutTo(dumpOutput), withStdErrTo(logWriter), withStdErrTo(&errorCollector))
+	if err := cmd.Run(); err != nil {
+		return errorCollector.AddCmdOutputToError(cmd, err)
+	}
+
+	return nil
+}
+
 func (r *Repo) Prune(ctx context.Context, pruneOutput io.Writer, opts ...GenericOption) error {
 	return r.runSimpleCommand(ctx, []string{"prune"}, pruneOutput, opts...)
 }
@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"path"
 	"path/filepath"
 	"reflect"
 	"runtime"
@@ -608,6 +609,40 @@ func TestResticCheck(t *testing.T) {
 	}
 }

+func TestResticDump(t *testing.T) {
+	t.Parallel()
+
+	if runtime.GOOS == "windows" {
+		t.Skip("skipping on windows")
+	}
+
+	repo := t.TempDir()
+	r := NewRepo(helpers.ResticBinary(t), repo, WithFlags("--no-cache"), WithEnv("RESTIC_PASSWORD=test"))
+	if err := r.Init(context.Background()); err != nil {
+		t.Fatalf("failed to init repo: %v", err)
+	}
+
+	testDataDir := t.TempDir()
+	if err := os.WriteFile(path.Join(testDataDir, "test.txt"), []byte("test data"), 0644); err != nil {
+		t.Fatalf("failed to create test data: %v", err)
+	}
+
+	_, err := r.Backup(context.Background(), []string{testDataDir}, nil)
+	if err != nil {
+		t.Fatalf("failed to backup and create new snapshot: %v", err)
+	}
+
+	// dump all files
+	output := bytes.NewBuffer(nil)
+	if err := r.Dump(context.Background(), "latest", path.Join(testDataDir, "test.txt"), output); err != nil {
+		t.Fatalf("failed to dump repo: %v", err)
+	}
+
+	if !bytes.Contains(output.Bytes(), []byte("test data")) {
+		t.Errorf("wanted output to contain 'test data', got: %s", output.String())
+	}
+}
+
 func TestResticExitError(t *testing.T) {
 	t.Parallel()

@@ -6,5 +6,9 @@
   "/v1.Authentication": {
     "target": "http://localhost:9898",
     "secure": false
-  }
+  },
+  "/download": {
+    "target": "http://localhost:9898",
+    "secure": false
+  }
 }
@@ -227,6 +227,7 @@ export const OperationRow = ({
       children: (
         <SnapshotBrowser
           snapshotId={snapshotOp.snapshot!.id}
+          snapshotOpId={operation.id}
           repoId={operation.repoId}
           planId={operation.planId}
         />
@@ -76,8 +76,10 @@ export const SnapshotBrowser = ({
   repoId,
   planId, // optional: purely to link restore operations to the right plan.
   snapshotId,
+  snapshotOpId,
 }: React.PropsWithoutRef<{
   snapshotId: string;
+  snapshotOpId?: bigint;
   repoId: string;
   planId?: string;
 }>) => {
@@ -85,6 +87,22 @@ export const SnapshotBrowser = ({
   const showModal = useShowModal();
   const [treeData, setTreeData] = useState<DataNode[]>([]);

+  const respToNodes = (resp: ListSnapshotFilesResponse): DataNode[] => {
+    const nodes = resp
+      .entries!.filter((entry) => entry.path!.length >= resp.path!.length)
+      .map((entry) => {
+        const node: DataNode = {
+          key: entry.path!,
+          title: <FileNode entry={entry} snapshotOpId={snapshotOpId} />,
+          isLeaf: entry.type === "file",
+          icon: entry.type === "file" ? <FileOutlined /> : <FolderOutlined />,
+        };
+        return node;
+      });
+
+    return nodes;
+  };
+
   useEffect(() => {
     setTreeData(
       respToNodes(
@@ -132,7 +150,7 @@ export const SnapshotBrowser = ({
         }

         const toUpdateCopy = { ...toUpdate };
-        toUpdateCopy.children = respToNodes(resp);
+        toUpdateCopy.children = respToNodes(resp, snapshotOpId);

         return treeData.map((node) => {
           const didUpdate = replaceKeyInTree(node, key as string, toUpdateCopy);
@@ -153,23 +171,13 @@ export const SnapshotBrowser = ({
     );
   };

-  const respToNodes = (resp: ListSnapshotFilesResponse): DataNode[] => {
-    const nodes = resp
-      .entries!.filter((entry) => entry.path!.length >= resp.path!.length)
-      .map((entry) => {
-        const node: DataNode = {
-          key: entry.path!,
-          title: <FileNode entry={entry} />,
-          isLeaf: entry.type === "file",
-          icon: entry.type === "file" ? <FileOutlined /> : <FolderOutlined />,
-        };
-        return node;
-      });
-
-    return nodes;
-  };
-
-const FileNode = ({ entry }: { entry: LsEntry }) => {
+const FileNode = ({
+  entry,
+  snapshotOpId,
+}: {
+  entry: LsEntry;
+  snapshotOpId?: bigint;
+}) => {
   const [dropdown, setDropdown] = useState<React.ReactNode>(null);
   const { snapshotId, repoId, planId, showModal } = React.useContext(
     SnapshotBrowserContext
@@ -215,6 +223,22 @@ const FileNode = ({ entry }: { entry: LsEntry }) => {
             );
           },
         },
+        snapshotOpId
+          ? {
+              key: "download",
+              label: "Download",
+              onClick: () => {
+                backrestService
+                  .getDownloadURL({ value: snapshotOpId })
+                  .then((resp) => {
+                    window.open(resp.value + entry.path, "_blank");
+                  })
+                  .catch((e) => {
+                    alert("Failed to fetch download URL: " + e.message);
+                  });
+              },
+            }
+          : null,
         ],
       }}
     >