From b98b1dc3ccfb0f810259ae3e1e00366da7b0eee5 Mon Sep 17 00:00:00 2001 From: Gareth Date: Sat, 2 May 2026 22:29:39 -0700 Subject: [PATCH] feat: experimental multihost sync (#1204) --- .devcontainer/Dockerfile | 35 +- .devcontainer/devcontainer.json | 32 +- .envrc | 1 + .gitignore | 5 + README.md | 32 +- docs/src/.vitepress/config.mts | 1 + docs/src/docs/multihost.md | 136 +++ flake.lock | 27 + flake.nix | 65 ++ gen/go/v1/config.pb.go | 526 ++++++--- gen/go/v1/crypto.pb.go | 33 +- gen/go/v1/service.pb.go | 275 +++-- gen/go/v1/service_grpc.pb.go | 84 +- gen/go/v1/v1connect/service.connect.go | 75 +- gen/go/v1sync/syncservice.pb.go | 532 ++++++--- gen/go/v1sync/syncservice_grpc.pb.go | 43 +- .../v1syncconnect/syncservice.connect.go | 31 + internal/api/backresthandler.go | 123 +- internal/api/syncapi/authmiddleware.go | 245 ---- internal/api/syncapi/authmiddleware_test.go | 200 ---- internal/api/syncapi/cmdstreamutil.go | 18 +- internal/api/syncapi/errors.go | 28 - internal/api/syncapi/pairing_test.go | 416 +++++++ internal/api/syncapi/permissions/groups.go | 4 + .../api/syncapi/permissions/permissions.go | 25 +- internal/api/syncapi/signing.go | 70 ++ internal/api/syncapi/syncapi_test.go | 12 +- internal/api/syncapi/syncclient.go | 415 ++++--- internal/api/syncapi/synccommon.go | 97 +- internal/api/syncapi/syncmanager.go | 120 +- internal/api/syncapi/syncoperations_test.go | 397 +++++++ internal/api/syncapi/syncserver.go | 278 ++++- internal/api/syncapi/syncstatehandler.go | 22 + internal/api/syncapi/uriutil.go | 60 - internal/config/config.go | 42 + internal/config/jsonstore.go | 37 +- internal/config/memstore.go | 16 + internal/config/networksanitize_test.go | 24 +- internal/config/validate.go | 93 +- internal/config/validate_test.go | 235 ++++ internal/cryptoutil/identity.go | 12 +- internal/cryptoutil/identity_test.go | 4 +- internal/cryptoutil/pairingtoken.go | 70 ++ internal/cryptoutil/pairingtoken_test.go | 117 ++ internal/oplog/memstore/memstore.go | 49 +- 
internal/oplog/oplog.go | 45 + internal/oplog/query.go | 4 + internal/oplog/sqlitestore/sqlitestore.go | 61 +- .../oplog/storetests/storecontract_test.go | 3 + internal/orchestrator/orchestrator.go | 41 +- internal/orchestrator/repo/repo.go | 14 +- internal/orchestrator/taskrunnerimpl.go | 7 +- internal/orchestrator/tasks/task.go | 64 +- internal/orchestrator/tasks/taskbackup.go | 10 +- .../orchestrator/tasks/taskcollectgarbage.go | 5 + internal/orchestrator/tasks/taskforget.go | 279 ++++- .../orchestrator/tasks/taskindexsnapshots.go | 42 - internal/orchestrator/tasks/taskrun_test.go | 648 +++++++++++ .../orchestrator/tasks/testhelpers_test.go | 91 ++ internal/protoutil/conversion.go | 13 + pkg/restic/restic.go | 19 +- pkg/restic/restic_test.go | 59 + proto/v1/config.proto | 26 +- proto/v1/crypto.proto | 6 +- proto/v1/service.proto | 15 + proto/v1sync/syncservice.proto | 32 +- scripts/testing/run-named.sh | 72 ++ shell.nix | 3 + webui/gen/ts/google/api/annotations_pb.ts | 2 +- webui/gen/ts/google/api/http_pb.ts | 2 +- webui/gen/ts/types/value_pb.ts | 2 +- webui/gen/ts/v1/authentication_pb.ts | 2 +- webui/gen/ts/v1/config_pb.ts | 177 ++- webui/gen/ts/v1/crypto_pb.ts | 16 +- webui/gen/ts/v1/operations_pb.ts | 2 +- webui/gen/ts/v1/restic_pb.ts | 2 +- webui/gen/ts/v1/service_pb.ts | 76 +- webui/gen/ts/v1sync/syncservice_pb.ts | 174 ++- webui/messages/en.json | 9 +- webui/src/api/logState.ts | 3 + webui/src/app/App.tsx | 377 +++++- webui/src/components/common/FormModal.tsx | 1 + .../components/common/RetentionPolicyView.tsx | 189 +++ webui/src/components/common/SectionCard.tsx | 62 + webui/src/components/common/StatusPill.tsx | 47 + webui/src/components/common/SyncStateIcon.tsx | 23 +- webui/src/components/common/ToggleField.tsx | 76 ++ webui/src/components/common/TwoPaneModal.tsx | 404 +++++++ webui/src/components/ui/tooltip.tsx | 4 +- .../features/dashboard/SummaryDashboard.tsx | 70 +- .../features/operations/OperationListView.tsx | 1 + 
.../features/operations/OperationTreeView.tsx | 1 + webui/src/features/plans/AddPlanModal.tsx | 759 +++++------- .../features/repositories/AddRepoModal.tsx | 646 +++++----- webui/src/features/settings/SettingsModal.tsx | 1035 +++++++++++------ webui/src/state/buildcfg.ts | 2 +- 96 files changed, 8092 insertions(+), 2793 deletions(-) create mode 100644 .envrc create mode 100644 docs/src/docs/multihost.md create mode 100644 flake.lock create mode 100644 flake.nix delete mode 100644 internal/api/syncapi/authmiddleware.go delete mode 100644 internal/api/syncapi/authmiddleware_test.go create mode 100644 internal/api/syncapi/pairing_test.go create mode 100644 internal/api/syncapi/signing.go create mode 100644 internal/api/syncapi/syncoperations_test.go delete mode 100644 internal/api/syncapi/uriutil.go create mode 100644 internal/config/validate_test.go create mode 100644 internal/cryptoutil/pairingtoken.go create mode 100644 internal/cryptoutil/pairingtoken_test.go create mode 100644 internal/orchestrator/tasks/taskrun_test.go create mode 100644 internal/orchestrator/tasks/testhelpers_test.go create mode 100755 scripts/testing/run-named.sh create mode 100644 shell.nix create mode 100644 webui/src/components/common/RetentionPolicyView.tsx create mode 100644 webui/src/components/common/SectionCard.tsx create mode 100644 webui/src/components/common/StatusPill.tsx create mode 100644 webui/src/components/common/ToggleField.tsx create mode 100644 webui/src/components/common/TwoPaneModal.tsx diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 491cf7a9..101022e3 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,22 +1,19 @@ -## Based on microsoft go devcontainer - https://github.com/microsoft/vscode-dev-containers/blob/v0.205.2/containers/go/.devcontainer/Dockerfile -# [Choice] Go version (use -bullseye variants on local arm64/Apple Silicon): 1, 1.16, 1.17, 1-bullseye, 1.16-bullseye, 1.17-bullseye, 1-buster, 1.16-buster, 1.17-buster 
-ARG VARIANT=1-bullseye -FROM mcr.microsoft.com/vscode/devcontainers/go:${VARIANT} - -# [Choice] Node.js version: none, lts/*, 16, 14, 12, 10 -ARG NODE_VERSION="none" -RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi - -RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - && apt-get -y install --no-install-recommends protobuf-compiler +FROM mcr.microsoft.com/devcontainers/base:bookworm +# Install Nix in single-user mode for the vscode user USER vscode +RUN curl -L https://nixos.org/nix/install | bash -s -- --no-daemon +ENV PATH="/home/vscode/.nix-profile/bin:${PATH}" -RUN go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest \ - && go install github.com/bufbuild/buf/cmd/buf@v1.27.2 \ - && go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest \ - && go install google.golang.org/protobuf/cmd/protoc-gen-go@latest \ - && go install connectrpc.com/connect/cmd/protoc-gen-connect-go@latest \ - && go install github.com/GeertJohan/go.rice/rice@latest \ - && go install github.com/goreleaser/goreleaser@latest \ - && npm install -g @bufbuild/protoc-gen-es @connectrpc/protoc-gen-connect-es +# Install direnv via Nix and configure shell hooks +RUN . /home/vscode/.nix-profile/etc/profile.d/nix.sh \ + && nix-env -iA nixpkgs.direnv +RUN echo '. /home/vscode/.nix-profile/etc/profile.d/nix.sh' >> /home/vscode/.bashrc \ + && echo 'eval "$(direnv hook bash)"' >> /home/vscode/.bashrc \ + && echo '. /home/vscode/.nix-profile/etc/profile.d/nix.sh' >> /home/vscode/.zshrc \ + && echo 'eval "$(direnv hook zsh)"' >> /home/vscode/.zshrc + +# Pre-populate the nix store with project dependencies so first open is fast +COPY shell.nix /tmp/shell.nix +RUN . 
/home/vscode/.nix-profile/etc/profile.d/nix.sh \ + && nix-shell /tmp/shell.nix --run "echo 'nix dependencies cached'" diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index f0b66772..191f4cc0 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,49 +1,29 @@ { - "name": "Go", + "name": "Backrest Dev", "build": { "dockerfile": "Dockerfile", - "args": { - // Update the VARIANT arg to pick a version of Go: 1, 1.16, 1.17 - // Append -bullseye or -buster to pin to an OS version. - // Use -bullseye variants on local arm64/Apple Silicon. - "VARIANT": "1-1.24-bookworm", - // Options - "NODE_VERSION": "lts/*" - } + "context": ".." }, "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ], + "postCreateCommand": "direnv allow", "customizations": { "vscode": { - // Set *default* container specific settings.json values on container create. "settings": { "go.toolsManagement.checkForUpdates": "local", "go.useLanguageServer": true, - "go.gopath": "/go", - "go.goroot": "/usr/local/go", "typescript.tsdk": "webui/node_modules/typescript/lib", - "typescript.enablePromptUseWorkspaceTsdk": true, - "gitlens.telemetry.enabled": false + "typescript.enablePromptUseWorkspaceTsdk": true }, - // Add the IDs of extensions you want installed when the container is created. "extensions": [ "golang.Go", - "ms-azuretools.vscode-docker", - "mhutchie.git-graph", - "eamodio.gitlens", - "donjayamanne.githistory", - "esbenp.prettier-vscode", - "iulian-radu-at.vscode-tasks-sidebar" + "mkhl.direnv", + "esbenp.prettier-vscode" ] } }, - // Use 'forwardPorts' to make a list of ports inside the container available locally. - // "forwardPorts": [], - // Use 'postCreateCommand' to run commands after the container is created. - // "postCreateCommand": "go version", - // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 
"remoteUser": "vscode" } diff --git a/.envrc b/.envrc new file mode 100644 index 00000000..3550a30f --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +use flake diff --git a/.gitignore b/.gitignore index 4a75d3a8..2f93f8b9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# Nix +result + /backrest /backrest-* dist @@ -7,4 +10,6 @@ cmd/backrest/backrest .DS_Store .idea/ .pnpm-store/ +.gemini +.claude webui/dist-backup/ diff --git a/README.md b/README.md index 11bf1c2e..3d5820b4 100644 --- a/README.md +++ b/README.md @@ -251,17 +251,29 @@ Contributions are welcome! See the [issues](https://github.com/garethgeorge/back ## Build Dependencies -- [Node.js](https://nodejs.org/en) for UI development -- [Go](https://go.dev/) 1.21 or greater for server development +All build dependencies are defined in `shell.nix` and can be activated automatically using [Nix](https://nixos.org/) and [direnv](https://direnv.net/). + +### Using Nix + direnv (Recommended) + +1. Install [Nix](https://nixos.org/download/) and [direnv](https://direnv.net/docs/installation.html) +2. [Hook direnv into your shell](https://direnv.net/docs/hook.html) (e.g. `eval "$(direnv hook bash)"` in your `.bashrc`) +3. Clone the repo and `cd` into it +4. Run `direnv allow` to trust the `.envrc` — all dependencies (Go, Node.js, pnpm, protoc, buf, etc.) 
will be available in your shell automatically + +### Manual Setup + +If you prefer not to use Nix, install the following manually: + +- [Go](https://go.dev/) 1.24 or greater +- [Node.js](https://nodejs.org/en) 20.x and [pnpm](https://pnpm.io/) 9 - [goreleaser](https://github.com/goreleaser/goreleaser) `go install github.com/goreleaser/goreleaser@latest` -**(Optional) To Edit Protobuffers** +**(Optional) To edit protobuf definitions:** ```sh apt install -y protobuf-compiler go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest -go install github.com/bufbuild/buf/cmd/buf@v1.47.2 -go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest +go install github.com/bufbuild/buf/cmd/buf@latest go install google.golang.org/protobuf/cmd/protoc-gen-go@latest go install connectrpc.com/connect/cmd/protoc-gen-connect-go@latest npm install -g @bufbuild/protoc-gen-es @@ -270,22 +282,22 @@ npm install -g @bufbuild/protoc-gen-es ## Compiling ```sh -(cd webui && npm i && npm run build) +(cd webui && pnpm i && pnpm run build) (cd cmd/backrest && go build .) ``` ## Using VSCode Dev Containers -You can also use VSCode with [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension to quickly get up and running with a working development and debugging environment. +The dev container uses Nix and direnv to provide all dependencies. When the container starts, `direnv allow` runs automatically so the Nix shell is activated in every terminal. -0. Make sure Docker and VSCode with [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension is installed +0. Make sure Docker and VSCode with the [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension is installed 1. Clone this repository 2. Open this folder in VSCode 3. 
When prompted, click on `Open in Container` button, or run `> Dev Containers: Rebuild and Reopen in Containers` command -4. When container is started, go to `Run and Debug`, choose `Debug Backrest (backend+frontend)` and run it +4. When the container is started, go to `Run and Debug`, choose `Debug Backrest (backend+frontend)` and run it > [!NOTE] -> Provided launch configuration has hot reload for typescript frontend. +> Provided launch configuration has hot reload for the typescript frontend. ## Translations diff --git a/docs/src/.vitepress/config.mts b/docs/src/.vitepress/config.mts index a4b5c86a..59d9577f 100644 --- a/docs/src/.vitepress/config.mts +++ b/docs/src/.vitepress/config.mts @@ -28,6 +28,7 @@ export default defineConfig({ items: [ { text: 'Operations', link: '/docs/operations' }, { text: 'Hooks', link: '/docs/hooks' }, + { text: 'Multihost Sync', link: '/docs/multihost' }, { text: 'API', link: '/docs/api' } ] }, diff --git a/docs/src/docs/multihost.md b/docs/src/docs/multihost.md new file mode 100644 index 00000000..70d7daa5 --- /dev/null +++ b/docs/src/docs/multihost.md @@ -0,0 +1,136 @@ +# Multihost Sync + +Backrest supports syncing configuration and operation history between multiple instances. This allows you to monitor and manage backups across several machines from a single Backrest installation. + +## Concepts + +**Server**: A Backrest instance that accepts connections from other instances. The server can push shared repos to clients and receive operation updates from them. + +**Client**: A Backrest instance that connects to a server. The client sends its backup operation history to the server and can receive shared repo configurations. + +**Pairing Token**: A one-time token generated by the server that allows a client to authenticate and register itself as an authorized client. + +**Shared Repo**: A repository on the server marked as "shared." 
Its configuration is automatically pushed to all connected clients that have the `Receive Shared Repos` permission. + +## Setup Overview + +The typical setup flow is: + +1. Configure both instances with unique **Instance IDs** (Settings > General) +2. On the server, generate a **pairing token** with the desired permissions +3. On the client, add the server as a **known host** using the pairing token +4. The client connects and is automatically registered as an authorized client on the server + +## Step 1: Generate a Pairing Token (Server) + +1. Open **Settings** on the server instance +2. Scroll to the **Multihost** section +3. Under **Pairing Tokens**, click **Generate** +4. Configure the token: + - **Label**: A human-readable name (e.g. "laptop-backup") + - **TTL**: How long the token is valid (e.g. 15 minutes, 1 hour, or forever) + - **Max Uses**: How many clients can pair with this token (0 = unlimited) + - **Permissions**: What the paired client will be allowed to do (see [Permissions](#permissions) below) +5. Copy the generated token string — an opaque value that encodes the server's identity and a one-time pairing secret + +::: tip +Pairing tokens are consumed on use. Once a client has paired, the token is no longer needed. Generate short-lived tokens with limited uses for better security. +::: + +## Step 2: Connect the Client + +1. Open **Settings** on the client instance +2. Scroll to the **Multihost** section +3. Under **Known Hosts**, click **Add** +4. Paste the **pairing token** from the server +5. Enter the server's **Instance URL** (e.g. `https://backrest.example.com:9898`) +6. Save the configuration + +The client will connect to the server, present the pairing secret, and be automatically registered as an authorized client. After pairing, the token is cleared and authentication uses public key identity going forward. + +## Permissions + +Permissions control what a connected client can do. They are configured when generating a pairing token or by editing an authorized client's permissions after pairing. 
+ | Permission | Description | +|---|---| +| **Read Operations** | The client sends its operation history (backup results, errors, etc.) to the server. This enables centralized monitoring. | +| **Read Config** | The client can read repo and plan configuration from the server. | +| **Read/Write Config** | The client can read and write repo and plan configuration on the server. | +| **Receive Shared Repos** | The server automatically pushes all repos marked as "shared" to this client. | + +### Scopes + +Permissions (except Receive Shared Repos) can be scoped to specific repos or plans: + +- **All** (`*`): Applies to all repos and plans +- **Specific repo**: `repo:<repo-id>` — applies only to the named repo +- **Specific plan**: `plan:<plan-id>` — applies only to the named plan +- **Exclude repo**: `!repo:<repo-id>` — applies to all except the named repo +- **Exclude plan**: `!plan:<plan-id>` — applies to all except the named plan + +## Shared Repos + +Marking a repo as "shared" on the server causes its configuration to be automatically pushed to all authorized clients with the `Receive Shared Repos` permission. + +To share a repo: + +1. Open the repo's settings on the server +2. Enable the **Shared** toggle +3. Save + +When a shared repo is received by a client: + +- The repo appears in the client's repo list with its `originInstanceId` set to the server's instance ID +- The repo is **read-only** on the client — it cannot be edited, only deleted +- **Scheduling of maintenance tasks (prune, check, forget) is skipped** for shared repos on the client — the server that owns the repo manages these operations +- The client can still run backups to the shared repo if it has plans configured for it + +::: warning +Shared repos are identified by their GUID. If the client already has a local repo with the same GUID, the shared repo will be skipped to avoid conflicts. +::: + +## Monitoring Sync Status + +After setup, the server's Settings page shows the connection status of each authorized client. 
A green indicator means the client is currently connected and syncing. + +The sync protocol uses a persistent bidirectional connection with: + +- **Heartbeats** to detect disconnections +- **Automatic reconnection** with exponential backoff if the connection drops +- **Manifest-based reconciliation** to efficiently sync only changed operations + +## Typical Configurations + +### Centralized Monitoring + +Push backup status from multiple machines to a single dashboard: + +- **Server permissions**: `Read Operations` scoped to `*` +- **Shared repos**: Not needed — each client manages its own repos +- **Result**: Server's operation log shows backup results from all clients + +### Shared Repository + +Multiple machines back up to the same repository, managed by the server: + +- **Server permissions**: `Receive Shared Repos` + `Read Operations` scoped to `*` +- **Server repo**: Mark the target repo as **Shared** +- **Result**: Clients receive the repo config automatically. The server handles forget, prune, and check. Clients run their own backup plans against the shared repo. + +### Full Config Management + +Server manages configuration for all clients: + +- **Server permissions**: `Read/Write Config` scoped to `*` + `Receive Shared Repos` +- **Result**: The server can push config changes (repos and plans) to connected clients + +## Troubleshooting + +**Client can't connect**: Verify the Instance URL is reachable from the client. The URL should include the port (default 9898). If using a reverse proxy, ensure it supports HTTP/2 and WebSocket connections (needed for the bidirectional sync stream). + +**Pairing fails**: Check that the pairing token hasn't expired and hasn't exceeded its max uses. Generate a new token if needed. + +**Shared repo not appearing on client**: Verify the client has the `Receive Shared Repos` permission. Check that the repo is marked as "shared" on the server. If the client already has a repo with the same GUID, the shared repo will be skipped. 
+ +**Stale snapshots on client**: When the server runs forget and removes snapshots, the client discovers this the next time it indexes snapshots for that repo (typically after a backup). If the client doesn't run backups to a shared repo, its snapshot list may show snapshots that the server has already forgotten until the next index operation. diff --git a/flake.lock b/flake.lock new file mode 100644 index 00000000..4b8e88ed --- /dev/null +++ b/flake.lock @@ -0,0 +1,27 @@ +{ + "nodes": { + "nixpkgs": { + "locked": { + "lastModified": 1776329215, + "narHash": "sha256-a8BYi3mzoJ/AcJP8UldOx8emoPRLeWqALZWu4ZvjPXw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "b86751bc4085f48661017fa226dee99fab6c651b", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "nixpkgs": "nixpkgs" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 00000000..150d63c6 --- /dev/null +++ b/flake.nix @@ -0,0 +1,65 @@ +{ + description = "Backrest development environment"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + }; + + outputs = { self, nixpkgs }: + let + supportedSystems = [ "x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ]; + forAllSystems = nixpkgs.lib.genAttrs supportedSystems; + in + { + devShells = forAllSystems (system: + let + pkgs = nixpkgs.legacyPackages.${system}; + in + { + default = pkgs.mkShell { + buildInputs = with pkgs; [ + # Go backend + go + goreleaser + + # Frontend + nodejs_20 + pnpm_9 + + # Protobuf / code generation + protobuf + buf + protoc-gen-go + protoc-gen-go-grpc + + # General build tools + gnumake + git + + # Runtime dependencies (for local testing) + restic + rclone + + # Shell + zsh + oh-my-posh + ]; + + SHELL = "${pkgs.zsh}/bin/zsh"; + OMP_THEME = "${pkgs.oh-my-posh}/share/oh-my-posh/themes/star.omp.json"; + + shellHook = '' + if [ -z 
"$IN_NIX_SHELL_ZSH" ]; then + export IN_NIX_SHELL_ZSH=1 + export ZDOTDIR=$(mktemp -d) + cat > "$ZDOTDIR/.zshrc" <<'ZSHRC' + [[ -f ~/.zshrc ]] && source ~/.zshrc + eval "$(oh-my-posh init zsh --config "$OMP_THEME")" + ZSHRC + exec ${pkgs.zsh}/bin/zsh + fi + ''; + }; + }); + }; +} diff --git a/gen/go/v1/config.pb.go b/gen/go/v1/config.pb.go index eae8d60c..f1d41ef2 100644 --- a/gen/go/v1/config.pb.go +++ b/gen/go/v1/config.pb.go @@ -34,6 +34,9 @@ const ( // When granted to an authorizedClient, the client will be able to write the configuration to the server. // When granted to a knownHost, the known host will be able to write configuration. Multihost_Permission_PERMISSION_READ_WRITE_CONFIG Multihost_Permission_Type = 3 // read and write configuration for the resource in scope. + // When granted to an authorizedClient, the server will push repos marked as 'shared' to the client. + // This permission does not use scopes — if present, all shared repos are pushed. + Multihost_Permission_PERMISSION_RECEIVE_SHARED_REPOS Multihost_Permission_Type = 4 ) // Enum value maps for Multihost_Permission_Type. @@ -43,12 +46,14 @@ var ( 1: "PERMISSION_READ_OPERATIONS", 2: "PERMISSION_READ_CONFIG", 3: "PERMISSION_READ_WRITE_CONFIG", + 4: "PERMISSION_RECEIVE_SHARED_REPOS", } Multihost_Permission_Type_value = map[string]int32{ - "PERMISSION_UNKNOWN": 0, - "PERMISSION_READ_OPERATIONS": 1, - "PERMISSION_READ_CONFIG": 2, - "PERMISSION_READ_WRITE_CONFIG": 3, + "PERMISSION_UNKNOWN": 0, + "PERMISSION_READ_OPERATIONS": 1, + "PERMISSION_READ_CONFIG": 2, + "PERMISSION_READ_WRITE_CONFIG": 3, + "PERMISSION_RECEIVE_SHARED_REPOS": 4, } ) @@ -76,7 +81,7 @@ func (x Multihost_Permission_Type) Number() protoreflect.EnumNumber { // Deprecated: Use Multihost_Permission_Type.Descriptor instead. 
func (Multihost_Permission_Type) EnumDescriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{1, 1, 0} + return file_v1_config_proto_rawDescGZIP(), []int{1, 2, 0} } type CommandPrefix_IONiceLevel int32 @@ -229,7 +234,7 @@ func (x Schedule_Clock) Number() protoreflect.EnumNumber { // Deprecated: Use Schedule_Clock.Descriptor instead. func (Schedule_Clock) EnumDescriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{8, 0} + return file_v1_config_proto_rawDescGZIP(), []int{9, 0} } type Hook_Condition int32 @@ -323,7 +328,7 @@ func (x Hook_Condition) Number() protoreflect.EnumNumber { // Deprecated: Use Hook_Condition.Descriptor instead. func (Hook_Condition) EnumDescriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 0} + return file_v1_config_proto_rawDescGZIP(), []int{10, 0} } type Hook_OnError int32 @@ -381,7 +386,7 @@ func (x Hook_OnError) Number() protoreflect.EnumNumber { // Deprecated: Use Hook_OnError.Descriptor instead. func (Hook_OnError) EnumDescriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 1} + return file_v1_config_proto_rawDescGZIP(), []int{10, 1} } type Hook_Webhook_Method int32 @@ -430,7 +435,7 @@ func (x Hook_Webhook_Method) Number() protoreflect.EnumNumber { // Deprecated: Use Hook_Webhook_Method.Descriptor instead. func (Hook_Webhook_Method) EnumDescriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 1, 0} + return file_v1_config_proto_rawDescGZIP(), []int{10, 1, 0} } // Config is the top level config object for restic UI. 
@@ -530,10 +535,11 @@ func (x *Config) GetMultihost() *Multihost { } type Multihost struct { - state protoimpl.MessageState `protogen:"open.v1"` - Identity *PrivateKey `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` - KnownHosts []*Multihost_Peer `protobuf:"bytes,2,rep,name=known_hosts,json=knownHosts,proto3" json:"known_hosts,omitempty"` - AuthorizedClients []*Multihost_Peer `protobuf:"bytes,3,rep,name=authorized_clients,json=authorizedClients,proto3" json:"authorized_clients,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Identity *PrivateKey `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + KnownHosts []*Multihost_Peer `protobuf:"bytes,2,rep,name=known_hosts,json=knownHosts,proto3" json:"known_hosts,omitempty"` + AuthorizedClients []*Multihost_Peer `protobuf:"bytes,3,rep,name=authorized_clients,json=authorizedClients,proto3" json:"authorized_clients,omitempty"` + PairingTokens []*Multihost_PairingToken `protobuf:"bytes,4,rep,name=pairing_tokens,json=pairingTokens,proto3" json:"pairing_tokens,omitempty"` // active pairing tokens generated by this instance (server-side only) unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -589,22 +595,32 @@ func (x *Multihost) GetAuthorizedClients() []*Multihost_Peer { return nil } +func (x *Multihost) GetPairingTokens() []*Multihost_PairingToken { + if x != nil { + return x.PairingTokens + } + return nil +} + type Repo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // unique but human readable ID for this repo. - Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` // URI of the repo. - Guid string `protobuf:"bytes,11,opt,name=guid,proto3" json:"guid,omitempty"` // a globally unique ID for this repo. Should be derived as the 'id' field in `restic cat config --json`. 
- Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"` // plaintext password - Env []string `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty"` // extra environment variables to set for restic. - Flags []string `protobuf:"bytes,5,rep,name=flags,proto3" json:"flags,omitempty"` // extra flags set on the restic command. - PrunePolicy *PrunePolicy `protobuf:"bytes,6,opt,name=prune_policy,json=prunePolicy,proto3" json:"prune_policy,omitempty"` // policy for when to run prune. - CheckPolicy *CheckPolicy `protobuf:"bytes,9,opt,name=check_policy,json=checkPolicy,proto3" json:"check_policy,omitempty"` // policy for when to run check. - Hooks []*Hook `protobuf:"bytes,7,rep,name=hooks,proto3" json:"hooks,omitempty"` // hooks to run on events for this repo. - AutoUnlock bool `protobuf:"varint,8,opt,name=auto_unlock,json=autoUnlock,proto3" json:"auto_unlock,omitempty"` // automatically unlock the repo when needed. - AutoInitialize bool `protobuf:"varint,12,opt,name=auto_initialize,json=autoInitialize,proto3" json:"auto_initialize,omitempty"` // whether the repo should be auto-initialized if not found. - CommandPrefix *CommandPrefix `protobuf:"bytes,10,opt,name=command_prefix,json=commandPrefix,proto3" json:"command_prefix,omitempty"` // modifiers for the restic commands - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // unique but human readable ID for this repo. + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` // URI of the repo. + Guid string `protobuf:"bytes,11,opt,name=guid,proto3" json:"guid,omitempty"` // a globally unique ID for this repo. Should be derived as the 'id' field in `restic cat config --json`. 
+ Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"` // plaintext password + Env []string `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty"` // extra environment variables to set for restic. + Flags []string `protobuf:"bytes,5,rep,name=flags,proto3" json:"flags,omitempty"` // extra flags set on the restic command. + PrunePolicy *PrunePolicy `protobuf:"bytes,6,opt,name=prune_policy,json=prunePolicy,proto3" json:"prune_policy,omitempty"` // policy for when to run prune. + CheckPolicy *CheckPolicy `protobuf:"bytes,9,opt,name=check_policy,json=checkPolicy,proto3" json:"check_policy,omitempty"` // policy for when to run check. + Hooks []*Hook `protobuf:"bytes,7,rep,name=hooks,proto3" json:"hooks,omitempty"` // hooks to run on events for this repo. + AutoUnlock bool `protobuf:"varint,8,opt,name=auto_unlock,json=autoUnlock,proto3" json:"auto_unlock,omitempty"` // automatically unlock the repo when needed. + AutoInitialize bool `protobuf:"varint,12,opt,name=auto_initialize,json=autoInitialize,proto3" json:"auto_initialize,omitempty"` // whether the repo should be auto-initialized if not found. + CommandPrefix *CommandPrefix `protobuf:"bytes,10,opt,name=command_prefix,json=commandPrefix,proto3" json:"command_prefix,omitempty"` // modifiers for the restic commands + Shared bool `protobuf:"varint,13,opt,name=shared,proto3" json:"shared,omitempty"` // if true, this repo is pushed to all authorized clients with read-config permission + OriginInstanceId string `protobuf:"bytes,14,opt,name=origin_instance_id,json=originInstanceId,proto3" json:"origin_instance_id,omitempty"` // set when this repo was pushed from a remote instance; marks it as non-editable + ForgetPolicy *ForgetPolicy `protobuf:"bytes,15,opt,name=forget_policy,json=forgetPolicy,proto3" json:"forget_policy,omitempty"` // optional repo-level forget policy. If set, overrides per-plan retention policies. 
+ unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Repo) Reset() { @@ -721,6 +737,27 @@ func (x *Repo) GetCommandPrefix() *CommandPrefix { return nil } +func (x *Repo) GetShared() bool { + if x != nil { + return x.Shared + } + return false +} + +func (x *Repo) GetOriginInstanceId() string { + if x != nil { + return x.OriginInstanceId + } + return "" +} + +func (x *Repo) GetForgetPolicy() *ForgetPolicy { + if x != nil { + return x.ForgetPolicy + } + return nil +} + type Plan struct { state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // unique but human readable ID for this plan. @@ -987,6 +1024,58 @@ func (*RetentionPolicy_PolicyTimeBucketed) isRetentionPolicy_Policy() {} func (*RetentionPolicy_PolicyKeepAll) isRetentionPolicy_Policy() {} +type ForgetPolicy struct { + state protoimpl.MessageState `protogen:"open.v1"` + Schedule *Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule,omitempty"` + Retention *RetentionPolicy `protobuf:"bytes,2,opt,name=retention,proto3" json:"retention,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForgetPolicy) Reset() { + *x = ForgetPolicy{} + mi := &file_v1_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForgetPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForgetPolicy) ProtoMessage() {} + +func (x *ForgetPolicy) ProtoReflect() protoreflect.Message { + mi := &file_v1_config_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForgetPolicy.ProtoReflect.Descriptor instead. 
+func (*ForgetPolicy) Descriptor() ([]byte, []int) { + return file_v1_config_proto_rawDescGZIP(), []int{6} +} + +func (x *ForgetPolicy) GetSchedule() *Schedule { + if x != nil { + return x.Schedule + } + return nil +} + +func (x *ForgetPolicy) GetRetention() *RetentionPolicy { + if x != nil { + return x.Retention + } + return nil +} + type PrunePolicy struct { state protoimpl.MessageState `protogen:"open.v1"` Schedule *Schedule `protobuf:"bytes,2,opt,name=schedule,proto3" json:"schedule,omitempty"` @@ -998,7 +1087,7 @@ type PrunePolicy struct { func (x *PrunePolicy) Reset() { *x = PrunePolicy{} - mi := &file_v1_config_proto_msgTypes[6] + mi := &file_v1_config_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1010,7 +1099,7 @@ func (x *PrunePolicy) String() string { func (*PrunePolicy) ProtoMessage() {} func (x *PrunePolicy) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[6] + mi := &file_v1_config_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1023,7 +1112,7 @@ func (x *PrunePolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use PrunePolicy.ProtoReflect.Descriptor instead. 
func (*PrunePolicy) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{6} + return file_v1_config_proto_rawDescGZIP(), []int{7} } func (x *PrunePolicy) GetSchedule() *Schedule { @@ -1061,7 +1150,7 @@ type CheckPolicy struct { func (x *CheckPolicy) Reset() { *x = CheckPolicy{} - mi := &file_v1_config_proto_msgTypes[7] + mi := &file_v1_config_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1073,7 +1162,7 @@ func (x *CheckPolicy) String() string { func (*CheckPolicy) ProtoMessage() {} func (x *CheckPolicy) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[7] + mi := &file_v1_config_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1086,7 +1175,7 @@ func (x *CheckPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckPolicy.ProtoReflect.Descriptor instead. func (*CheckPolicy) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{7} + return file_v1_config_proto_rawDescGZIP(), []int{8} } func (x *CheckPolicy) GetSchedule() *Schedule { @@ -1153,7 +1242,7 @@ type Schedule struct { func (x *Schedule) Reset() { *x = Schedule{} - mi := &file_v1_config_proto_msgTypes[8] + mi := &file_v1_config_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1165,7 +1254,7 @@ func (x *Schedule) String() string { func (*Schedule) ProtoMessage() {} func (x *Schedule) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[8] + mi := &file_v1_config_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1178,7 +1267,7 @@ func (x *Schedule) ProtoReflect() protoreflect.Message { // Deprecated: Use Schedule.ProtoReflect.Descriptor instead. 
func (*Schedule) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{8} + return file_v1_config_proto_rawDescGZIP(), []int{9} } func (x *Schedule) GetSchedule() isSchedule_Schedule { @@ -1280,7 +1369,7 @@ type Hook struct { func (x *Hook) Reset() { *x = Hook{} - mi := &file_v1_config_proto_msgTypes[9] + mi := &file_v1_config_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1292,7 +1381,7 @@ func (x *Hook) String() string { func (*Hook) ProtoMessage() {} func (x *Hook) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[9] + mi := &file_v1_config_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1305,7 +1394,7 @@ func (x *Hook) ProtoReflect() protoreflect.Message { // Deprecated: Use Hook.ProtoReflect.Descriptor instead. func (*Hook) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9} + return file_v1_config_proto_rawDescGZIP(), []int{10} } func (x *Hook) GetConditions() []Hook_Condition { @@ -1463,7 +1552,7 @@ type Auth struct { func (x *Auth) Reset() { *x = Auth{} - mi := &file_v1_config_proto_msgTypes[10] + mi := &file_v1_config_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1475,7 +1564,7 @@ func (x *Auth) String() string { func (*Auth) ProtoMessage() {} func (x *Auth) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[10] + mi := &file_v1_config_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1488,7 +1577,7 @@ func (x *Auth) ProtoReflect() protoreflect.Message { // Deprecated: Use Auth.ProtoReflect.Descriptor instead. 
func (*Auth) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{10} + return file_v1_config_proto_rawDescGZIP(), []int{11} } func (x *Auth) GetDisabled() bool { @@ -1518,7 +1607,7 @@ type User struct { func (x *User) Reset() { *x = User{} - mi := &file_v1_config_proto_msgTypes[11] + mi := &file_v1_config_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1530,7 +1619,7 @@ func (x *User) String() string { func (*User) ProtoMessage() {} func (x *User) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[11] + mi := &file_v1_config_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1543,7 +1632,7 @@ func (x *User) ProtoReflect() protoreflect.Message { // Deprecated: Use User.ProtoReflect.Descriptor instead. func (*User) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{11} + return file_v1_config_proto_rawDescGZIP(), []int{12} } func (x *User) GetName() string { @@ -1580,20 +1669,20 @@ type User_PasswordBcrypt struct { func (*User_PasswordBcrypt) isUser_Password() {} type Multihost_Peer struct { - state protoimpl.MessageState `protogen:"open.v1"` - InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` // a human readable name for the peer, typically the same as its instance ID. - Keyid string `protobuf:"bytes,2,opt,name=keyid,json=keyId,proto3" json:"keyid,omitempty"` // the key ID of the peer. This must match the sha256 of the public key the client provides in handshake. - KeyidVerified bool `protobuf:"varint,3,opt,name=keyid_verified,json=keyIdVerified,proto3" json:"keyid_verified,omitempty"` // marks whether the key ID was visually verified by the user, this must be done for authorized clients. Not required for known hosts but recommended. 
- Permissions []*Multihost_Permission `protobuf:"bytes,5,rep,name=permissions,proto3" json:"permissions,omitempty"` // permissions granted to this peer. + state protoimpl.MessageState `protogen:"open.v1"` + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` // a human readable name for the peer, typically the same as its instance ID. + Keyid string `protobuf:"bytes,2,opt,name=keyid,json=keyId,proto3" json:"keyid,omitempty"` // the key ID of the peer. This must match the sha256 of the public key the client provides in handshake. + Permissions []*Multihost_Permission `protobuf:"bytes,5,rep,name=permissions,proto3" json:"permissions,omitempty"` // permissions granted to this peer. // Known host only fields - InstanceUrl string `protobuf:"bytes,4,opt,name=instance_url,json=instanceUrl,proto3" json:"instance_url,omitempty"` // instance URL, required for a known host. Otherwise meaningless. - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + InstanceUrl string `protobuf:"bytes,4,opt,name=instance_url,json=instanceUrl,proto3" json:"instance_url,omitempty"` // instance URL, required for a known host. Otherwise meaningless. + InitialPairingSecret string `protobuf:"bytes,6,opt,name=initial_pairing_secret,json=initialPairingSecret,proto3" json:"initial_pairing_secret,omitempty"` // one-time pairing secret sent during first handshake to auto-authorize with the server. Cleared after successful pairing. 
+ unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Multihost_Peer) Reset() { *x = Multihost_Peer{} - mi := &file_v1_config_proto_msgTypes[12] + mi := &file_v1_config_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1605,7 +1694,7 @@ func (x *Multihost_Peer) String() string { func (*Multihost_Peer) ProtoMessage() {} func (x *Multihost_Peer) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[12] + mi := &file_v1_config_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1635,13 +1724,6 @@ func (x *Multihost_Peer) GetKeyid() string { return "" } -func (x *Multihost_Peer) GetKeyidVerified() bool { - if x != nil { - return x.KeyidVerified - } - return false -} - func (x *Multihost_Peer) GetPermissions() []*Multihost_Permission { if x != nil { return x.Permissions @@ -1656,6 +1738,105 @@ func (x *Multihost_Peer) GetInstanceUrl() string { return "" } +func (x *Multihost_Peer) GetInitialPairingSecret() string { + if x != nil { + return x.InitialPairingSecret + } + return "" +} + +type Multihost_PairingToken struct { + state protoimpl.MessageState `protogen:"open.v1"` + Secret string `protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"` // the one-time secret used to validate the pairing request + Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty"` // human-readable label for this token + CreatedAtUnix int64 `protobuf:"varint,3,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // unix timestamp when the token was created + ExpiresAtUnix int64 `protobuf:"varint,4,opt,name=expires_at_unix,json=expiresAtUnix,proto3" json:"expires_at_unix,omitempty"` // unix timestamp when the token expires + MaxUses int32 `protobuf:"varint,5,opt,name=max_uses,json=maxUses,proto3" json:"max_uses,omitempty"` // maximum number of clients 
that can pair with this token, 0 means unlimited + Uses int32 `protobuf:"varint,6,opt,name=uses,proto3" json:"uses,omitempty"` // number of times this token has been used + Permissions []*Multihost_Permission `protobuf:"bytes,7,rep,name=permissions,proto3" json:"permissions,omitempty"` // permissions granted to clients that pair with this token + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Multihost_PairingToken) Reset() { + *x = Multihost_PairingToken{} + mi := &file_v1_config_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Multihost_PairingToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Multihost_PairingToken) ProtoMessage() {} + +func (x *Multihost_PairingToken) ProtoReflect() protoreflect.Message { + mi := &file_v1_config_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Multihost_PairingToken.ProtoReflect.Descriptor instead. 
+func (*Multihost_PairingToken) Descriptor() ([]byte, []int) { + return file_v1_config_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Multihost_PairingToken) GetSecret() string { + if x != nil { + return x.Secret + } + return "" +} + +func (x *Multihost_PairingToken) GetLabel() string { + if x != nil { + return x.Label + } + return "" +} + +func (x *Multihost_PairingToken) GetCreatedAtUnix() int64 { + if x != nil { + return x.CreatedAtUnix + } + return 0 +} + +func (x *Multihost_PairingToken) GetExpiresAtUnix() int64 { + if x != nil { + return x.ExpiresAtUnix + } + return 0 +} + +func (x *Multihost_PairingToken) GetMaxUses() int32 { + if x != nil { + return x.MaxUses + } + return 0 +} + +func (x *Multihost_PairingToken) GetUses() int32 { + if x != nil { + return x.Uses + } + return 0 +} + +func (x *Multihost_PairingToken) GetPermissions() []*Multihost_Permission { + if x != nil { + return x.Permissions + } + return nil +} + type Multihost_Permission struct { state protoimpl.MessageState `protogen:"open.v1"` // Scopes are any of '*', 'repo:' or 'plan:','-repo:','-plan:'. @@ -1669,7 +1850,7 @@ type Multihost_Permission struct { func (x *Multihost_Permission) Reset() { *x = Multihost_Permission{} - mi := &file_v1_config_proto_msgTypes[13] + mi := &file_v1_config_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1681,7 +1862,7 @@ func (x *Multihost_Permission) String() string { func (*Multihost_Permission) ProtoMessage() {} func (x *Multihost_Permission) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[13] + mi := &file_v1_config_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1694,7 +1875,7 @@ func (x *Multihost_Permission) ProtoReflect() protoreflect.Message { // Deprecated: Use Multihost_Permission.ProtoReflect.Descriptor instead. 
func (*Multihost_Permission) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{1, 1} + return file_v1_config_proto_rawDescGZIP(), []int{1, 2} } func (x *Multihost_Permission) GetType() Multihost_Permission_Type { @@ -1725,7 +1906,7 @@ type RetentionPolicy_TimeBucketedCounts struct { func (x *RetentionPolicy_TimeBucketedCounts) Reset() { *x = RetentionPolicy_TimeBucketedCounts{} - mi := &file_v1_config_proto_msgTypes[14] + mi := &file_v1_config_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1737,7 +1918,7 @@ func (x *RetentionPolicy_TimeBucketedCounts) String() string { func (*RetentionPolicy_TimeBucketedCounts) ProtoMessage() {} func (x *RetentionPolicy_TimeBucketedCounts) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[14] + mi := &file_v1_config_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1804,7 +1985,7 @@ type Hook_Command struct { func (x *Hook_Command) Reset() { *x = Hook_Command{} - mi := &file_v1_config_proto_msgTypes[15] + mi := &file_v1_config_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1816,7 +1997,7 @@ func (x *Hook_Command) String() string { func (*Hook_Command) ProtoMessage() {} func (x *Hook_Command) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[15] + mi := &file_v1_config_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1829,7 +2010,7 @@ func (x *Hook_Command) ProtoReflect() protoreflect.Message { // Deprecated: Use Hook_Command.ProtoReflect.Descriptor instead. 
func (*Hook_Command) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 0} + return file_v1_config_proto_rawDescGZIP(), []int{10, 0} } func (x *Hook_Command) GetCommand() string { @@ -1850,7 +2031,7 @@ type Hook_Webhook struct { func (x *Hook_Webhook) Reset() { *x = Hook_Webhook{} - mi := &file_v1_config_proto_msgTypes[16] + mi := &file_v1_config_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1862,7 +2043,7 @@ func (x *Hook_Webhook) String() string { func (*Hook_Webhook) ProtoMessage() {} func (x *Hook_Webhook) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[16] + mi := &file_v1_config_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1875,7 +2056,7 @@ func (x *Hook_Webhook) ProtoReflect() protoreflect.Message { // Deprecated: Use Hook_Webhook.ProtoReflect.Descriptor instead. func (*Hook_Webhook) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 1} + return file_v1_config_proto_rawDescGZIP(), []int{10, 1} } func (x *Hook_Webhook) GetWebhookUrl() string { @@ -1909,7 +2090,7 @@ type Hook_Discord struct { func (x *Hook_Discord) Reset() { *x = Hook_Discord{} - mi := &file_v1_config_proto_msgTypes[17] + mi := &file_v1_config_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1921,7 +2102,7 @@ func (x *Hook_Discord) String() string { func (*Hook_Discord) ProtoMessage() {} func (x *Hook_Discord) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[17] + mi := &file_v1_config_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1934,7 +2115,7 @@ func (x *Hook_Discord) ProtoReflect() protoreflect.Message { // Deprecated: Use Hook_Discord.ProtoReflect.Descriptor instead. 
func (*Hook_Discord) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 2} + return file_v1_config_proto_rawDescGZIP(), []int{10, 2} } func (x *Hook_Discord) GetWebhookUrl() string { @@ -1964,7 +2145,7 @@ type Hook_Gotify struct { func (x *Hook_Gotify) Reset() { *x = Hook_Gotify{} - mi := &file_v1_config_proto_msgTypes[18] + mi := &file_v1_config_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1976,7 +2157,7 @@ func (x *Hook_Gotify) String() string { func (*Hook_Gotify) ProtoMessage() {} func (x *Hook_Gotify) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[18] + mi := &file_v1_config_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1989,7 +2170,7 @@ func (x *Hook_Gotify) ProtoReflect() protoreflect.Message { // Deprecated: Use Hook_Gotify.ProtoReflect.Descriptor instead. func (*Hook_Gotify) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 3} + return file_v1_config_proto_rawDescGZIP(), []int{10, 3} } func (x *Hook_Gotify) GetBaseUrl() string { @@ -2037,7 +2218,7 @@ type Hook_Slack struct { func (x *Hook_Slack) Reset() { *x = Hook_Slack{} - mi := &file_v1_config_proto_msgTypes[19] + mi := &file_v1_config_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2049,7 +2230,7 @@ func (x *Hook_Slack) String() string { func (*Hook_Slack) ProtoMessage() {} func (x *Hook_Slack) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[19] + mi := &file_v1_config_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2062,7 +2243,7 @@ func (x *Hook_Slack) ProtoReflect() protoreflect.Message { // Deprecated: Use Hook_Slack.ProtoReflect.Descriptor instead. 
func (*Hook_Slack) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 4} + return file_v1_config_proto_rawDescGZIP(), []int{10, 4} } func (x *Hook_Slack) GetWebhookUrl() string { @@ -2089,7 +2270,7 @@ type Hook_Shoutrrr struct { func (x *Hook_Shoutrrr) Reset() { *x = Hook_Shoutrrr{} - mi := &file_v1_config_proto_msgTypes[20] + mi := &file_v1_config_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2101,7 +2282,7 @@ func (x *Hook_Shoutrrr) String() string { func (*Hook_Shoutrrr) ProtoMessage() {} func (x *Hook_Shoutrrr) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[20] + mi := &file_v1_config_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2114,7 +2295,7 @@ func (x *Hook_Shoutrrr) ProtoReflect() protoreflect.Message { // Deprecated: Use Hook_Shoutrrr.ProtoReflect.Descriptor instead. func (*Hook_Shoutrrr) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 5} + return file_v1_config_proto_rawDescGZIP(), []int{10, 5} } func (x *Hook_Shoutrrr) GetShoutrrrUrl() string { @@ -2141,7 +2322,7 @@ type Hook_Healthchecks struct { func (x *Hook_Healthchecks) Reset() { *x = Hook_Healthchecks{} - mi := &file_v1_config_proto_msgTypes[21] + mi := &file_v1_config_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2153,7 +2334,7 @@ func (x *Hook_Healthchecks) String() string { func (*Hook_Healthchecks) ProtoMessage() {} func (x *Hook_Healthchecks) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[21] + mi := &file_v1_config_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2166,7 +2347,7 @@ func (x *Hook_Healthchecks) ProtoReflect() protoreflect.Message { // Deprecated: Use 
Hook_Healthchecks.ProtoReflect.Descriptor instead. func (*Hook_Healthchecks) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 6} + return file_v1_config_proto_rawDescGZIP(), []int{10, 6} } func (x *Hook_Healthchecks) GetWebhookUrl() string { @@ -2194,7 +2375,7 @@ type Hook_Telegram struct { func (x *Hook_Telegram) Reset() { *x = Hook_Telegram{} - mi := &file_v1_config_proto_msgTypes[22] + mi := &file_v1_config_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2206,7 +2387,7 @@ func (x *Hook_Telegram) String() string { func (*Hook_Telegram) ProtoMessage() {} func (x *Hook_Telegram) ProtoReflect() protoreflect.Message { - mi := &file_v1_config_proto_msgTypes[22] + mi := &file_v1_config_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2219,7 +2400,7 @@ func (x *Hook_Telegram) ProtoReflect() protoreflect.Message { // Deprecated: Use Hook_Telegram.ProtoReflect.Descriptor instead. 
func (*Hook_Telegram) Descriptor() ([]byte, []int) { - return file_v1_config_proto_rawDescGZIP(), []int{9, 7} + return file_v1_config_proto_rawDescGZIP(), []int{10, 7} } func (x *Hook_Telegram) GetBotToken() string { @@ -2255,28 +2436,38 @@ const file_v1_config_proto_rawDesc = "" + "\x05repos\x18\x03 \x03(\v2\b.v1.RepoR\x05repos\x12\x1e\n" + "\x05plans\x18\x04 \x03(\v2\b.v1.PlanR\x05plans\x12\x1c\n" + "\x04auth\x18\x05 \x01(\v2\b.v1.AuthR\x04auth\x12&\n" + - "\tmultihost\x18\a \x01(\v2\r.v1.MultihostR\x04sync\"\xcd\x04\n" + + "\tmultihost\x18\a \x01(\v2\r.v1.MultihostR\x04sync\"\xc5\a\n" + "\tMultihost\x12*\n" + "\bidentity\x18\x01 \x01(\v2\x0e.v1.PrivateKeyR\bidentity\x123\n" + "\vknown_hosts\x18\x02 \x03(\v2\x12.v1.Multihost.PeerR\n" + "knownHosts\x12A\n" + - "\x12authorized_clients\x18\x03 \x03(\v2\x12.v1.Multihost.PeerR\x11authorizedClients\x1a\xc3\x01\n" + + "\x12authorized_clients\x18\x03 \x03(\v2\x12.v1.Multihost.PeerR\x11authorizedClients\x12A\n" + + "\x0epairing_tokens\x18\x04 \x03(\v2\x1a.v1.Multihost.PairingTokenR\rpairingTokens\x1a\xd8\x01\n" + "\x04Peer\x12\x1f\n" + "\vinstance_id\x18\x01 \x01(\tR\n" + "instanceId\x12\x14\n" + - "\x05keyid\x18\x02 \x01(\tR\x05keyId\x12%\n" + - "\x0ekeyid_verified\x18\x03 \x01(\bR\rkeyIdVerified\x12:\n" + + "\x05keyid\x18\x02 \x01(\tR\x05keyId\x12:\n" + "\vpermissions\x18\x05 \x03(\v2\x18.v1.Multihost.PermissionR\vpermissions\x12!\n" + - "\finstance_url\x18\x04 \x01(\tR\vinstanceUrl\x1a\xd5\x01\n" + + "\finstance_url\x18\x04 \x01(\tR\vinstanceUrl\x124\n" + + "\x16initial_pairing_secret\x18\x06 \x01(\tR\x14initialPairingSecretJ\x04\b\x03\x10\x04\x1a\xf7\x01\n" + + "\fPairingToken\x12\x16\n" + + "\x06secret\x18\x01 \x01(\tR\x06secret\x12\x14\n" + + "\x05label\x18\x02 \x01(\tR\x05label\x12&\n" + + "\x0fcreated_at_unix\x18\x03 \x01(\x03R\rcreatedAtUnix\x12&\n" + + "\x0fexpires_at_unix\x18\x04 \x01(\x03R\rexpiresAtUnix\x12\x19\n" + + "\bmax_uses\x18\x05 \x01(\x05R\amaxUses\x12\x12\n" + + "\x04uses\x18\x06 
\x01(\x05R\x04uses\x12:\n" + + "\vpermissions\x18\a \x03(\v2\x18.v1.Multihost.PermissionR\vpermissions\x1a\xfb\x01\n" + "\n" + "Permission\x121\n" + "\x04type\x18\x01 \x01(\x0e2\x1d.v1.Multihost.Permission.TypeR\x04type\x12\x16\n" + - "\x06scopes\x18\x02 \x03(\tR\x06scopes\"|\n" + + "\x06scopes\x18\x02 \x03(\tR\x06scopes\"\xa1\x01\n" + "\x04Type\x12\x16\n" + "\x12PERMISSION_UNKNOWN\x10\x00\x12\x1e\n" + "\x1aPERMISSION_READ_OPERATIONS\x10\x01\x12\x1a\n" + "\x16PERMISSION_READ_CONFIG\x10\x02\x12 \n" + - "\x1cPERMISSION_READ_WRITE_CONFIG\x10\x03\"\x8c\x03\n" + + "\x1cPERMISSION_READ_WRITE_CONFIG\x10\x03\x12#\n" + + "\x1fPERMISSION_RECEIVE_SHARED_REPOS\x10\x04\"\x89\x04\n" + "\x04Repo\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x10\n" + "\x03uri\x18\x02 \x01(\tR\x03uri\x12\x12\n" + @@ -2291,7 +2482,10 @@ const file_v1_config_proto_rawDesc = "" + "autoUnlock\x12'\n" + "\x0fauto_initialize\x18\f \x01(\bR\x0eautoInitialize\x128\n" + "\x0ecommand_prefix\x18\n" + - " \x01(\v2\x11.v1.CommandPrefixR\rcommandPrefix\"\xd9\x02\n" + + " \x01(\v2\x11.v1.CommandPrefixR\rcommandPrefix\x12\x16\n" + + "\x06shared\x18\r \x01(\bR\x06shared\x12,\n" + + "\x12origin_instance_id\x18\x0e \x01(\tR\x10originInstanceId\x125\n" + + "\rforget_policy\x18\x0f \x01(\v2\x10.v1.ForgetPolicyR\fforgetPolicy\"\xd9\x02\n" + "\x04Plan\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04repo\x18\x02 \x01(\tR\x04repo\x12\x14\n" + @@ -2329,7 +2523,10 @@ const file_v1_config_proto_rawDesc = "" + "\amonthly\x18\x04 \x01(\x05R\amonthly\x12\x16\n" + "\x06yearly\x18\x05 \x01(\x05R\x06yearly\x12\x1e\n" + "\vkeep_last_n\x18\x06 \x01(\x05R\tkeepLastNB\b\n" + - "\x06policy\"\x8f\x01\n" + + "\x06policy\"k\n" + + "\fForgetPolicy\x12(\n" + + "\bschedule\x18\x01 \x01(\v2\f.v1.ScheduleR\bschedule\x121\n" + + "\tretention\x18\x02 \x01(\v2\x13.v1.RetentionPolicyR\tretention\"\x8f\x01\n" + "\vPrunePolicy\x12(\n" + "\bschedule\x18\x02 \x01(\v2\f.v1.ScheduleR\bschedule\x12(\n" + "\x10max_unused_bytes\x18\x03 
\x01(\x03R\x0emaxUnusedBytes\x12,\n" + @@ -2449,7 +2646,7 @@ func file_v1_config_proto_rawDescGZIP() []byte { } var file_v1_config_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var file_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 25) var file_v1_config_proto_goTypes = []any{ (Multihost_Permission_Type)(0), // 0: v1.Multihost.Permission.Type (CommandPrefix_IONiceLevel)(0), // 1: v1.CommandPrefix.IONiceLevel @@ -2464,65 +2661,72 @@ var file_v1_config_proto_goTypes = []any{ (*Plan)(nil), // 10: v1.Plan (*CommandPrefix)(nil), // 11: v1.CommandPrefix (*RetentionPolicy)(nil), // 12: v1.RetentionPolicy - (*PrunePolicy)(nil), // 13: v1.PrunePolicy - (*CheckPolicy)(nil), // 14: v1.CheckPolicy - (*Schedule)(nil), // 15: v1.Schedule - (*Hook)(nil), // 16: v1.Hook - (*Auth)(nil), // 17: v1.Auth - (*User)(nil), // 18: v1.User - (*Multihost_Peer)(nil), // 19: v1.Multihost.Peer - (*Multihost_Permission)(nil), // 20: v1.Multihost.Permission - (*RetentionPolicy_TimeBucketedCounts)(nil), // 21: v1.RetentionPolicy.TimeBucketedCounts - (*Hook_Command)(nil), // 22: v1.Hook.Command - (*Hook_Webhook)(nil), // 23: v1.Hook.Webhook - (*Hook_Discord)(nil), // 24: v1.Hook.Discord - (*Hook_Gotify)(nil), // 25: v1.Hook.Gotify - (*Hook_Slack)(nil), // 26: v1.Hook.Slack - (*Hook_Shoutrrr)(nil), // 27: v1.Hook.Shoutrrr - (*Hook_Healthchecks)(nil), // 28: v1.Hook.Healthchecks - (*Hook_Telegram)(nil), // 29: v1.Hook.Telegram - (*PrivateKey)(nil), // 30: v1.PrivateKey + (*ForgetPolicy)(nil), // 13: v1.ForgetPolicy + (*PrunePolicy)(nil), // 14: v1.PrunePolicy + (*CheckPolicy)(nil), // 15: v1.CheckPolicy + (*Schedule)(nil), // 16: v1.Schedule + (*Hook)(nil), // 17: v1.Hook + (*Auth)(nil), // 18: v1.Auth + (*User)(nil), // 19: v1.User + (*Multihost_Peer)(nil), // 20: v1.Multihost.Peer + (*Multihost_PairingToken)(nil), // 21: v1.Multihost.PairingToken + (*Multihost_Permission)(nil), // 22: v1.Multihost.Permission + 
(*RetentionPolicy_TimeBucketedCounts)(nil), // 23: v1.RetentionPolicy.TimeBucketedCounts + (*Hook_Command)(nil), // 24: v1.Hook.Command + (*Hook_Webhook)(nil), // 25: v1.Hook.Webhook + (*Hook_Discord)(nil), // 26: v1.Hook.Discord + (*Hook_Gotify)(nil), // 27: v1.Hook.Gotify + (*Hook_Slack)(nil), // 28: v1.Hook.Slack + (*Hook_Shoutrrr)(nil), // 29: v1.Hook.Shoutrrr + (*Hook_Healthchecks)(nil), // 30: v1.Hook.Healthchecks + (*Hook_Telegram)(nil), // 31: v1.Hook.Telegram + (*PrivateKey)(nil), // 32: v1.PrivateKey } var file_v1_config_proto_depIdxs = []int32{ 9, // 0: v1.Config.repos:type_name -> v1.Repo 10, // 1: v1.Config.plans:type_name -> v1.Plan - 17, // 2: v1.Config.auth:type_name -> v1.Auth + 18, // 2: v1.Config.auth:type_name -> v1.Auth 8, // 3: v1.Config.multihost:type_name -> v1.Multihost - 30, // 4: v1.Multihost.identity:type_name -> v1.PrivateKey - 19, // 5: v1.Multihost.known_hosts:type_name -> v1.Multihost.Peer - 19, // 6: v1.Multihost.authorized_clients:type_name -> v1.Multihost.Peer - 13, // 7: v1.Repo.prune_policy:type_name -> v1.PrunePolicy - 14, // 8: v1.Repo.check_policy:type_name -> v1.CheckPolicy - 16, // 9: v1.Repo.hooks:type_name -> v1.Hook - 11, // 10: v1.Repo.command_prefix:type_name -> v1.CommandPrefix - 15, // 11: v1.Plan.schedule:type_name -> v1.Schedule - 12, // 12: v1.Plan.retention:type_name -> v1.RetentionPolicy - 16, // 13: v1.Plan.hooks:type_name -> v1.Hook - 1, // 14: v1.CommandPrefix.io_nice:type_name -> v1.CommandPrefix.IONiceLevel - 2, // 15: v1.CommandPrefix.cpu_nice:type_name -> v1.CommandPrefix.CPUNiceLevel - 21, // 16: v1.RetentionPolicy.policy_time_bucketed:type_name -> v1.RetentionPolicy.TimeBucketedCounts - 15, // 17: v1.PrunePolicy.schedule:type_name -> v1.Schedule - 15, // 18: v1.CheckPolicy.schedule:type_name -> v1.Schedule - 3, // 19: v1.Schedule.clock:type_name -> v1.Schedule.Clock - 4, // 20: v1.Hook.conditions:type_name -> v1.Hook.Condition - 5, // 21: v1.Hook.on_error:type_name -> v1.Hook.OnError - 22, // 22: 
v1.Hook.action_command:type_name -> v1.Hook.Command - 23, // 23: v1.Hook.action_webhook:type_name -> v1.Hook.Webhook - 24, // 24: v1.Hook.action_discord:type_name -> v1.Hook.Discord - 25, // 25: v1.Hook.action_gotify:type_name -> v1.Hook.Gotify - 26, // 26: v1.Hook.action_slack:type_name -> v1.Hook.Slack - 27, // 27: v1.Hook.action_shoutrrr:type_name -> v1.Hook.Shoutrrr - 28, // 28: v1.Hook.action_healthchecks:type_name -> v1.Hook.Healthchecks - 29, // 29: v1.Hook.action_telegram:type_name -> v1.Hook.Telegram - 18, // 30: v1.Auth.users:type_name -> v1.User - 20, // 31: v1.Multihost.Peer.permissions:type_name -> v1.Multihost.Permission - 0, // 32: v1.Multihost.Permission.type:type_name -> v1.Multihost.Permission.Type - 6, // 33: v1.Hook.Webhook.method:type_name -> v1.Hook.Webhook.Method - 34, // [34:34] is the sub-list for method output_type - 34, // [34:34] is the sub-list for method input_type - 34, // [34:34] is the sub-list for extension type_name - 34, // [34:34] is the sub-list for extension extendee - 0, // [0:34] is the sub-list for field type_name + 32, // 4: v1.Multihost.identity:type_name -> v1.PrivateKey + 20, // 5: v1.Multihost.known_hosts:type_name -> v1.Multihost.Peer + 20, // 6: v1.Multihost.authorized_clients:type_name -> v1.Multihost.Peer + 21, // 7: v1.Multihost.pairing_tokens:type_name -> v1.Multihost.PairingToken + 14, // 8: v1.Repo.prune_policy:type_name -> v1.PrunePolicy + 15, // 9: v1.Repo.check_policy:type_name -> v1.CheckPolicy + 17, // 10: v1.Repo.hooks:type_name -> v1.Hook + 11, // 11: v1.Repo.command_prefix:type_name -> v1.CommandPrefix + 13, // 12: v1.Repo.forget_policy:type_name -> v1.ForgetPolicy + 16, // 13: v1.Plan.schedule:type_name -> v1.Schedule + 12, // 14: v1.Plan.retention:type_name -> v1.RetentionPolicy + 17, // 15: v1.Plan.hooks:type_name -> v1.Hook + 1, // 16: v1.CommandPrefix.io_nice:type_name -> v1.CommandPrefix.IONiceLevel + 2, // 17: v1.CommandPrefix.cpu_nice:type_name -> v1.CommandPrefix.CPUNiceLevel + 23, // 18: 
v1.RetentionPolicy.policy_time_bucketed:type_name -> v1.RetentionPolicy.TimeBucketedCounts + 16, // 19: v1.ForgetPolicy.schedule:type_name -> v1.Schedule + 12, // 20: v1.ForgetPolicy.retention:type_name -> v1.RetentionPolicy + 16, // 21: v1.PrunePolicy.schedule:type_name -> v1.Schedule + 16, // 22: v1.CheckPolicy.schedule:type_name -> v1.Schedule + 3, // 23: v1.Schedule.clock:type_name -> v1.Schedule.Clock + 4, // 24: v1.Hook.conditions:type_name -> v1.Hook.Condition + 5, // 25: v1.Hook.on_error:type_name -> v1.Hook.OnError + 24, // 26: v1.Hook.action_command:type_name -> v1.Hook.Command + 25, // 27: v1.Hook.action_webhook:type_name -> v1.Hook.Webhook + 26, // 28: v1.Hook.action_discord:type_name -> v1.Hook.Discord + 27, // 29: v1.Hook.action_gotify:type_name -> v1.Hook.Gotify + 28, // 30: v1.Hook.action_slack:type_name -> v1.Hook.Slack + 29, // 31: v1.Hook.action_shoutrrr:type_name -> v1.Hook.Shoutrrr + 30, // 32: v1.Hook.action_healthchecks:type_name -> v1.Hook.Healthchecks + 31, // 33: v1.Hook.action_telegram:type_name -> v1.Hook.Telegram + 19, // 34: v1.Auth.users:type_name -> v1.User + 22, // 35: v1.Multihost.Peer.permissions:type_name -> v1.Multihost.Permission + 22, // 36: v1.Multihost.PairingToken.permissions:type_name -> v1.Multihost.Permission + 0, // 37: v1.Multihost.Permission.type:type_name -> v1.Multihost.Permission.Type + 6, // 38: v1.Hook.Webhook.method:type_name -> v1.Hook.Webhook.Method + 39, // [39:39] is the sub-list for method output_type + 39, // [39:39] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name } func init() { file_v1_config_proto_init() } @@ -2536,17 +2740,17 @@ func file_v1_config_proto_init() { (*RetentionPolicy_PolicyTimeBucketed)(nil), (*RetentionPolicy_PolicyKeepAll)(nil), } - file_v1_config_proto_msgTypes[7].OneofWrappers = []any{ + file_v1_config_proto_msgTypes[8].OneofWrappers 
= []any{ (*CheckPolicy_StructureOnly)(nil), (*CheckPolicy_ReadDataSubsetPercent)(nil), } - file_v1_config_proto_msgTypes[8].OneofWrappers = []any{ + file_v1_config_proto_msgTypes[9].OneofWrappers = []any{ (*Schedule_Disabled)(nil), (*Schedule_Cron)(nil), (*Schedule_MaxFrequencyDays)(nil), (*Schedule_MaxFrequencyHours)(nil), } - file_v1_config_proto_msgTypes[9].OneofWrappers = []any{ + file_v1_config_proto_msgTypes[10].OneofWrappers = []any{ (*Hook_ActionCommand)(nil), (*Hook_ActionWebhook)(nil), (*Hook_ActionDiscord)(nil), @@ -2556,7 +2760,7 @@ func file_v1_config_proto_init() { (*Hook_ActionHealthchecks)(nil), (*Hook_ActionTelegram)(nil), } - file_v1_config_proto_msgTypes[11].OneofWrappers = []any{ + file_v1_config_proto_msgTypes[12].OneofWrappers = []any{ (*User_PasswordBcrypt)(nil), } type x struct{} @@ -2565,7 +2769,7 @@ func file_v1_config_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_v1_config_proto_rawDesc), len(file_v1_config_proto_rawDesc)), NumEnums: 7, - NumMessages: 23, + NumMessages: 25, NumExtensions: 0, NumServices: 0, }, diff --git a/gen/go/v1/crypto.pb.go b/gen/go/v1/crypto.pb.go index ce064c0e..86c6d9d1 100644 --- a/gen/go/v1/crypto.pb.go +++ b/gen/go/v1/crypto.pb.go @@ -92,7 +92,7 @@ func (x *SignedMessage) GetTimestampMillis() int64 { type PublicKey struct { state protoimpl.MessageState `protogen:"open.v1"` Keyid string `protobuf:"bytes,1,opt,name=keyid,json=keyId,proto3" json:"keyid,omitempty"` // a unique identifier generated as the SHA256 of the public key. 
- Ed25519Pub string `protobuf:"bytes,2,opt,name=ed25519pub,proto3" json:"ed25519pub,omitempty"` + EcdsaPub string `protobuf:"bytes,2,opt,name=ecdsa_pub,json=ecdsaPub,proto3" json:"ecdsa_pub,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -134,9 +134,9 @@ func (x *PublicKey) GetKeyid() string { return "" } -func (x *PublicKey) GetEd25519Pub() string { +func (x *PublicKey) GetEcdsaPub() string { if x != nil { - return x.Ed25519Pub + return x.EcdsaPub } return "" } @@ -144,8 +144,8 @@ func (x *PublicKey) GetEd25519Pub() string { type PrivateKey struct { state protoimpl.MessageState `protogen:"open.v1"` Keyid string `protobuf:"bytes,1,opt,name=keyid,json=keyId,proto3" json:"keyid,omitempty"` // a unique identifier generated as the SHA256 of the public key - Ed25519Priv string `protobuf:"bytes,2,opt,name=ed25519priv,proto3" json:"ed25519priv,omitempty"` - Ed25519Pub string `protobuf:"bytes,3,opt,name=ed25519pub,proto3" json:"ed25519pub,omitempty"` + EcdsaPriv string `protobuf:"bytes,2,opt,name=ecdsa_priv,json=ecdsaPriv,proto3" json:"ecdsa_priv,omitempty"` + EcdsaPub string `protobuf:"bytes,3,opt,name=ecdsa_pub,json=ecdsaPub,proto3" json:"ecdsa_pub,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -187,16 +187,16 @@ func (x *PrivateKey) GetKeyid() string { return "" } -func (x *PrivateKey) GetEd25519Priv() string { +func (x *PrivateKey) GetEcdsaPriv() string { if x != nil { - return x.Ed25519Priv + return x.EcdsaPriv } return "" } -func (x *PrivateKey) GetEd25519Pub() string { +func (x *PrivateKey) GetEcdsaPub() string { if x != nil { - return x.Ed25519Pub + return x.EcdsaPub } return "" } @@ -210,19 +210,16 @@ const file_v1_crypto_proto_rawDesc = "" + "\x05keyid\x18\x01 \x01(\tR\x05keyid\x12\x18\n" + "\apayload\x18\x02 \x01(\fR\apayload\x12\x1c\n" + "\tsignature\x18\x03 \x01(\fR\tsignature\x12(\n" + - "\x0ftimestampMillis\x18\x04 \x01(\x03R\x0ftimestampMillis\"A\n" + + "\x0ftimestampMillis\x18\x04 
\x01(\x03R\x0ftimestampMillis\">\n" + "\tPublicKey\x12\x14\n" + - "\x05keyid\x18\x01 \x01(\tR\x05keyId\x12\x1e\n" + - "\n" + - "ed25519pub\x18\x02 \x01(\tR\n" + - "ed25519pub\"d\n" + + "\x05keyid\x18\x01 \x01(\tR\x05keyId\x12\x1b\n" + + "\tecdsa_pub\x18\x02 \x01(\tR\becdsaPub\"^\n" + "\n" + "PrivateKey\x12\x14\n" + - "\x05keyid\x18\x01 \x01(\tR\x05keyId\x12 \n" + - "\ved25519priv\x18\x02 \x01(\tR\ved25519priv\x12\x1e\n" + + "\x05keyid\x18\x01 \x01(\tR\x05keyId\x12\x1d\n" + "\n" + - "ed25519pub\x18\x03 \x01(\tR\n" + - "ed25519pubB,Z*github.com/garethgeorge/backrest/gen/go/v1b\x06proto3" + "ecdsa_priv\x18\x02 \x01(\tR\tecdsaPriv\x12\x1b\n" + + "\tecdsa_pub\x18\x03 \x01(\tR\becdsaPubB,Z*github.com/garethgeorge/backrest/gen/go/v1b\x06proto3" var ( file_v1_crypto_proto_rawDescOnce sync.Once diff --git a/gen/go/v1/service.pb.go b/gen/go/v1/service.pb.go index ea9edb60..aa5849ea 100644 --- a/gen/go/v1/service.pb.go +++ b/gen/go/v1/service.pb.go @@ -1307,6 +1307,118 @@ func (x *SummaryDashboardResponse) GetDataPath() string { return "" } +type GeneratePairingTokenRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` // human-readable label for the token + TtlSeconds int64 `protobuf:"varint,2,opt,name=ttl_seconds,json=ttlSeconds,proto3" json:"ttl_seconds,omitempty"` // time-to-live in seconds (e.g. 
3600 for 1 hour) + MaxUses int32 `protobuf:"varint,3,opt,name=max_uses,json=maxUses,proto3" json:"max_uses,omitempty"` // max number of clients that can pair with this token, 0 for unlimited + Permissions []*Multihost_Permission `protobuf:"bytes,4,rep,name=permissions,proto3" json:"permissions,omitempty"` // permissions to grant to clients that pair with this token + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GeneratePairingTokenRequest) Reset() { + *x = GeneratePairingTokenRequest{} + mi := &file_v1_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GeneratePairingTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratePairingTokenRequest) ProtoMessage() {} + +func (x *GeneratePairingTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_service_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratePairingTokenRequest.ProtoReflect.Descriptor instead. 
+func (*GeneratePairingTokenRequest) Descriptor() ([]byte, []int) { + return file_v1_service_proto_rawDescGZIP(), []int{20} +} + +func (x *GeneratePairingTokenRequest) GetLabel() string { + if x != nil { + return x.Label + } + return "" +} + +func (x *GeneratePairingTokenRequest) GetTtlSeconds() int64 { + if x != nil { + return x.TtlSeconds + } + return 0 +} + +func (x *GeneratePairingTokenRequest) GetMaxUses() int32 { + if x != nil { + return x.MaxUses + } + return 0 +} + +func (x *GeneratePairingTokenRequest) GetPermissions() []*Multihost_Permission { + if x != nil { + return x.Permissions + } + return nil +} + +type GeneratePairingTokenResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` // the opaque pairing token string: ":#" + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GeneratePairingTokenResponse) Reset() { + *x = GeneratePairingTokenResponse{} + mi := &file_v1_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GeneratePairingTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratePairingTokenResponse) ProtoMessage() {} + +func (x *GeneratePairingTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_service_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratePairingTokenResponse.ProtoReflect.Descriptor instead. 
+func (*GeneratePairingTokenResponse) Descriptor() ([]byte, []int) { + return file_v1_service_proto_rawDescGZIP(), []int{21} +} + +func (x *GeneratePairingTokenResponse) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + type SummaryDashboardResponse_Summary struct { state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` @@ -1327,7 +1439,7 @@ type SummaryDashboardResponse_Summary struct { func (x *SummaryDashboardResponse_Summary) Reset() { *x = SummaryDashboardResponse_Summary{} - mi := &file_v1_service_proto_msgTypes[20] + mi := &file_v1_service_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1339,7 +1451,7 @@ func (x *SummaryDashboardResponse_Summary) String() string { func (*SummaryDashboardResponse_Summary) ProtoMessage() {} func (x *SummaryDashboardResponse_Summary) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[20] + mi := &file_v1_service_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1445,7 +1557,7 @@ type SummaryDashboardResponse_BackupChart struct { func (x *SummaryDashboardResponse_BackupChart) Reset() { *x = SummaryDashboardResponse_BackupChart{} - mi := &file_v1_service_proto_msgTypes[21] + mi := &file_v1_service_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1457,7 +1569,7 @@ func (x *SummaryDashboardResponse_BackupChart) String() string { func (*SummaryDashboardResponse_BackupChart) ProtoMessage() {} func (x *SummaryDashboardResponse_BackupChart) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[21] + mi := &file_v1_service_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1649,7 +1761,15 @@ const file_v1_service_proto_rawDesc = "" + 
"durationMs\x12+\n" + "\x06status\x18\x04 \x03(\x0e2\x13.v1.OperationStatusR\x06status\x12\x1f\n" + "\vbytes_added\x18\x05 \x03(\x03R\n" + - "bytesAdded2\x91\n" + + "bytesAdded\"\xab\x01\n" + + "\x1bGeneratePairingTokenRequest\x12\x14\n" + + "\x05label\x18\x01 \x01(\tR\x05label\x12\x1f\n" + + "\vttl_seconds\x18\x02 \x01(\x03R\n" + + "ttlSeconds\x12\x19\n" + + "\bmax_uses\x18\x03 \x01(\x05R\amaxUses\x12:\n" + + "\vpermissions\x18\x04 \x03(\v2\x18.v1.Multihost.PermissionR\vpermissions\"4\n" + + "\x1cGeneratePairingTokenResponse\x12\x14\n" + + "\x05token\x18\x01 \x01(\tR\x05token2\xee\n" + "\n" + "\bBackrest\x121\n" + "\tGetConfig\x12\x16.google.protobuf.Empty\x1a\n" + @@ -1680,7 +1800,8 @@ const file_v1_service_proto_rawDesc = "" + "\x0eGetDownloadURL\x12\x19.v1.GetDownloadURLRequest\x1a\x12.types.StringValue\"\x00\x12A\n" + "\fClearHistory\x12\x17.v1.ClearHistoryRequest\x1a\x16.google.protobuf.Empty\"\x00\x12;\n" + "\x10PathAutocomplete\x12\x12.types.StringValue\x1a\x11.types.StringList\"\x00\x12M\n" + - "\x13GetSummaryDashboard\x12\x16.google.protobuf.Empty\x1a\x1c.v1.SummaryDashboardResponse\"\x00B,Z*github.com/garethgeorge/backrest/gen/go/v1b\x06proto3" + "\x13GetSummaryDashboard\x12\x16.google.protobuf.Empty\x1a\x1c.v1.SummaryDashboardResponse\"\x00\x12[\n" + + "\x14GeneratePairingToken\x12\x1f.v1.GeneratePairingTokenRequest\x1a .v1.GeneratePairingTokenResponse\"\x00B,Z*github.com/garethgeorge/backrest/gen/go/v1b\x06proto3" var ( file_v1_service_proto_rawDescOnce sync.Once @@ -1695,7 +1816,7 @@ func file_v1_service_proto_rawDescGZIP() []byte { } var file_v1_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 24) var file_v1_service_proto_goTypes = []any{ (DoRepoTaskRequest_Task)(0), // 0: v1.DoRepoTaskRequest.Task (*BackupRequest)(nil), // 1: v1.BackupRequest @@ -1718,78 +1839,84 @@ var file_v1_service_proto_goTypes = 
[]any{ (*LsEntry)(nil), // 18: v1.LsEntry (*RunCommandRequest)(nil), // 19: v1.RunCommandRequest (*SummaryDashboardResponse)(nil), // 20: v1.SummaryDashboardResponse - (*SummaryDashboardResponse_Summary)(nil), // 21: v1.SummaryDashboardResponse.Summary - (*SummaryDashboardResponse_BackupChart)(nil), // 22: v1.SummaryDashboardResponse.BackupChart - (*Repo)(nil), // 23: v1.Repo - (OperationStatus)(0), // 24: v1.OperationStatus - (*emptypb.Empty)(nil), // 25: google.protobuf.Empty - (*Config)(nil), // 26: v1.Config - (*types.StringValue)(nil), // 27: types.StringValue - (*types.Int64Value)(nil), // 28: types.Int64Value - (*OperationEvent)(nil), // 29: v1.OperationEvent - (*OperationList)(nil), // 30: v1.OperationList - (*ResticSnapshotList)(nil), // 31: v1.ResticSnapshotList - (*types.BytesValue)(nil), // 32: types.BytesValue - (*types.StringList)(nil), // 33: types.StringList + (*GeneratePairingTokenRequest)(nil), // 21: v1.GeneratePairingTokenRequest + (*GeneratePairingTokenResponse)(nil), // 22: v1.GeneratePairingTokenResponse + (*SummaryDashboardResponse_Summary)(nil), // 23: v1.SummaryDashboardResponse.Summary + (*SummaryDashboardResponse_BackupChart)(nil), // 24: v1.SummaryDashboardResponse.BackupChart + (*Repo)(nil), // 25: v1.Repo + (*Multihost_Permission)(nil), // 26: v1.Multihost.Permission + (OperationStatus)(0), // 27: v1.OperationStatus + (*emptypb.Empty)(nil), // 28: google.protobuf.Empty + (*Config)(nil), // 29: v1.Config + (*types.StringValue)(nil), // 30: types.StringValue + (*types.Int64Value)(nil), // 31: types.Int64Value + (*OperationEvent)(nil), // 32: v1.OperationEvent + (*OperationList)(nil), // 33: v1.OperationList + (*ResticSnapshotList)(nil), // 34: v1.ResticSnapshotList + (*types.BytesValue)(nil), // 35: types.BytesValue + (*types.StringList)(nil), // 36: types.StringList } var file_v1_service_proto_depIdxs = []int32{ - 23, // 0: v1.CheckRepoExistsRequest.repo:type_name -> v1.Repo - 23, // 1: v1.AddRepoRequest.repo:type_name -> v1.Repo + 25, 
// 0: v1.CheckRepoExistsRequest.repo:type_name -> v1.Repo + 25, // 1: v1.AddRepoRequest.repo:type_name -> v1.Repo 0, // 2: v1.DoRepoTaskRequest.task:type_name -> v1.DoRepoTaskRequest.Task 2, // 3: v1.ClearHistoryRequest.selector:type_name -> v1.OpSelector 2, // 4: v1.GetOperationsRequest.selector:type_name -> v1.OpSelector 18, // 5: v1.ListSnapshotFilesResponse.entries:type_name -> v1.LsEntry - 21, // 6: v1.SummaryDashboardResponse.repo_summaries:type_name -> v1.SummaryDashboardResponse.Summary - 21, // 7: v1.SummaryDashboardResponse.plan_summaries:type_name -> v1.SummaryDashboardResponse.Summary - 22, // 8: v1.SummaryDashboardResponse.Summary.recent_backups:type_name -> v1.SummaryDashboardResponse.BackupChart - 24, // 9: v1.SummaryDashboardResponse.BackupChart.status:type_name -> v1.OperationStatus - 25, // 10: v1.Backrest.GetConfig:input_type -> google.protobuf.Empty - 26, // 11: v1.Backrest.SetConfig:input_type -> v1.Config - 3, // 12: v1.Backrest.SetupSftp:input_type -> v1.SetupSftpRequest - 5, // 13: v1.Backrest.CheckRepoExists:input_type -> v1.CheckRepoExistsRequest - 7, // 14: v1.Backrest.AddRepo:input_type -> v1.AddRepoRequest - 27, // 15: v1.Backrest.RemoveRepo:input_type -> types.StringValue - 25, // 16: v1.Backrest.GetOperationEvents:input_type -> google.protobuf.Empty - 12, // 17: v1.Backrest.GetOperations:input_type -> v1.GetOperationsRequest - 11, // 18: v1.Backrest.ListSnapshots:input_type -> v1.ListSnapshotsRequest - 14, // 19: v1.Backrest.ListSnapshotFiles:input_type -> v1.ListSnapshotFilesRequest - 1, // 20: v1.Backrest.Backup:input_type -> v1.BackupRequest - 8, // 21: v1.Backrest.DoRepoTask:input_type -> v1.DoRepoTaskRequest - 10, // 22: v1.Backrest.Forget:input_type -> v1.ForgetRequest - 13, // 23: v1.Backrest.Restore:input_type -> v1.RestoreSnapshotRequest - 28, // 24: v1.Backrest.Cancel:input_type -> types.Int64Value - 16, // 25: v1.Backrest.GetLogs:input_type -> v1.LogDataRequest - 19, // 26: v1.Backrest.RunCommand:input_type -> 
v1.RunCommandRequest - 17, // 27: v1.Backrest.GetDownloadURL:input_type -> v1.GetDownloadURLRequest - 9, // 28: v1.Backrest.ClearHistory:input_type -> v1.ClearHistoryRequest - 27, // 29: v1.Backrest.PathAutocomplete:input_type -> types.StringValue - 25, // 30: v1.Backrest.GetSummaryDashboard:input_type -> google.protobuf.Empty - 26, // 31: v1.Backrest.GetConfig:output_type -> v1.Config - 26, // 32: v1.Backrest.SetConfig:output_type -> v1.Config - 4, // 33: v1.Backrest.SetupSftp:output_type -> v1.SetupSftpResponse - 6, // 34: v1.Backrest.CheckRepoExists:output_type -> v1.CheckRepoExistsResponse - 26, // 35: v1.Backrest.AddRepo:output_type -> v1.Config - 26, // 36: v1.Backrest.RemoveRepo:output_type -> v1.Config - 29, // 37: v1.Backrest.GetOperationEvents:output_type -> v1.OperationEvent - 30, // 38: v1.Backrest.GetOperations:output_type -> v1.OperationList - 31, // 39: v1.Backrest.ListSnapshots:output_type -> v1.ResticSnapshotList - 15, // 40: v1.Backrest.ListSnapshotFiles:output_type -> v1.ListSnapshotFilesResponse - 25, // 41: v1.Backrest.Backup:output_type -> google.protobuf.Empty - 25, // 42: v1.Backrest.DoRepoTask:output_type -> google.protobuf.Empty - 25, // 43: v1.Backrest.Forget:output_type -> google.protobuf.Empty - 25, // 44: v1.Backrest.Restore:output_type -> google.protobuf.Empty - 25, // 45: v1.Backrest.Cancel:output_type -> google.protobuf.Empty - 32, // 46: v1.Backrest.GetLogs:output_type -> types.BytesValue - 28, // 47: v1.Backrest.RunCommand:output_type -> types.Int64Value - 27, // 48: v1.Backrest.GetDownloadURL:output_type -> types.StringValue - 25, // 49: v1.Backrest.ClearHistory:output_type -> google.protobuf.Empty - 33, // 50: v1.Backrest.PathAutocomplete:output_type -> types.StringList - 20, // 51: v1.Backrest.GetSummaryDashboard:output_type -> v1.SummaryDashboardResponse - 31, // [31:52] is the sub-list for method output_type - 10, // [10:31] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, 
// [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 23, // 6: v1.SummaryDashboardResponse.repo_summaries:type_name -> v1.SummaryDashboardResponse.Summary + 23, // 7: v1.SummaryDashboardResponse.plan_summaries:type_name -> v1.SummaryDashboardResponse.Summary + 26, // 8: v1.GeneratePairingTokenRequest.permissions:type_name -> v1.Multihost.Permission + 24, // 9: v1.SummaryDashboardResponse.Summary.recent_backups:type_name -> v1.SummaryDashboardResponse.BackupChart + 27, // 10: v1.SummaryDashboardResponse.BackupChart.status:type_name -> v1.OperationStatus + 28, // 11: v1.Backrest.GetConfig:input_type -> google.protobuf.Empty + 29, // 12: v1.Backrest.SetConfig:input_type -> v1.Config + 3, // 13: v1.Backrest.SetupSftp:input_type -> v1.SetupSftpRequest + 5, // 14: v1.Backrest.CheckRepoExists:input_type -> v1.CheckRepoExistsRequest + 7, // 15: v1.Backrest.AddRepo:input_type -> v1.AddRepoRequest + 30, // 16: v1.Backrest.RemoveRepo:input_type -> types.StringValue + 28, // 17: v1.Backrest.GetOperationEvents:input_type -> google.protobuf.Empty + 12, // 18: v1.Backrest.GetOperations:input_type -> v1.GetOperationsRequest + 11, // 19: v1.Backrest.ListSnapshots:input_type -> v1.ListSnapshotsRequest + 14, // 20: v1.Backrest.ListSnapshotFiles:input_type -> v1.ListSnapshotFilesRequest + 1, // 21: v1.Backrest.Backup:input_type -> v1.BackupRequest + 8, // 22: v1.Backrest.DoRepoTask:input_type -> v1.DoRepoTaskRequest + 10, // 23: v1.Backrest.Forget:input_type -> v1.ForgetRequest + 13, // 24: v1.Backrest.Restore:input_type -> v1.RestoreSnapshotRequest + 31, // 25: v1.Backrest.Cancel:input_type -> types.Int64Value + 16, // 26: v1.Backrest.GetLogs:input_type -> v1.LogDataRequest + 19, // 27: v1.Backrest.RunCommand:input_type -> v1.RunCommandRequest + 17, // 28: v1.Backrest.GetDownloadURL:input_type -> v1.GetDownloadURLRequest + 9, // 29: v1.Backrest.ClearHistory:input_type -> v1.ClearHistoryRequest + 30, // 30: 
v1.Backrest.PathAutocomplete:input_type -> types.StringValue + 28, // 31: v1.Backrest.GetSummaryDashboard:input_type -> google.protobuf.Empty + 21, // 32: v1.Backrest.GeneratePairingToken:input_type -> v1.GeneratePairingTokenRequest + 29, // 33: v1.Backrest.GetConfig:output_type -> v1.Config + 29, // 34: v1.Backrest.SetConfig:output_type -> v1.Config + 4, // 35: v1.Backrest.SetupSftp:output_type -> v1.SetupSftpResponse + 6, // 36: v1.Backrest.CheckRepoExists:output_type -> v1.CheckRepoExistsResponse + 29, // 37: v1.Backrest.AddRepo:output_type -> v1.Config + 29, // 38: v1.Backrest.RemoveRepo:output_type -> v1.Config + 32, // 39: v1.Backrest.GetOperationEvents:output_type -> v1.OperationEvent + 33, // 40: v1.Backrest.GetOperations:output_type -> v1.OperationList + 34, // 41: v1.Backrest.ListSnapshots:output_type -> v1.ResticSnapshotList + 15, // 42: v1.Backrest.ListSnapshotFiles:output_type -> v1.ListSnapshotFilesResponse + 28, // 43: v1.Backrest.Backup:output_type -> google.protobuf.Empty + 28, // 44: v1.Backrest.DoRepoTask:output_type -> google.protobuf.Empty + 28, // 45: v1.Backrest.Forget:output_type -> google.protobuf.Empty + 28, // 46: v1.Backrest.Restore:output_type -> google.protobuf.Empty + 28, // 47: v1.Backrest.Cancel:output_type -> google.protobuf.Empty + 35, // 48: v1.Backrest.GetLogs:output_type -> types.BytesValue + 31, // 49: v1.Backrest.RunCommand:output_type -> types.Int64Value + 30, // 50: v1.Backrest.GetDownloadURL:output_type -> types.StringValue + 28, // 51: v1.Backrest.ClearHistory:output_type -> google.protobuf.Empty + 36, // 52: v1.Backrest.PathAutocomplete:output_type -> types.StringList + 20, // 53: v1.Backrest.GetSummaryDashboard:output_type -> v1.SummaryDashboardResponse + 22, // 54: v1.Backrest.GeneratePairingToken:output_type -> v1.GeneratePairingTokenResponse + 33, // [33:55] is the sub-list for method output_type + 11, // [11:33] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // 
[11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name } func init() { file_v1_service_proto_init() } @@ -1808,7 +1935,7 @@ func file_v1_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_v1_service_proto_rawDesc), len(file_v1_service_proto_rawDesc)), NumEnums: 1, - NumMessages: 22, + NumMessages: 24, NumExtensions: 0, NumServices: 1, }, diff --git a/gen/go/v1/service_grpc.pb.go b/gen/go/v1/service_grpc.pb.go index 59795253..cbea140c 100644 --- a/gen/go/v1/service_grpc.pb.go +++ b/gen/go/v1/service_grpc.pb.go @@ -21,27 +21,28 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - Backrest_GetConfig_FullMethodName = "/v1.Backrest/GetConfig" - Backrest_SetConfig_FullMethodName = "/v1.Backrest/SetConfig" - Backrest_SetupSftp_FullMethodName = "/v1.Backrest/SetupSftp" - Backrest_CheckRepoExists_FullMethodName = "/v1.Backrest/CheckRepoExists" - Backrest_AddRepo_FullMethodName = "/v1.Backrest/AddRepo" - Backrest_RemoveRepo_FullMethodName = "/v1.Backrest/RemoveRepo" - Backrest_GetOperationEvents_FullMethodName = "/v1.Backrest/GetOperationEvents" - Backrest_GetOperations_FullMethodName = "/v1.Backrest/GetOperations" - Backrest_ListSnapshots_FullMethodName = "/v1.Backrest/ListSnapshots" - Backrest_ListSnapshotFiles_FullMethodName = "/v1.Backrest/ListSnapshotFiles" - Backrest_Backup_FullMethodName = "/v1.Backrest/Backup" - Backrest_DoRepoTask_FullMethodName = "/v1.Backrest/DoRepoTask" - Backrest_Forget_FullMethodName = "/v1.Backrest/Forget" - Backrest_Restore_FullMethodName = "/v1.Backrest/Restore" - Backrest_Cancel_FullMethodName = "/v1.Backrest/Cancel" - Backrest_GetLogs_FullMethodName = "/v1.Backrest/GetLogs" - Backrest_RunCommand_FullMethodName = "/v1.Backrest/RunCommand" - Backrest_GetDownloadURL_FullMethodName = "/v1.Backrest/GetDownloadURL" - Backrest_ClearHistory_FullMethodName = "/v1.Backrest/ClearHistory" - Backrest_PathAutocomplete_FullMethodName = 
"/v1.Backrest/PathAutocomplete" - Backrest_GetSummaryDashboard_FullMethodName = "/v1.Backrest/GetSummaryDashboard" + Backrest_GetConfig_FullMethodName = "/v1.Backrest/GetConfig" + Backrest_SetConfig_FullMethodName = "/v1.Backrest/SetConfig" + Backrest_SetupSftp_FullMethodName = "/v1.Backrest/SetupSftp" + Backrest_CheckRepoExists_FullMethodName = "/v1.Backrest/CheckRepoExists" + Backrest_AddRepo_FullMethodName = "/v1.Backrest/AddRepo" + Backrest_RemoveRepo_FullMethodName = "/v1.Backrest/RemoveRepo" + Backrest_GetOperationEvents_FullMethodName = "/v1.Backrest/GetOperationEvents" + Backrest_GetOperations_FullMethodName = "/v1.Backrest/GetOperations" + Backrest_ListSnapshots_FullMethodName = "/v1.Backrest/ListSnapshots" + Backrest_ListSnapshotFiles_FullMethodName = "/v1.Backrest/ListSnapshotFiles" + Backrest_Backup_FullMethodName = "/v1.Backrest/Backup" + Backrest_DoRepoTask_FullMethodName = "/v1.Backrest/DoRepoTask" + Backrest_Forget_FullMethodName = "/v1.Backrest/Forget" + Backrest_Restore_FullMethodName = "/v1.Backrest/Restore" + Backrest_Cancel_FullMethodName = "/v1.Backrest/Cancel" + Backrest_GetLogs_FullMethodName = "/v1.Backrest/GetLogs" + Backrest_RunCommand_FullMethodName = "/v1.Backrest/RunCommand" + Backrest_GetDownloadURL_FullMethodName = "/v1.Backrest/GetDownloadURL" + Backrest_ClearHistory_FullMethodName = "/v1.Backrest/ClearHistory" + Backrest_PathAutocomplete_FullMethodName = "/v1.Backrest/PathAutocomplete" + Backrest_GetSummaryDashboard_FullMethodName = "/v1.Backrest/GetSummaryDashboard" + Backrest_GeneratePairingToken_FullMethodName = "/v1.Backrest/GeneratePairingToken" ) // BackrestClient is the client API for Backrest service. @@ -80,6 +81,9 @@ type BackrestClient interface { PathAutocomplete(ctx context.Context, in *types.StringValue, opts ...grpc.CallOption) (*types.StringList, error) // GetSummaryDashboard returns data for the dashboard view. 
GetSummaryDashboard(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SummaryDashboardResponse, error) + // GeneratePairingToken creates a new pairing token on the server that can be shared with clients to simplify peering. + // The token format is ":#" — an opaque string the client pastes when adding a known host. + GeneratePairingToken(ctx context.Context, in *GeneratePairingTokenRequest, opts ...grpc.CallOption) (*GeneratePairingTokenResponse, error) } type backrestClient struct { @@ -318,6 +322,16 @@ func (c *backrestClient) GetSummaryDashboard(ctx context.Context, in *emptypb.Em return out, nil } +func (c *backrestClient) GeneratePairingToken(ctx context.Context, in *GeneratePairingTokenRequest, opts ...grpc.CallOption) (*GeneratePairingTokenResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GeneratePairingTokenResponse) + err := c.cc.Invoke(ctx, Backrest_GeneratePairingToken_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // BackrestServer is the server API for Backrest service. // All implementations must embed UnimplementedBackrestServer // for forward compatibility. @@ -354,6 +368,9 @@ type BackrestServer interface { PathAutocomplete(context.Context, *types.StringValue) (*types.StringList, error) // GetSummaryDashboard returns data for the dashboard view. GetSummaryDashboard(context.Context, *emptypb.Empty) (*SummaryDashboardResponse, error) + // GeneratePairingToken creates a new pairing token on the server that can be shared with clients to simplify peering. + // The token format is ":#" — an opaque string the client pastes when adding a known host. 
+ GeneratePairingToken(context.Context, *GeneratePairingTokenRequest) (*GeneratePairingTokenResponse, error) mustEmbedUnimplementedBackrestServer() } @@ -427,6 +444,9 @@ func (UnimplementedBackrestServer) PathAutocomplete(context.Context, *types.Stri func (UnimplementedBackrestServer) GetSummaryDashboard(context.Context, *emptypb.Empty) (*SummaryDashboardResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSummaryDashboard not implemented") } +func (UnimplementedBackrestServer) GeneratePairingToken(context.Context, *GeneratePairingTokenRequest) (*GeneratePairingTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GeneratePairingToken not implemented") +} func (UnimplementedBackrestServer) mustEmbedUnimplementedBackrestServer() {} func (UnimplementedBackrestServer) testEmbeddedByValue() {} @@ -812,6 +832,24 @@ func _Backrest_GetSummaryDashboard_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _Backrest_GeneratePairingToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GeneratePairingTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackrestServer).GeneratePairingToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Backrest_GeneratePairingToken_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackrestServer).GeneratePairingToken(ctx, req.(*GeneratePairingTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Backrest_ServiceDesc is the grpc.ServiceDesc for Backrest service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -895,6 +933,10 @@ var Backrest_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetSummaryDashboard", Handler: _Backrest_GetSummaryDashboard_Handler, }, + { + MethodName: "GeneratePairingToken", + Handler: _Backrest_GeneratePairingToken_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/gen/go/v1/v1connect/service.connect.go b/gen/go/v1/v1connect/service.connect.go index 34efcc24..226e7daa 100644 --- a/gen/go/v1/v1connect/service.connect.go +++ b/gen/go/v1/v1connect/service.connect.go @@ -82,6 +82,9 @@ const ( // BackrestGetSummaryDashboardProcedure is the fully-qualified name of the Backrest's // GetSummaryDashboard RPC. BackrestGetSummaryDashboardProcedure = "/v1.Backrest/GetSummaryDashboard" + // BackrestGeneratePairingTokenProcedure is the fully-qualified name of the Backrest's + // GeneratePairingToken RPC. + BackrestGeneratePairingTokenProcedure = "/v1.Backrest/GeneratePairingToken" ) // BackrestClient is a client for the v1.Backrest service. @@ -118,6 +121,9 @@ type BackrestClient interface { PathAutocomplete(context.Context, *connect.Request[types.StringValue]) (*connect.Response[types.StringList], error) // GetSummaryDashboard returns data for the dashboard view. GetSummaryDashboard(context.Context, *connect.Request[emptypb.Empty]) (*connect.Response[v1.SummaryDashboardResponse], error) + // GeneratePairingToken creates a new pairing token on the server that can be shared with clients to simplify peering. + // The token format is ":#" — an opaque string the client pastes when adding a known host. + GeneratePairingToken(context.Context, *connect.Request[v1.GeneratePairingTokenRequest]) (*connect.Response[v1.GeneratePairingTokenResponse], error) } // NewBackrestClient constructs a client for the v1.Backrest service. 
By default, it uses the @@ -257,32 +263,39 @@ func NewBackrestClient(httpClient connect.HTTPClient, baseURL string, opts ...co connect.WithSchema(backrestMethods.ByName("GetSummaryDashboard")), connect.WithClientOptions(opts...), ), + generatePairingToken: connect.NewClient[v1.GeneratePairingTokenRequest, v1.GeneratePairingTokenResponse]( + httpClient, + baseURL+BackrestGeneratePairingTokenProcedure, + connect.WithSchema(backrestMethods.ByName("GeneratePairingToken")), + connect.WithClientOptions(opts...), + ), } } // backrestClient implements BackrestClient. type backrestClient struct { - getConfig *connect.Client[emptypb.Empty, v1.Config] - setConfig *connect.Client[v1.Config, v1.Config] - setupSftp *connect.Client[v1.SetupSftpRequest, v1.SetupSftpResponse] - checkRepoExists *connect.Client[v1.CheckRepoExistsRequest, v1.CheckRepoExistsResponse] - addRepo *connect.Client[v1.AddRepoRequest, v1.Config] - removeRepo *connect.Client[types.StringValue, v1.Config] - getOperationEvents *connect.Client[emptypb.Empty, v1.OperationEvent] - getOperations *connect.Client[v1.GetOperationsRequest, v1.OperationList] - listSnapshots *connect.Client[v1.ListSnapshotsRequest, v1.ResticSnapshotList] - listSnapshotFiles *connect.Client[v1.ListSnapshotFilesRequest, v1.ListSnapshotFilesResponse] - backup *connect.Client[v1.BackupRequest, emptypb.Empty] - doRepoTask *connect.Client[v1.DoRepoTaskRequest, emptypb.Empty] - forget *connect.Client[v1.ForgetRequest, emptypb.Empty] - restore *connect.Client[v1.RestoreSnapshotRequest, emptypb.Empty] - cancel *connect.Client[types.Int64Value, emptypb.Empty] - getLogs *connect.Client[v1.LogDataRequest, types.BytesValue] - runCommand *connect.Client[v1.RunCommandRequest, types.Int64Value] - getDownloadURL *connect.Client[v1.GetDownloadURLRequest, types.StringValue] - clearHistory *connect.Client[v1.ClearHistoryRequest, emptypb.Empty] - pathAutocomplete *connect.Client[types.StringValue, types.StringList] - getSummaryDashboard 
*connect.Client[emptypb.Empty, v1.SummaryDashboardResponse] + getConfig *connect.Client[emptypb.Empty, v1.Config] + setConfig *connect.Client[v1.Config, v1.Config] + setupSftp *connect.Client[v1.SetupSftpRequest, v1.SetupSftpResponse] + checkRepoExists *connect.Client[v1.CheckRepoExistsRequest, v1.CheckRepoExistsResponse] + addRepo *connect.Client[v1.AddRepoRequest, v1.Config] + removeRepo *connect.Client[types.StringValue, v1.Config] + getOperationEvents *connect.Client[emptypb.Empty, v1.OperationEvent] + getOperations *connect.Client[v1.GetOperationsRequest, v1.OperationList] + listSnapshots *connect.Client[v1.ListSnapshotsRequest, v1.ResticSnapshotList] + listSnapshotFiles *connect.Client[v1.ListSnapshotFilesRequest, v1.ListSnapshotFilesResponse] + backup *connect.Client[v1.BackupRequest, emptypb.Empty] + doRepoTask *connect.Client[v1.DoRepoTaskRequest, emptypb.Empty] + forget *connect.Client[v1.ForgetRequest, emptypb.Empty] + restore *connect.Client[v1.RestoreSnapshotRequest, emptypb.Empty] + cancel *connect.Client[types.Int64Value, emptypb.Empty] + getLogs *connect.Client[v1.LogDataRequest, types.BytesValue] + runCommand *connect.Client[v1.RunCommandRequest, types.Int64Value] + getDownloadURL *connect.Client[v1.GetDownloadURLRequest, types.StringValue] + clearHistory *connect.Client[v1.ClearHistoryRequest, emptypb.Empty] + pathAutocomplete *connect.Client[types.StringValue, types.StringList] + getSummaryDashboard *connect.Client[emptypb.Empty, v1.SummaryDashboardResponse] + generatePairingToken *connect.Client[v1.GeneratePairingTokenRequest, v1.GeneratePairingTokenResponse] } // GetConfig calls v1.Backrest.GetConfig. @@ -390,6 +403,11 @@ func (c *backrestClient) GetSummaryDashboard(ctx context.Context, req *connect.R return c.getSummaryDashboard.CallUnary(ctx, req) } +// GeneratePairingToken calls v1.Backrest.GeneratePairingToken. 
+func (c *backrestClient) GeneratePairingToken(ctx context.Context, req *connect.Request[v1.GeneratePairingTokenRequest]) (*connect.Response[v1.GeneratePairingTokenResponse], error) { + return c.generatePairingToken.CallUnary(ctx, req) +} + // BackrestHandler is an implementation of the v1.Backrest service. type BackrestHandler interface { GetConfig(context.Context, *connect.Request[emptypb.Empty]) (*connect.Response[v1.Config], error) @@ -424,6 +442,9 @@ type BackrestHandler interface { PathAutocomplete(context.Context, *connect.Request[types.StringValue]) (*connect.Response[types.StringList], error) // GetSummaryDashboard returns data for the dashboard view. GetSummaryDashboard(context.Context, *connect.Request[emptypb.Empty]) (*connect.Response[v1.SummaryDashboardResponse], error) + // GeneratePairingToken creates a new pairing token on the server that can be shared with clients to simplify peering. + // The token format is ":#" — an opaque string the client pastes when adding a known host. + GeneratePairingToken(context.Context, *connect.Request[v1.GeneratePairingTokenRequest]) (*connect.Response[v1.GeneratePairingTokenResponse], error) } // NewBackrestHandler builds an HTTP handler from the service implementation. 
It returns the path on @@ -559,6 +580,12 @@ func NewBackrestHandler(svc BackrestHandler, opts ...connect.HandlerOption) (str connect.WithSchema(backrestMethods.ByName("GetSummaryDashboard")), connect.WithHandlerOptions(opts...), ) + backrestGeneratePairingTokenHandler := connect.NewUnaryHandler( + BackrestGeneratePairingTokenProcedure, + svc.GeneratePairingToken, + connect.WithSchema(backrestMethods.ByName("GeneratePairingToken")), + connect.WithHandlerOptions(opts...), + ) return "/v1.Backrest/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case BackrestGetConfigProcedure: @@ -603,6 +630,8 @@ func NewBackrestHandler(svc BackrestHandler, opts ...connect.HandlerOption) (str backrestPathAutocompleteHandler.ServeHTTP(w, r) case BackrestGetSummaryDashboardProcedure: backrestGetSummaryDashboardHandler.ServeHTTP(w, r) + case BackrestGeneratePairingTokenProcedure: + backrestGeneratePairingTokenHandler.ServeHTTP(w, r) default: http.NotFound(w, r) } @@ -695,3 +724,7 @@ func (UnimplementedBackrestHandler) PathAutocomplete(context.Context, *connect.R func (UnimplementedBackrestHandler) GetSummaryDashboard(context.Context, *connect.Request[emptypb.Empty]) (*connect.Response[v1.SummaryDashboardResponse], error) { return nil, connect.NewError(connect.CodeUnimplemented, errors.New("v1.Backrest.GetSummaryDashboard is not implemented")) } + +func (UnimplementedBackrestHandler) GeneratePairingToken(context.Context, *connect.Request[v1.GeneratePairingTokenRequest]) (*connect.Response[v1.GeneratePairingTokenResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("v1.Backrest.GeneratePairingToken is not implemented")) +} diff --git a/gen/go/v1sync/syncservice.pb.go b/gen/go/v1sync/syncservice.pb.go index 33251c06..51c23f79 100644 --- a/gen/go/v1sync/syncservice.pb.go +++ b/gen/go/v1sync/syncservice.pb.go @@ -142,7 +142,7 @@ func (x SyncStreamItem_RepoConnectionState) Number() protoreflect.EnumNumber { // 
Deprecated: Use SyncStreamItem_RepoConnectionState.Descriptor instead. func (SyncStreamItem_RepoConnectionState) EnumDescriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 0} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 0} } type SyncStateStreamRequest struct { @@ -669,6 +669,118 @@ func (x *SetConfigRequest) GetPlansToDelete() []string { return nil } +type SetRemoteClientConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + PeerKeyid string `protobuf:"bytes,1,opt,name=peer_keyid,json=peerKeyid,proto3" json:"peer_keyid,omitempty"` // The key ID of the connected peer to push config to. + Repos []*v1.Repo `protobuf:"bytes,2,rep,name=repos,proto3" json:"repos,omitempty"` // Repos to create or update on the peer. + Plans []*v1.Plan `protobuf:"bytes,3,rep,name=plans,proto3" json:"plans,omitempty"` // Plans to create or update on the peer. + ReposToDelete []string `protobuf:"bytes,4,rep,name=repos_to_delete,json=reposToDelete,proto3" json:"repos_to_delete,omitempty"` // Repo IDs to delete on the peer. + PlansToDelete []string `protobuf:"bytes,5,rep,name=plans_to_delete,json=plansToDelete,proto3" json:"plans_to_delete,omitempty"` // Plan IDs to delete on the peer. 
+ unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetRemoteClientConfigRequest) Reset() { + *x = SetRemoteClientConfigRequest{} + mi := &file_v1sync_syncservice_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetRemoteClientConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetRemoteClientConfigRequest) ProtoMessage() {} + +func (x *SetRemoteClientConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1sync_syncservice_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetRemoteClientConfigRequest.ProtoReflect.Descriptor instead. +func (*SetRemoteClientConfigRequest) Descriptor() ([]byte, []int) { + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{9} +} + +func (x *SetRemoteClientConfigRequest) GetPeerKeyid() string { + if x != nil { + return x.PeerKeyid + } + return "" +} + +func (x *SetRemoteClientConfigRequest) GetRepos() []*v1.Repo { + if x != nil { + return x.Repos + } + return nil +} + +func (x *SetRemoteClientConfigRequest) GetPlans() []*v1.Plan { + if x != nil { + return x.Plans + } + return nil +} + +func (x *SetRemoteClientConfigRequest) GetReposToDelete() []string { + if x != nil { + return x.ReposToDelete + } + return nil +} + +func (x *SetRemoteClientConfigRequest) GetPlansToDelete() []string { + if x != nil { + return x.PlansToDelete + } + return nil +} + +type SetRemoteClientConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetRemoteClientConfigResponse) Reset() { + *x = SetRemoteClientConfigResponse{} + mi := &file_v1sync_syncservice_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) +} + +func (x *SetRemoteClientConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetRemoteClientConfigResponse) ProtoMessage() {} + +func (x *SetRemoteClientConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1sync_syncservice_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetRemoteClientConfigResponse.ProtoReflect.Descriptor instead. +func (*SetRemoteClientConfigResponse) Descriptor() ([]byte, []int) { + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{10} +} + type RemoteConfig struct { state protoimpl.MessageState `protogen:"open.v1"` Modno int32 `protobuf:"varint,1,opt,name=modno,proto3" json:"modno,omitempty"` // The modno of the config. @@ -681,7 +793,7 @@ type RemoteConfig struct { func (x *RemoteConfig) Reset() { *x = RemoteConfig{} - mi := &file_v1sync_syncservice_proto_msgTypes[9] + mi := &file_v1sync_syncservice_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -693,7 +805,7 @@ func (x *RemoteConfig) String() string { func (*RemoteConfig) ProtoMessage() {} func (x *RemoteConfig) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[9] + mi := &file_v1sync_syncservice_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -706,7 +818,7 @@ func (x *RemoteConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoteConfig.ProtoReflect.Descriptor instead. 
func (*RemoteConfig) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{9} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11} } func (x *RemoteConfig) GetModno() int32 { @@ -747,7 +859,7 @@ type AuthorizationToken struct { func (x *AuthorizationToken) Reset() { *x = AuthorizationToken{} - mi := &file_v1sync_syncservice_proto_msgTypes[10] + mi := &file_v1sync_syncservice_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -759,7 +871,7 @@ func (x *AuthorizationToken) String() string { func (*AuthorizationToken) ProtoMessage() {} func (x *AuthorizationToken) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[10] + mi := &file_v1sync_syncservice_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -772,7 +884,7 @@ func (x *AuthorizationToken) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthorizationToken.ProtoReflect.Descriptor instead. 
func (*AuthorizationToken) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{10} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{12} } func (x *AuthorizationToken) GetPublicKey() *v1.PublicKey { @@ -796,8 +908,9 @@ type SyncStreamItem struct { // *SyncStreamItem_SignedMessage // *SyncStreamItem_Handshake // *SyncStreamItem_Heartbeat - // *SyncStreamItem_RequestOperations + // *SyncStreamItem_OperationManifest // *SyncStreamItem_ReceiveOperations + // *SyncStreamItem_RequestOperationData // *SyncStreamItem_ReceiveConfig // *SyncStreamItem_SetConfig // *SyncStreamItem_RequestResources @@ -812,7 +925,7 @@ type SyncStreamItem struct { func (x *SyncStreamItem) Reset() { *x = SyncStreamItem{} - mi := &file_v1sync_syncservice_proto_msgTypes[11] + mi := &file_v1sync_syncservice_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -824,7 +937,7 @@ func (x *SyncStreamItem) String() string { func (*SyncStreamItem) ProtoMessage() {} func (x *SyncStreamItem) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[11] + mi := &file_v1sync_syncservice_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -837,7 +950,7 @@ func (x *SyncStreamItem) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncStreamItem.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13} } func (x *SyncStreamItem) GetAction() isSyncStreamItem_Action { @@ -874,10 +987,10 @@ func (x *SyncStreamItem) GetHeartbeat() *SyncStreamItem_SyncActionHeartbeat { return nil } -func (x *SyncStreamItem) GetRequestOperations() *SyncStreamItem_SyncActionRequestOperations { +func (x *SyncStreamItem) GetOperationManifest() *SyncStreamItem_SyncActionOperationManifest { if x != nil { - if x, ok := x.Action.(*SyncStreamItem_RequestOperations); ok { - return x.RequestOperations + if x, ok := x.Action.(*SyncStreamItem_OperationManifest); ok { + return x.OperationManifest } } return nil @@ -892,6 +1005,15 @@ func (x *SyncStreamItem) GetReceiveOperations() *SyncStreamItem_SyncActionReceiv return nil } +func (x *SyncStreamItem) GetRequestOperationData() *SyncStreamItem_SyncActionRequestOperationData { + if x != nil { + if x, ok := x.Action.(*SyncStreamItem_RequestOperationData); ok { + return x.RequestOperationData + } + } + return nil +} + func (x *SyncStreamItem) GetReceiveConfig() *SyncStreamItem_SyncActionReceiveConfig { if x != nil { if x, ok := x.Action.(*SyncStreamItem_ReceiveConfig); ok { @@ -971,16 +1093,20 @@ type SyncStreamItem_Heartbeat struct { Heartbeat *SyncStreamItem_SyncActionHeartbeat `protobuf:"bytes,4,opt,name=heartbeat,proto3,oneof"` } -type SyncStreamItem_RequestOperations struct { - RequestOperations *SyncStreamItem_SyncActionRequestOperations `protobuf:"bytes,20,opt,name=request_operations,json=requestOperations,proto3,oneof"` +type SyncStreamItem_OperationManifest struct { + OperationManifest *SyncStreamItem_SyncActionOperationManifest `protobuf:"bytes,20,opt,name=operation_manifest,json=operationManifest,proto3,oneof"` } type SyncStreamItem_ReceiveOperations struct { ReceiveOperations *SyncStreamItem_SyncActionReceiveOperations 
`protobuf:"bytes,21,opt,name=receive_operations,json=receiveOperations,proto3,oneof"` } +type SyncStreamItem_RequestOperationData struct { + RequestOperationData *SyncStreamItem_SyncActionRequestOperationData `protobuf:"bytes,22,opt,name=request_operation_data,json=requestOperationData,proto3,oneof"` +} + type SyncStreamItem_ReceiveConfig struct { - ReceiveConfig *SyncStreamItem_SyncActionReceiveConfig `protobuf:"bytes,22,opt,name=receive_config,json=receiveConfig,proto3,oneof"` + ReceiveConfig *SyncStreamItem_SyncActionReceiveConfig `protobuf:"bytes,23,opt,name=receive_config,json=receiveConfig,proto3,oneof"` } type SyncStreamItem_SetConfig struct { @@ -1013,10 +1139,12 @@ func (*SyncStreamItem_Handshake) isSyncStreamItem_Action() {} func (*SyncStreamItem_Heartbeat) isSyncStreamItem_Action() {} -func (*SyncStreamItem_RequestOperations) isSyncStreamItem_Action() {} +func (*SyncStreamItem_OperationManifest) isSyncStreamItem_Action() {} func (*SyncStreamItem_ReceiveOperations) isSyncStreamItem_Action() {} +func (*SyncStreamItem_RequestOperationData) isSyncStreamItem_Action() {} + func (*SyncStreamItem_ReceiveConfig) isSyncStreamItem_Action() {} func (*SyncStreamItem_SetConfig) isSyncStreamItem_Action() {} @@ -1036,13 +1164,14 @@ type SyncStreamItem_SyncActionHandshake struct { ProtocolVersion int64 `protobuf:"varint,1,opt,name=protocol_version,json=protocolVersion,proto3" json:"protocol_version,omitempty"` PublicKey *v1.PublicKey `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` InstanceId *v1.SignedMessage `protobuf:"bytes,3,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + PairingSecret string `protobuf:"bytes,4,opt,name=pairing_secret,json=pairingSecret,proto3" json:"pairing_secret,omitempty"` // optional one-time secret from a pairing token, used to auto-authorize a new client unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *SyncStreamItem_SyncActionHandshake) 
Reset() { *x = SyncStreamItem_SyncActionHandshake{} - mi := &file_v1sync_syncservice_proto_msgTypes[12] + mi := &file_v1sync_syncservice_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1054,7 +1183,7 @@ func (x *SyncStreamItem_SyncActionHandshake) String() string { func (*SyncStreamItem_SyncActionHandshake) ProtoMessage() {} func (x *SyncStreamItem_SyncActionHandshake) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[12] + mi := &file_v1sync_syncservice_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1067,7 +1196,7 @@ func (x *SyncStreamItem_SyncActionHandshake) ProtoReflect() protoreflect.Message // Deprecated: Use SyncStreamItem_SyncActionHandshake.ProtoReflect.Descriptor instead. func (*SyncStreamItem_SyncActionHandshake) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 0} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 0} } func (x *SyncStreamItem_SyncActionHandshake) GetProtocolVersion() int64 { @@ -1091,6 +1220,13 @@ func (x *SyncStreamItem_SyncActionHandshake) GetInstanceId() *v1.SignedMessage { return nil } +func (x *SyncStreamItem_SyncActionHandshake) GetPairingSecret() string { + if x != nil { + return x.PairingSecret + } + return "" +} + // SyncActionHeartbeat is sent periodically to keep the connection alive. 
type SyncStreamItem_SyncActionHeartbeat struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1100,7 +1236,7 @@ type SyncStreamItem_SyncActionHeartbeat struct { func (x *SyncStreamItem_SyncActionHeartbeat) Reset() { *x = SyncStreamItem_SyncActionHeartbeat{} - mi := &file_v1sync_syncservice_proto_msgTypes[13] + mi := &file_v1sync_syncservice_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1112,7 +1248,7 @@ func (x *SyncStreamItem_SyncActionHeartbeat) String() string { func (*SyncStreamItem_SyncActionHeartbeat) ProtoMessage() {} func (x *SyncStreamItem_SyncActionHeartbeat) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[13] + mi := &file_v1sync_syncservice_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1125,7 +1261,7 @@ func (x *SyncStreamItem_SyncActionHeartbeat) ProtoReflect() protoreflect.Message // Deprecated: Use SyncStreamItem_SyncActionHeartbeat.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncActionHeartbeat) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 1} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 1} } type SyncStreamItem_SyncActionReceiveConfig struct { @@ -1137,7 +1273,7 @@ type SyncStreamItem_SyncActionReceiveConfig struct { func (x *SyncStreamItem_SyncActionReceiveConfig) Reset() { *x = SyncStreamItem_SyncActionReceiveConfig{} - mi := &file_v1sync_syncservice_proto_msgTypes[14] + mi := &file_v1sync_syncservice_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1149,7 +1285,7 @@ func (x *SyncStreamItem_SyncActionReceiveConfig) String() string { func (*SyncStreamItem_SyncActionReceiveConfig) ProtoMessage() {} func (x *SyncStreamItem_SyncActionReceiveConfig) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[14] + mi := &file_v1sync_syncservice_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1162,7 +1298,7 @@ func (x *SyncStreamItem_SyncActionReceiveConfig) ProtoReflect() protoreflect.Mes // Deprecated: Use SyncStreamItem_SyncActionReceiveConfig.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncActionReceiveConfig) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 2} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 2} } func (x *SyncStreamItem_SyncActionReceiveConfig) GetConfig() *RemoteConfig { @@ -1184,7 +1320,7 @@ type SyncStreamItem_SyncActionSetConfig struct { func (x *SyncStreamItem_SyncActionSetConfig) Reset() { *x = SyncStreamItem_SyncActionSetConfig{} - mi := &file_v1sync_syncservice_proto_msgTypes[15] + mi := &file_v1sync_syncservice_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1196,7 +1332,7 @@ func (x *SyncStreamItem_SyncActionSetConfig) String() string { func (*SyncStreamItem_SyncActionSetConfig) ProtoMessage() {} func (x *SyncStreamItem_SyncActionSetConfig) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[15] + mi := &file_v1sync_syncservice_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1209,7 +1345,7 @@ func (x *SyncStreamItem_SyncActionSetConfig) ProtoReflect() protoreflect.Message // Deprecated: Use SyncStreamItem_SyncActionSetConfig.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncActionSetConfig) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 3} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 3} } func (x *SyncStreamItem_SyncActionSetConfig) GetRepos() []*v1.Repo { @@ -1248,7 +1384,7 @@ type SyncStreamItem_SyncActionRequestResources struct { func (x *SyncStreamItem_SyncActionRequestResources) Reset() { *x = SyncStreamItem_SyncActionRequestResources{} - mi := &file_v1sync_syncservice_proto_msgTypes[16] + mi := &file_v1sync_syncservice_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1260,7 +1396,7 @@ func (x *SyncStreamItem_SyncActionRequestResources) String() string { func (*SyncStreamItem_SyncActionRequestResources) ProtoMessage() {} func (x *SyncStreamItem_SyncActionRequestResources) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[16] + mi := &file_v1sync_syncservice_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1273,7 +1409,7 @@ func (x *SyncStreamItem_SyncActionRequestResources) ProtoReflect() protoreflect. // Deprecated: Use SyncStreamItem_SyncActionRequestResources.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncActionRequestResources) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 4} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 4} } type SyncStreamItem_SyncActionReceiveResources struct { @@ -1286,7 +1422,7 @@ type SyncStreamItem_SyncActionReceiveResources struct { func (x *SyncStreamItem_SyncActionReceiveResources) Reset() { *x = SyncStreamItem_SyncActionReceiveResources{} - mi := &file_v1sync_syncservice_proto_msgTypes[17] + mi := &file_v1sync_syncservice_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1298,7 +1434,7 @@ func (x *SyncStreamItem_SyncActionReceiveResources) String() string { func (*SyncStreamItem_SyncActionReceiveResources) ProtoMessage() {} func (x *SyncStreamItem_SyncActionReceiveResources) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[17] + mi := &file_v1sync_syncservice_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1311,7 +1447,7 @@ func (x *SyncStreamItem_SyncActionReceiveResources) ProtoReflect() protoreflect. // Deprecated: Use SyncStreamItem_SyncActionReceiveResources.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncActionReceiveResources) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 5} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 5} } func (x *SyncStreamItem_SyncActionReceiveResources) GetRepos() []*RepoMetadata { @@ -1337,7 +1473,7 @@ type SyncStreamItem_SyncActionConnectRepo struct { func (x *SyncStreamItem_SyncActionConnectRepo) Reset() { *x = SyncStreamItem_SyncActionConnectRepo{} - mi := &file_v1sync_syncservice_proto_msgTypes[18] + mi := &file_v1sync_syncservice_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1349,7 +1485,7 @@ func (x *SyncStreamItem_SyncActionConnectRepo) String() string { func (*SyncStreamItem_SyncActionConnectRepo) ProtoMessage() {} func (x *SyncStreamItem_SyncActionConnectRepo) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[18] + mi := &file_v1sync_syncservice_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1362,7 +1498,7 @@ func (x *SyncStreamItem_SyncActionConnectRepo) ProtoReflect() protoreflect.Messa // Deprecated: Use SyncStreamItem_SyncActionConnectRepo.ProtoReflect.Descriptor instead. func (*SyncStreamItem_SyncActionConnectRepo) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 6} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 6} } func (x *SyncStreamItem_SyncActionConnectRepo) GetRepoId() string { @@ -1372,29 +1508,29 @@ func (x *SyncStreamItem_SyncActionConnectRepo) GetRepoId() string { return "" } -type SyncStreamItem_SyncActionRequestOperations struct { +type SyncStreamItem_SyncActionOperationManifest struct { state protoimpl.MessageState `protogen:"open.v1"` - HighOpid int64 `protobuf:"varint,1,opt,name=high_opid,json=highOpid,proto3" json:"high_opid,omitempty"` // The highest operation ID the requester has. 
- HighModno int64 `protobuf:"varint,2,opt,name=high_modno,json=highModno,proto3" json:"high_modno,omitempty"` // The highest modno the requester has. + OpIds []int64 `protobuf:"varint,1,rep,packed,name=op_ids,json=opIds,proto3" json:"op_ids,omitempty"` + Modnos []int64 `protobuf:"varint,2,rep,packed,name=modnos,proto3" json:"modnos,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *SyncStreamItem_SyncActionRequestOperations) Reset() { - *x = SyncStreamItem_SyncActionRequestOperations{} - mi := &file_v1sync_syncservice_proto_msgTypes[19] +func (x *SyncStreamItem_SyncActionOperationManifest) Reset() { + *x = SyncStreamItem_SyncActionOperationManifest{} + mi := &file_v1sync_syncservice_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *SyncStreamItem_SyncActionRequestOperations) String() string { +func (x *SyncStreamItem_SyncActionOperationManifest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SyncStreamItem_SyncActionRequestOperations) ProtoMessage() {} +func (*SyncStreamItem_SyncActionOperationManifest) ProtoMessage() {} -func (x *SyncStreamItem_SyncActionRequestOperations) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[19] +func (x *SyncStreamItem_SyncActionOperationManifest) ProtoReflect() protoreflect.Message { + mi := &file_v1sync_syncservice_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1405,23 +1541,67 @@ func (x *SyncStreamItem_SyncActionRequestOperations) ProtoReflect() protoreflect return mi.MessageOf(x) } -// Deprecated: Use SyncStreamItem_SyncActionRequestOperations.ProtoReflect.Descriptor instead. 
-func (*SyncStreamItem_SyncActionRequestOperations) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 7} +// Deprecated: Use SyncStreamItem_SyncActionOperationManifest.ProtoReflect.Descriptor instead. +func (*SyncStreamItem_SyncActionOperationManifest) Descriptor() ([]byte, []int) { + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 7} } -func (x *SyncStreamItem_SyncActionRequestOperations) GetHighOpid() int64 { +func (x *SyncStreamItem_SyncActionOperationManifest) GetOpIds() []int64 { if x != nil { - return x.HighOpid + return x.OpIds } - return 0 + return nil } -func (x *SyncStreamItem_SyncActionRequestOperations) GetHighModno() int64 { +func (x *SyncStreamItem_SyncActionOperationManifest) GetModnos() []int64 { if x != nil { - return x.HighModno + return x.Modnos } - return 0 + return nil +} + +type SyncStreamItem_SyncActionRequestOperationData struct { + state protoimpl.MessageState `protogen:"open.v1"` + OpIds []int64 `protobuf:"varint,1,rep,packed,name=op_ids,json=opIds,proto3" json:"op_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncStreamItem_SyncActionRequestOperationData) Reset() { + *x = SyncStreamItem_SyncActionRequestOperationData{} + mi := &file_v1sync_syncservice_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncStreamItem_SyncActionRequestOperationData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncStreamItem_SyncActionRequestOperationData) ProtoMessage() {} + +func (x *SyncStreamItem_SyncActionRequestOperationData) ProtoReflect() protoreflect.Message { + mi := &file_v1sync_syncservice_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
SyncStreamItem_SyncActionRequestOperationData.ProtoReflect.Descriptor instead. +func (*SyncStreamItem_SyncActionRequestOperationData) Descriptor() ([]byte, []int) { + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 8} +} + +func (x *SyncStreamItem_SyncActionRequestOperationData) GetOpIds() []int64 { + if x != nil { + return x.OpIds + } + return nil } type SyncStreamItem_SyncActionReceiveOperations struct { @@ -1433,7 +1613,7 @@ type SyncStreamItem_SyncActionReceiveOperations struct { func (x *SyncStreamItem_SyncActionReceiveOperations) Reset() { *x = SyncStreamItem_SyncActionReceiveOperations{} - mi := &file_v1sync_syncservice_proto_msgTypes[20] + mi := &file_v1sync_syncservice_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1445,7 +1625,7 @@ func (x *SyncStreamItem_SyncActionReceiveOperations) String() string { func (*SyncStreamItem_SyncActionReceiveOperations) ProtoMessage() {} func (x *SyncStreamItem_SyncActionReceiveOperations) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[20] + mi := &file_v1sync_syncservice_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1458,7 +1638,7 @@ func (x *SyncStreamItem_SyncActionReceiveOperations) ProtoReflect() protoreflect // Deprecated: Use SyncStreamItem_SyncActionReceiveOperations.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncActionReceiveOperations) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 8} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 9} } func (x *SyncStreamItem_SyncActionReceiveOperations) GetEvent() *v1.OperationEvent { @@ -1477,7 +1657,7 @@ type SyncStreamItem_SyncActionRequestLog struct { func (x *SyncStreamItem_SyncActionRequestLog) Reset() { *x = SyncStreamItem_SyncActionRequestLog{} - mi := &file_v1sync_syncservice_proto_msgTypes[21] + mi := &file_v1sync_syncservice_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1489,7 +1669,7 @@ func (x *SyncStreamItem_SyncActionRequestLog) String() string { func (*SyncStreamItem_SyncActionRequestLog) ProtoMessage() {} func (x *SyncStreamItem_SyncActionRequestLog) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[21] + mi := &file_v1sync_syncservice_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1502,7 +1682,7 @@ func (x *SyncStreamItem_SyncActionRequestLog) ProtoReflect() protoreflect.Messag // Deprecated: Use SyncStreamItem_SyncActionRequestLog.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncActionRequestLog) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 9} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 10} } func (x *SyncStreamItem_SyncActionRequestLog) GetLogId() string { @@ -1528,7 +1708,7 @@ type SyncStreamItem_SyncActionReceiveLogData struct { func (x *SyncStreamItem_SyncActionReceiveLogData) Reset() { *x = SyncStreamItem_SyncActionReceiveLogData{} - mi := &file_v1sync_syncservice_proto_msgTypes[22] + mi := &file_v1sync_syncservice_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1540,7 +1720,7 @@ func (x *SyncStreamItem_SyncActionReceiveLogData) String() string { func (*SyncStreamItem_SyncActionReceiveLogData) ProtoMessage() {} func (x *SyncStreamItem_SyncActionReceiveLogData) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[22] + mi := &file_v1sync_syncservice_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1553,7 +1733,7 @@ func (x *SyncStreamItem_SyncActionReceiveLogData) ProtoReflect() protoreflect.Me // Deprecated: Use SyncStreamItem_SyncActionReceiveLogData.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncActionReceiveLogData) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 10} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 11} } func (x *SyncStreamItem_SyncActionReceiveLogData) GetLogId() string { @@ -1600,7 +1780,7 @@ type SyncStreamItem_SyncActionThrottle struct { func (x *SyncStreamItem_SyncActionThrottle) Reset() { *x = SyncStreamItem_SyncActionThrottle{} - mi := &file_v1sync_syncservice_proto_msgTypes[23] + mi := &file_v1sync_syncservice_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1612,7 +1792,7 @@ func (x *SyncStreamItem_SyncActionThrottle) String() string { func (*SyncStreamItem_SyncActionThrottle) ProtoMessage() {} func (x *SyncStreamItem_SyncActionThrottle) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[23] + mi := &file_v1sync_syncservice_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1625,7 +1805,7 @@ func (x *SyncStreamItem_SyncActionThrottle) ProtoReflect() protoreflect.Message // Deprecated: Use SyncStreamItem_SyncActionThrottle.ProtoReflect.Descriptor instead. func (*SyncStreamItem_SyncActionThrottle) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 11} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 12} } func (x *SyncStreamItem_SyncActionThrottle) GetDelayMs() int64 { @@ -1637,16 +1817,16 @@ func (x *SyncStreamItem_SyncActionThrottle) GetDelayMs() int64 { type SyncStreamItem_SyncEstablishSharedSecret struct { state protoimpl.MessageState `protogen:"open.v1"` - // a one-time-use ed25519 public key with a matching unshared private key. Used to perform a key exchange. + // a one-time-use ECDSA public key with a matching unshared private key. Used to perform a key exchange. // See https://pkg.go.dev/crypto/ecdh#PrivateKey.ECDH . 
- Ed25519 string `protobuf:"bytes,2,opt,name=ed25519,json=ed25519pub,proto3" json:"ed25519,omitempty"` // base64 encoded public key + EcdsaPub string `protobuf:"bytes,2,opt,name=ecdsa_pub,json=ecdsaPub,proto3" json:"ecdsa_pub,omitempty"` // base64 encoded public key unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *SyncStreamItem_SyncEstablishSharedSecret) Reset() { *x = SyncStreamItem_SyncEstablishSharedSecret{} - mi := &file_v1sync_syncservice_proto_msgTypes[24] + mi := &file_v1sync_syncservice_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1658,7 +1838,7 @@ func (x *SyncStreamItem_SyncEstablishSharedSecret) String() string { func (*SyncStreamItem_SyncEstablishSharedSecret) ProtoMessage() {} func (x *SyncStreamItem_SyncEstablishSharedSecret) ProtoReflect() protoreflect.Message { - mi := &file_v1sync_syncservice_proto_msgTypes[24] + mi := &file_v1sync_syncservice_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1671,12 +1851,12 @@ func (x *SyncStreamItem_SyncEstablishSharedSecret) ProtoReflect() protoreflect.M // Deprecated: Use SyncStreamItem_SyncEstablishSharedSecret.ProtoReflect.Descriptor instead. 
func (*SyncStreamItem_SyncEstablishSharedSecret) Descriptor() ([]byte, []int) { - return file_v1sync_syncservice_proto_rawDescGZIP(), []int{11, 12} + return file_v1sync_syncservice_proto_rawDescGZIP(), []int{13, 13} } -func (x *SyncStreamItem_SyncEstablishSharedSecret) GetEd25519() string { +func (x *SyncStreamItem_SyncEstablishSharedSecret) GetEcdsaPub() string { if x != nil { - return x.Ed25519 + return x.EcdsaPub } return "" } @@ -1724,7 +1904,15 @@ const file_v1sync_syncservice_proto_rawDesc = "" + "\x05plans\x18\x01 \x03(\v2\b.v1.PlanR\x05plans\x12\x1e\n" + "\x05repos\x18\x02 \x03(\v2\b.v1.RepoR\x05repos\x12&\n" + "\x0frepos_to_delete\x18\x03 \x03(\tR\rreposToDelete\x12&\n" + - "\x0fplans_to_delete\x18\x04 \x03(\tR\rplansToDelete\"~\n" + + "\x0fplans_to_delete\x18\x04 \x03(\tR\rplansToDelete\"\xcd\x01\n" + + "\x1cSetRemoteClientConfigRequest\x12\x1d\n" + + "\n" + + "peer_keyid\x18\x01 \x01(\tR\tpeerKeyid\x12\x1e\n" + + "\x05repos\x18\x02 \x03(\v2\b.v1.RepoR\x05repos\x12\x1e\n" + + "\x05plans\x18\x03 \x03(\v2\b.v1.PlanR\x05plans\x12&\n" + + "\x0frepos_to_delete\x18\x04 \x03(\tR\rreposToDelete\x12&\n" + + "\x0fplans_to_delete\x18\x05 \x03(\tR\rplansToDelete\"\x1f\n" + + "\x1dSetRemoteClientConfigResponse\"~\n" + "\fRemoteConfig\x12\x14\n" + "\x05modno\x18\x01 \x01(\x05R\x05modno\x12\x18\n" + "\aversion\x18\x02 \x01(\x05R\aversion\x12\x1e\n" + @@ -1734,14 +1922,15 @@ const file_v1sync_syncservice_proto_rawDesc = "" + "\n" + "public_key\x18\x01 \x01(\v2\r.v1.PublicKeyR\tpublicKey\x122\n" + "\vinstance_id\x18\x02 \x01(\v2\x11.v1.SignedMessageR\n" + - "instanceId\"\xbd\x12\n" + + "instanceId\"\xff\x13\n" + "\x0eSyncStreamItem\x12:\n" + "\x0esigned_message\x18\x01 \x01(\v2\x11.v1.SignedMessageH\x00R\rsignedMessage\x12J\n" + "\thandshake\x18\x03 \x01(\v2*.v1sync.SyncStreamItem.SyncActionHandshakeH\x00R\thandshake\x12J\n" + "\theartbeat\x18\x04 \x01(\v2*.v1sync.SyncStreamItem.SyncActionHeartbeatH\x00R\theartbeat\x12c\n" + - "\x12request_operations\x18\x14 
\x01(\v22.v1sync.SyncStreamItem.SyncActionRequestOperationsH\x00R\x11requestOperations\x12c\n" + - "\x12receive_operations\x18\x15 \x01(\v22.v1sync.SyncStreamItem.SyncActionReceiveOperationsH\x00R\x11receiveOperations\x12W\n" + - "\x0ereceive_config\x18\x16 \x01(\v2..v1sync.SyncStreamItem.SyncActionReceiveConfigH\x00R\rreceiveConfig\x12K\n" + + "\x12operation_manifest\x18\x14 \x01(\v22.v1sync.SyncStreamItem.SyncActionOperationManifestH\x00R\x11operationManifest\x12c\n" + + "\x12receive_operations\x18\x15 \x01(\v22.v1sync.SyncStreamItem.SyncActionReceiveOperationsH\x00R\x11receiveOperations\x12m\n" + + "\x16request_operation_data\x18\x16 \x01(\v25.v1sync.SyncStreamItem.SyncActionRequestOperationDataH\x00R\x14requestOperationData\x12W\n" + + "\x0ereceive_config\x18\x17 \x01(\v2..v1sync.SyncStreamItem.SyncActionReceiveConfigH\x00R\rreceiveConfig\x12K\n" + "\n" + "set_config\x18\x18 \x01(\v2*.v1sync.SyncStreamItem.SyncActionSetConfigH\x00R\tsetConfig\x12`\n" + "\x11request_resources\x18\x19 \x01(\v21.v1sync.SyncStreamItem.SyncActionRequestResourcesH\x00R\x10requestResources\x12`\n" + @@ -1749,13 +1938,14 @@ const file_v1sync_syncservice_proto_rawDesc = "" + "\vrequest_log\x18\x1e \x01(\v2+.v1sync.SyncStreamItem.SyncActionRequestLogH\x00R\n" + "requestLog\x12[\n" + "\x10receive_log_data\x18\x1f \x01(\v2/.v1sync.SyncStreamItem.SyncActionReceiveLogDataH\x00R\x0ereceiveLogData\x12H\n" + - "\bthrottle\x18\xe8\a \x01(\v2).v1sync.SyncStreamItem.SyncActionThrottleH\x00R\bthrottle\x1a\xa2\x01\n" + + "\bthrottle\x18\xe8\a \x01(\v2).v1sync.SyncStreamItem.SyncActionThrottleH\x00R\bthrottle\x1a\xc9\x01\n" + "\x13SyncActionHandshake\x12)\n" + "\x10protocol_version\x18\x01 \x01(\x03R\x0fprotocolVersion\x12,\n" + "\n" + "public_key\x18\x02 \x01(\v2\r.v1.PublicKeyR\tpublicKey\x122\n" + "\vinstance_id\x18\x03 \x01(\v2\x11.v1.SignedMessageR\n" + - "instanceId\x1a\x15\n" + + "instanceId\x12%\n" + + "\x0epairing_secret\x18\x04 \x01(\tR\rpairingSecret\x1a\x15\n" + 
"\x13SyncActionHeartbeat\x1aG\n" + "\x17SyncActionReceiveConfig\x12,\n" + "\x06config\x18\x01 \x01(\v2\x14.v1sync.RemoteConfigR\x06config\x1a\xa5\x01\n" + @@ -1769,11 +1959,12 @@ const file_v1sync_syncservice_proto_rawDesc = "" + "\x05repos\x18\x01 \x03(\v2\x14.v1sync.RepoMetadataR\x05repos\x12*\n" + "\x05plans\x18\x02 \x03(\v2\x14.v1sync.PlanMetadataR\x05plans\x1a0\n" + "\x15SyncActionConnectRepo\x12\x17\n" + - "\arepo_id\x18\x01 \x01(\tR\x06repoId\x1aY\n" + - "\x1bSyncActionRequestOperations\x12\x1b\n" + - "\thigh_opid\x18\x01 \x01(\x03R\bhighOpid\x12\x1d\n" + - "\n" + - "high_modno\x18\x02 \x01(\x03R\thighModno\x1aG\n" + + "\arepo_id\x18\x01 \x01(\tR\x06repoId\x1aL\n" + + "\x1bSyncActionOperationManifest\x12\x15\n" + + "\x06op_ids\x18\x01 \x03(\x03R\x05opIds\x12\x16\n" + + "\x06modnos\x18\x02 \x03(\x03R\x06modnos\x1a7\n" + + "\x1eSyncActionRequestOperationData\x12\x15\n" + + "\x06op_ids\x18\x01 \x03(\x03R\x05opIds\x1aG\n" + "\x1bSyncActionReceiveOperations\x12(\n" + "\x05event\x18\x01 \x01(\v2\x12.v1.OperationEventR\x05event\x1a-\n" + "\x14SyncActionRequestLog\x12\x15\n" + @@ -1788,8 +1979,7 @@ const file_v1sync_syncservice_proto_rawDesc = "" + "\x12SyncActionThrottle\x12\x19\n" + "\bdelay_ms\x18\x01 \x01(\x03R\adelayMs\x1a8\n" + "\x19SyncEstablishSharedSecret\x12\x1b\n" + - "\aed25519\x18\x02 \x01(\tR\n" + - "ed25519pub\"\xb4\x01\n" + + "\tecdsa_pub\x18\x02 \x01(\tR\becdsaPub\"\xb4\x01\n" + "\x13RepoConnectionState\x12\x1c\n" + "\x18CONNECTION_STATE_UNKNOWN\x10\x00\x12\x1c\n" + "\x18CONNECTION_STATE_PENDING\x10\x01\x12\x1e\n" + @@ -1808,9 +1998,10 @@ const file_v1sync_syncservice_proto_rawDesc = "" + "\x1fCONNECTION_STATE_ERROR_PROTOCOL\x10\v\x12#\n" + "\x1fCONNECTION_STATE_ERROR_INTERNAL\x10\f2S\n" + "\x13BackrestSyncService\x12<\n" + - "\x04Sync\x12\x16.v1sync.SyncStreamItem\x1a\x16.v1sync.SyncStreamItem\"\x00(\x010\x012l\n" + + "\x04Sync\x12\x16.v1sync.SyncStreamItem\x1a\x16.v1sync.SyncStreamItem\"\x00(\x010\x012\xd4\x01\n" + 
"\x18BackrestSyncStateService\x12P\n" + - "\x17GetPeerSyncStatesStream\x12\x1e.v1sync.SyncStateStreamRequest\x1a\x11.v1sync.PeerState\"\x000\x01B0Z.github.com/garethgeorge/backrest/gen/go/v1syncb\x06proto3" + "\x17GetPeerSyncStatesStream\x12\x1e.v1sync.SyncStateStreamRequest\x1a\x11.v1sync.PeerState\"\x000\x01\x12f\n" + + "\x15SetRemoteClientConfig\x12$.v1sync.SetRemoteClientConfigRequest\x1a%.v1sync.SetRemoteClientConfigResponse\"\x00B0Z.github.com/garethgeorge/backrest/gen/go/v1syncb\x06proto3" var ( file_v1sync_syncservice_proto_rawDescOnce sync.Once @@ -1825,84 +2016,92 @@ func file_v1sync_syncservice_proto_rawDescGZIP() []byte { } var file_v1sync_syncservice_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_v1sync_syncservice_proto_msgTypes = make([]protoimpl.MessageInfo, 25) +var file_v1sync_syncservice_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_v1sync_syncservice_proto_goTypes = []any{ - (ConnectionState)(0), // 0: v1sync.ConnectionState - (SyncStreamItem_RepoConnectionState)(0), // 1: v1sync.SyncStreamItem.RepoConnectionState - (*SyncStateStreamRequest)(nil), // 2: v1sync.SyncStateStreamRequest - (*PeerState)(nil), // 3: v1sync.PeerState - (*AuthenticateRequest)(nil), // 4: v1sync.AuthenticateRequest - (*GetOperationMetadataResponse)(nil), // 5: v1sync.GetOperationMetadataResponse - (*LogDataEntry)(nil), // 6: v1sync.LogDataEntry - (*SetAvailableResourcesRequest)(nil), // 7: v1sync.SetAvailableResourcesRequest - (*RepoMetadata)(nil), // 8: v1sync.RepoMetadata - (*PlanMetadata)(nil), // 9: v1sync.PlanMetadata - (*SetConfigRequest)(nil), // 10: v1sync.SetConfigRequest - (*RemoteConfig)(nil), // 11: v1sync.RemoteConfig - (*AuthorizationToken)(nil), // 12: v1sync.AuthorizationToken - (*SyncStreamItem)(nil), // 13: v1sync.SyncStreamItem - (*SyncStreamItem_SyncActionHandshake)(nil), // 14: v1sync.SyncStreamItem.SyncActionHandshake - (*SyncStreamItem_SyncActionHeartbeat)(nil), // 15: v1sync.SyncStreamItem.SyncActionHeartbeat - 
(*SyncStreamItem_SyncActionReceiveConfig)(nil), // 16: v1sync.SyncStreamItem.SyncActionReceiveConfig - (*SyncStreamItem_SyncActionSetConfig)(nil), // 17: v1sync.SyncStreamItem.SyncActionSetConfig - (*SyncStreamItem_SyncActionRequestResources)(nil), // 18: v1sync.SyncStreamItem.SyncActionRequestResources - (*SyncStreamItem_SyncActionReceiveResources)(nil), // 19: v1sync.SyncStreamItem.SyncActionReceiveResources - (*SyncStreamItem_SyncActionConnectRepo)(nil), // 20: v1sync.SyncStreamItem.SyncActionConnectRepo - (*SyncStreamItem_SyncActionRequestOperations)(nil), // 21: v1sync.SyncStreamItem.SyncActionRequestOperations - (*SyncStreamItem_SyncActionReceiveOperations)(nil), // 22: v1sync.SyncStreamItem.SyncActionReceiveOperations - (*SyncStreamItem_SyncActionRequestLog)(nil), // 23: v1sync.SyncStreamItem.SyncActionRequestLog - (*SyncStreamItem_SyncActionReceiveLogData)(nil), // 24: v1sync.SyncStreamItem.SyncActionReceiveLogData - (*SyncStreamItem_SyncActionThrottle)(nil), // 25: v1sync.SyncStreamItem.SyncActionThrottle - (*SyncStreamItem_SyncEstablishSharedSecret)(nil), // 26: v1sync.SyncStreamItem.SyncEstablishSharedSecret - (*v1.SignedMessage)(nil), // 27: v1.SignedMessage - (*v1.Plan)(nil), // 28: v1.Plan - (*v1.Repo)(nil), // 29: v1.Repo - (*v1.PublicKey)(nil), // 30: v1.PublicKey - (*v1.OperationEvent)(nil), // 31: v1.OperationEvent + (ConnectionState)(0), // 0: v1sync.ConnectionState + (SyncStreamItem_RepoConnectionState)(0), // 1: v1sync.SyncStreamItem.RepoConnectionState + (*SyncStateStreamRequest)(nil), // 2: v1sync.SyncStateStreamRequest + (*PeerState)(nil), // 3: v1sync.PeerState + (*AuthenticateRequest)(nil), // 4: v1sync.AuthenticateRequest + (*GetOperationMetadataResponse)(nil), // 5: v1sync.GetOperationMetadataResponse + (*LogDataEntry)(nil), // 6: v1sync.LogDataEntry + (*SetAvailableResourcesRequest)(nil), // 7: v1sync.SetAvailableResourcesRequest + (*RepoMetadata)(nil), // 8: v1sync.RepoMetadata + (*PlanMetadata)(nil), // 9: v1sync.PlanMetadata + 
(*SetConfigRequest)(nil), // 10: v1sync.SetConfigRequest + (*SetRemoteClientConfigRequest)(nil), // 11: v1sync.SetRemoteClientConfigRequest + (*SetRemoteClientConfigResponse)(nil), // 12: v1sync.SetRemoteClientConfigResponse + (*RemoteConfig)(nil), // 13: v1sync.RemoteConfig + (*AuthorizationToken)(nil), // 14: v1sync.AuthorizationToken + (*SyncStreamItem)(nil), // 15: v1sync.SyncStreamItem + (*SyncStreamItem_SyncActionHandshake)(nil), // 16: v1sync.SyncStreamItem.SyncActionHandshake + (*SyncStreamItem_SyncActionHeartbeat)(nil), // 17: v1sync.SyncStreamItem.SyncActionHeartbeat + (*SyncStreamItem_SyncActionReceiveConfig)(nil), // 18: v1sync.SyncStreamItem.SyncActionReceiveConfig + (*SyncStreamItem_SyncActionSetConfig)(nil), // 19: v1sync.SyncStreamItem.SyncActionSetConfig + (*SyncStreamItem_SyncActionRequestResources)(nil), // 20: v1sync.SyncStreamItem.SyncActionRequestResources + (*SyncStreamItem_SyncActionReceiveResources)(nil), // 21: v1sync.SyncStreamItem.SyncActionReceiveResources + (*SyncStreamItem_SyncActionConnectRepo)(nil), // 22: v1sync.SyncStreamItem.SyncActionConnectRepo + (*SyncStreamItem_SyncActionOperationManifest)(nil), // 23: v1sync.SyncStreamItem.SyncActionOperationManifest + (*SyncStreamItem_SyncActionRequestOperationData)(nil), // 24: v1sync.SyncStreamItem.SyncActionRequestOperationData + (*SyncStreamItem_SyncActionReceiveOperations)(nil), // 25: v1sync.SyncStreamItem.SyncActionReceiveOperations + (*SyncStreamItem_SyncActionRequestLog)(nil), // 26: v1sync.SyncStreamItem.SyncActionRequestLog + (*SyncStreamItem_SyncActionReceiveLogData)(nil), // 27: v1sync.SyncStreamItem.SyncActionReceiveLogData + (*SyncStreamItem_SyncActionThrottle)(nil), // 28: v1sync.SyncStreamItem.SyncActionThrottle + (*SyncStreamItem_SyncEstablishSharedSecret)(nil), // 29: v1sync.SyncStreamItem.SyncEstablishSharedSecret + (*v1.SignedMessage)(nil), // 30: v1.SignedMessage + (*v1.Plan)(nil), // 31: v1.Plan + (*v1.Repo)(nil), // 32: v1.Repo + (*v1.PublicKey)(nil), // 33: 
v1.PublicKey + (*v1.OperationEvent)(nil), // 34: v1.OperationEvent } var file_v1sync_syncservice_proto_depIdxs = []int32{ 0, // 0: v1sync.PeerState.state:type_name -> v1sync.ConnectionState 9, // 1: v1sync.PeerState.known_plans:type_name -> v1sync.PlanMetadata 8, // 2: v1sync.PeerState.known_repos:type_name -> v1sync.RepoMetadata - 11, // 3: v1sync.PeerState.remote_config:type_name -> v1sync.RemoteConfig - 27, // 4: v1sync.AuthenticateRequest.instance_id:type_name -> v1.SignedMessage + 13, // 3: v1sync.PeerState.remote_config:type_name -> v1sync.RemoteConfig + 30, // 4: v1sync.AuthenticateRequest.instance_id:type_name -> v1.SignedMessage 9, // 5: v1sync.SetAvailableResourcesRequest.repos:type_name -> v1sync.PlanMetadata 8, // 6: v1sync.SetAvailableResourcesRequest.plans:type_name -> v1sync.RepoMetadata - 28, // 7: v1sync.SetConfigRequest.plans:type_name -> v1.Plan - 29, // 8: v1sync.SetConfigRequest.repos:type_name -> v1.Repo - 29, // 9: v1sync.RemoteConfig.repos:type_name -> v1.Repo - 28, // 10: v1sync.RemoteConfig.plans:type_name -> v1.Plan - 30, // 11: v1sync.AuthorizationToken.public_key:type_name -> v1.PublicKey - 27, // 12: v1sync.AuthorizationToken.instance_id:type_name -> v1.SignedMessage - 27, // 13: v1sync.SyncStreamItem.signed_message:type_name -> v1.SignedMessage - 14, // 14: v1sync.SyncStreamItem.handshake:type_name -> v1sync.SyncStreamItem.SyncActionHandshake - 15, // 15: v1sync.SyncStreamItem.heartbeat:type_name -> v1sync.SyncStreamItem.SyncActionHeartbeat - 21, // 16: v1sync.SyncStreamItem.request_operations:type_name -> v1sync.SyncStreamItem.SyncActionRequestOperations - 22, // 17: v1sync.SyncStreamItem.receive_operations:type_name -> v1sync.SyncStreamItem.SyncActionReceiveOperations - 16, // 18: v1sync.SyncStreamItem.receive_config:type_name -> v1sync.SyncStreamItem.SyncActionReceiveConfig - 17, // 19: v1sync.SyncStreamItem.set_config:type_name -> v1sync.SyncStreamItem.SyncActionSetConfig - 18, // 20: 
v1sync.SyncStreamItem.request_resources:type_name -> v1sync.SyncStreamItem.SyncActionRequestResources - 19, // 21: v1sync.SyncStreamItem.receive_resources:type_name -> v1sync.SyncStreamItem.SyncActionReceiveResources - 23, // 22: v1sync.SyncStreamItem.request_log:type_name -> v1sync.SyncStreamItem.SyncActionRequestLog - 24, // 23: v1sync.SyncStreamItem.receive_log_data:type_name -> v1sync.SyncStreamItem.SyncActionReceiveLogData - 25, // 24: v1sync.SyncStreamItem.throttle:type_name -> v1sync.SyncStreamItem.SyncActionThrottle - 30, // 25: v1sync.SyncStreamItem.SyncActionHandshake.public_key:type_name -> v1.PublicKey - 27, // 26: v1sync.SyncStreamItem.SyncActionHandshake.instance_id:type_name -> v1.SignedMessage - 11, // 27: v1sync.SyncStreamItem.SyncActionReceiveConfig.config:type_name -> v1sync.RemoteConfig - 29, // 28: v1sync.SyncStreamItem.SyncActionSetConfig.repos:type_name -> v1.Repo - 28, // 29: v1sync.SyncStreamItem.SyncActionSetConfig.plans:type_name -> v1.Plan - 8, // 30: v1sync.SyncStreamItem.SyncActionReceiveResources.repos:type_name -> v1sync.RepoMetadata - 9, // 31: v1sync.SyncStreamItem.SyncActionReceiveResources.plans:type_name -> v1sync.PlanMetadata - 31, // 32: v1sync.SyncStreamItem.SyncActionReceiveOperations.event:type_name -> v1.OperationEvent - 13, // 33: v1sync.BackrestSyncService.Sync:input_type -> v1sync.SyncStreamItem - 2, // 34: v1sync.BackrestSyncStateService.GetPeerSyncStatesStream:input_type -> v1sync.SyncStateStreamRequest - 13, // 35: v1sync.BackrestSyncService.Sync:output_type -> v1sync.SyncStreamItem - 3, // 36: v1sync.BackrestSyncStateService.GetPeerSyncStatesStream:output_type -> v1sync.PeerState - 35, // [35:37] is the sub-list for method output_type - 33, // [33:35] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 33, // [33:33] is the sub-list for extension extendee - 0, // [0:33] is the sub-list for field type_name + 31, // 7: v1sync.SetConfigRequest.plans:type_name -> v1.Plan + 
32, // 8: v1sync.SetConfigRequest.repos:type_name -> v1.Repo + 32, // 9: v1sync.SetRemoteClientConfigRequest.repos:type_name -> v1.Repo + 31, // 10: v1sync.SetRemoteClientConfigRequest.plans:type_name -> v1.Plan + 32, // 11: v1sync.RemoteConfig.repos:type_name -> v1.Repo + 31, // 12: v1sync.RemoteConfig.plans:type_name -> v1.Plan + 33, // 13: v1sync.AuthorizationToken.public_key:type_name -> v1.PublicKey + 30, // 14: v1sync.AuthorizationToken.instance_id:type_name -> v1.SignedMessage + 30, // 15: v1sync.SyncStreamItem.signed_message:type_name -> v1.SignedMessage + 16, // 16: v1sync.SyncStreamItem.handshake:type_name -> v1sync.SyncStreamItem.SyncActionHandshake + 17, // 17: v1sync.SyncStreamItem.heartbeat:type_name -> v1sync.SyncStreamItem.SyncActionHeartbeat + 23, // 18: v1sync.SyncStreamItem.operation_manifest:type_name -> v1sync.SyncStreamItem.SyncActionOperationManifest + 25, // 19: v1sync.SyncStreamItem.receive_operations:type_name -> v1sync.SyncStreamItem.SyncActionReceiveOperations + 24, // 20: v1sync.SyncStreamItem.request_operation_data:type_name -> v1sync.SyncStreamItem.SyncActionRequestOperationData + 18, // 21: v1sync.SyncStreamItem.receive_config:type_name -> v1sync.SyncStreamItem.SyncActionReceiveConfig + 19, // 22: v1sync.SyncStreamItem.set_config:type_name -> v1sync.SyncStreamItem.SyncActionSetConfig + 20, // 23: v1sync.SyncStreamItem.request_resources:type_name -> v1sync.SyncStreamItem.SyncActionRequestResources + 21, // 24: v1sync.SyncStreamItem.receive_resources:type_name -> v1sync.SyncStreamItem.SyncActionReceiveResources + 26, // 25: v1sync.SyncStreamItem.request_log:type_name -> v1sync.SyncStreamItem.SyncActionRequestLog + 27, // 26: v1sync.SyncStreamItem.receive_log_data:type_name -> v1sync.SyncStreamItem.SyncActionReceiveLogData + 28, // 27: v1sync.SyncStreamItem.throttle:type_name -> v1sync.SyncStreamItem.SyncActionThrottle + 33, // 28: v1sync.SyncStreamItem.SyncActionHandshake.public_key:type_name -> v1.PublicKey + 30, // 29: 
v1sync.SyncStreamItem.SyncActionHandshake.instance_id:type_name -> v1.SignedMessage + 13, // 30: v1sync.SyncStreamItem.SyncActionReceiveConfig.config:type_name -> v1sync.RemoteConfig + 32, // 31: v1sync.SyncStreamItem.SyncActionSetConfig.repos:type_name -> v1.Repo + 31, // 32: v1sync.SyncStreamItem.SyncActionSetConfig.plans:type_name -> v1.Plan + 8, // 33: v1sync.SyncStreamItem.SyncActionReceiveResources.repos:type_name -> v1sync.RepoMetadata + 9, // 34: v1sync.SyncStreamItem.SyncActionReceiveResources.plans:type_name -> v1sync.PlanMetadata + 34, // 35: v1sync.SyncStreamItem.SyncActionReceiveOperations.event:type_name -> v1.OperationEvent + 15, // 36: v1sync.BackrestSyncService.Sync:input_type -> v1sync.SyncStreamItem + 2, // 37: v1sync.BackrestSyncStateService.GetPeerSyncStatesStream:input_type -> v1sync.SyncStateStreamRequest + 11, // 38: v1sync.BackrestSyncStateService.SetRemoteClientConfig:input_type -> v1sync.SetRemoteClientConfigRequest + 15, // 39: v1sync.BackrestSyncService.Sync:output_type -> v1sync.SyncStreamItem + 3, // 40: v1sync.BackrestSyncStateService.GetPeerSyncStatesStream:output_type -> v1sync.PeerState + 12, // 41: v1sync.BackrestSyncStateService.SetRemoteClientConfig:output_type -> v1sync.SetRemoteClientConfigResponse + 39, // [39:42] is the sub-list for method output_type + 36, // [36:39] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_v1sync_syncservice_proto_init() } @@ -1910,12 +2109,13 @@ func file_v1sync_syncservice_proto_init() { if File_v1sync_syncservice_proto != nil { return } - file_v1sync_syncservice_proto_msgTypes[11].OneofWrappers = []any{ + file_v1sync_syncservice_proto_msgTypes[13].OneofWrappers = []any{ (*SyncStreamItem_SignedMessage)(nil), (*SyncStreamItem_Handshake)(nil), (*SyncStreamItem_Heartbeat)(nil), - (*SyncStreamItem_RequestOperations)(nil), + 
(*SyncStreamItem_OperationManifest)(nil), (*SyncStreamItem_ReceiveOperations)(nil), + (*SyncStreamItem_RequestOperationData)(nil), (*SyncStreamItem_ReceiveConfig)(nil), (*SyncStreamItem_SetConfig)(nil), (*SyncStreamItem_RequestResources)(nil), @@ -1930,7 +2130,7 @@ func file_v1sync_syncservice_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_v1sync_syncservice_proto_rawDesc), len(file_v1sync_syncservice_proto_rawDesc)), NumEnums: 2, - NumMessages: 25, + NumMessages: 28, NumExtensions: 0, NumServices: 2, }, diff --git a/gen/go/v1sync/syncservice_grpc.pb.go b/gen/go/v1sync/syncservice_grpc.pb.go index b445c78f..5499bd37 100644 --- a/gen/go/v1sync/syncservice_grpc.pb.go +++ b/gen/go/v1sync/syncservice_grpc.pb.go @@ -122,6 +122,7 @@ var BackrestSyncService_ServiceDesc = grpc.ServiceDesc{ const ( BackrestSyncStateService_GetPeerSyncStatesStream_FullMethodName = "/v1sync.BackrestSyncStateService/GetPeerSyncStatesStream" + BackrestSyncStateService_SetRemoteClientConfig_FullMethodName = "/v1sync.BackrestSyncStateService/SetRemoteClientConfig" ) // BackrestSyncStateServiceClient is the client API for BackrestSyncStateService service. @@ -132,6 +133,8 @@ const ( // This service should be served behind authentication and authorization. type BackrestSyncStateServiceClient interface { GetPeerSyncStatesStream(ctx context.Context, in *SyncStateStreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[PeerState], error) + // SetRemoteClientConfig pushes a config change to a connected authorized client peer. 
+ SetRemoteClientConfig(ctx context.Context, in *SetRemoteClientConfigRequest, opts ...grpc.CallOption) (*SetRemoteClientConfigResponse, error) } type backrestSyncStateServiceClient struct { @@ -161,6 +164,16 @@ func (c *backrestSyncStateServiceClient) GetPeerSyncStatesStream(ctx context.Con // This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. type BackrestSyncStateService_GetPeerSyncStatesStreamClient = grpc.ServerStreamingClient[PeerState] +func (c *backrestSyncStateServiceClient) SetRemoteClientConfig(ctx context.Context, in *SetRemoteClientConfigRequest, opts ...grpc.CallOption) (*SetRemoteClientConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SetRemoteClientConfigResponse) + err := c.cc.Invoke(ctx, BackrestSyncStateService_SetRemoteClientConfig_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // BackrestSyncStateServiceServer is the server API for BackrestSyncStateService service. // All implementations must embed UnimplementedBackrestSyncStateServiceServer // for forward compatibility. @@ -169,6 +182,8 @@ type BackrestSyncStateService_GetPeerSyncStatesStreamClient = grpc.ServerStreami // This service should be served behind authentication and authorization. type BackrestSyncStateServiceServer interface { GetPeerSyncStatesStream(*SyncStateStreamRequest, grpc.ServerStreamingServer[PeerState]) error + // SetRemoteClientConfig pushes a config change to a connected authorized client peer. 
+ SetRemoteClientConfig(context.Context, *SetRemoteClientConfigRequest) (*SetRemoteClientConfigResponse, error) mustEmbedUnimplementedBackrestSyncStateServiceServer() } @@ -182,6 +197,9 @@ type UnimplementedBackrestSyncStateServiceServer struct{} func (UnimplementedBackrestSyncStateServiceServer) GetPeerSyncStatesStream(*SyncStateStreamRequest, grpc.ServerStreamingServer[PeerState]) error { return status.Errorf(codes.Unimplemented, "method GetPeerSyncStatesStream not implemented") } +func (UnimplementedBackrestSyncStateServiceServer) SetRemoteClientConfig(context.Context, *SetRemoteClientConfigRequest) (*SetRemoteClientConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetRemoteClientConfig not implemented") +} func (UnimplementedBackrestSyncStateServiceServer) mustEmbedUnimplementedBackrestSyncStateServiceServer() { } func (UnimplementedBackrestSyncStateServiceServer) testEmbeddedByValue() {} @@ -215,13 +233,36 @@ func _BackrestSyncStateService_GetPeerSyncStatesStream_Handler(srv interface{}, // This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
type BackrestSyncStateService_GetPeerSyncStatesStreamServer = grpc.ServerStreamingServer[PeerState] +func _BackrestSyncStateService_SetRemoteClientConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetRemoteClientConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackrestSyncStateServiceServer).SetRemoteClientConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackrestSyncStateService_SetRemoteClientConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackrestSyncStateServiceServer).SetRemoteClientConfig(ctx, req.(*SetRemoteClientConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + // BackrestSyncStateService_ServiceDesc is the grpc.ServiceDesc for BackrestSyncStateService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var BackrestSyncStateService_ServiceDesc = grpc.ServiceDesc{ ServiceName: "v1sync.BackrestSyncStateService", HandlerType: (*BackrestSyncStateServiceServer)(nil), - Methods: []grpc.MethodDesc{}, + Methods: []grpc.MethodDesc{ + { + MethodName: "SetRemoteClientConfig", + Handler: _BackrestSyncStateService_SetRemoteClientConfig_Handler, + }, + }, Streams: []grpc.StreamDesc{ { StreamName: "GetPeerSyncStatesStream", diff --git a/gen/go/v1sync/v1syncconnect/syncservice.connect.go b/gen/go/v1sync/v1syncconnect/syncservice.connect.go index e8af57be..4134cbe9 100644 --- a/gen/go/v1sync/v1syncconnect/syncservice.connect.go +++ b/gen/go/v1sync/v1syncconnect/syncservice.connect.go @@ -41,6 +41,9 @@ const ( // BackrestSyncStateServiceGetPeerSyncStatesStreamProcedure is the fully-qualified name of the // BackrestSyncStateService's GetPeerSyncStatesStream RPC. 
BackrestSyncStateServiceGetPeerSyncStatesStreamProcedure = "/v1sync.BackrestSyncStateService/GetPeerSyncStatesStream" + // BackrestSyncStateServiceSetRemoteClientConfigProcedure is the fully-qualified name of the + // BackrestSyncStateService's SetRemoteClientConfig RPC. + BackrestSyncStateServiceSetRemoteClientConfigProcedure = "/v1sync.BackrestSyncStateService/SetRemoteClientConfig" ) // BackrestSyncServiceClient is a client for the v1sync.BackrestSyncService service. @@ -116,6 +119,8 @@ func (UnimplementedBackrestSyncServiceHandler) Sync(context.Context, *connect.Bi // BackrestSyncStateServiceClient is a client for the v1sync.BackrestSyncStateService service. type BackrestSyncStateServiceClient interface { GetPeerSyncStatesStream(context.Context, *connect.Request[v1sync.SyncStateStreamRequest]) (*connect.ServerStreamForClient[v1sync.PeerState], error) + // SetRemoteClientConfig pushes a config change to a connected authorized client peer. + SetRemoteClientConfig(context.Context, *connect.Request[v1sync.SetRemoteClientConfigRequest]) (*connect.Response[v1sync.SetRemoteClientConfigResponse], error) } // NewBackrestSyncStateServiceClient constructs a client for the v1sync.BackrestSyncStateService @@ -135,12 +140,19 @@ func NewBackrestSyncStateServiceClient(httpClient connect.HTTPClient, baseURL st connect.WithSchema(backrestSyncStateServiceMethods.ByName("GetPeerSyncStatesStream")), connect.WithClientOptions(opts...), ), + setRemoteClientConfig: connect.NewClient[v1sync.SetRemoteClientConfigRequest, v1sync.SetRemoteClientConfigResponse]( + httpClient, + baseURL+BackrestSyncStateServiceSetRemoteClientConfigProcedure, + connect.WithSchema(backrestSyncStateServiceMethods.ByName("SetRemoteClientConfig")), + connect.WithClientOptions(opts...), + ), } } // backrestSyncStateServiceClient implements BackrestSyncStateServiceClient. 
type backrestSyncStateServiceClient struct { getPeerSyncStatesStream *connect.Client[v1sync.SyncStateStreamRequest, v1sync.PeerState] + setRemoteClientConfig *connect.Client[v1sync.SetRemoteClientConfigRequest, v1sync.SetRemoteClientConfigResponse] } // GetPeerSyncStatesStream calls v1sync.BackrestSyncStateService.GetPeerSyncStatesStream. @@ -148,10 +160,17 @@ func (c *backrestSyncStateServiceClient) GetPeerSyncStatesStream(ctx context.Con return c.getPeerSyncStatesStream.CallServerStream(ctx, req) } +// SetRemoteClientConfig calls v1sync.BackrestSyncStateService.SetRemoteClientConfig. +func (c *backrestSyncStateServiceClient) SetRemoteClientConfig(ctx context.Context, req *connect.Request[v1sync.SetRemoteClientConfigRequest]) (*connect.Response[v1sync.SetRemoteClientConfigResponse], error) { + return c.setRemoteClientConfig.CallUnary(ctx, req) +} + // BackrestSyncStateServiceHandler is an implementation of the v1sync.BackrestSyncStateService // service. type BackrestSyncStateServiceHandler interface { GetPeerSyncStatesStream(context.Context, *connect.Request[v1sync.SyncStateStreamRequest], *connect.ServerStream[v1sync.PeerState]) error + // SetRemoteClientConfig pushes a config change to a connected authorized client peer. + SetRemoteClientConfig(context.Context, *connect.Request[v1sync.SetRemoteClientConfigRequest]) (*connect.Response[v1sync.SetRemoteClientConfigResponse], error) } // NewBackrestSyncStateServiceHandler builds an HTTP handler from the service implementation. 
It @@ -167,10 +186,18 @@ func NewBackrestSyncStateServiceHandler(svc BackrestSyncStateServiceHandler, opt connect.WithSchema(backrestSyncStateServiceMethods.ByName("GetPeerSyncStatesStream")), connect.WithHandlerOptions(opts...), ) + backrestSyncStateServiceSetRemoteClientConfigHandler := connect.NewUnaryHandler( + BackrestSyncStateServiceSetRemoteClientConfigProcedure, + svc.SetRemoteClientConfig, + connect.WithSchema(backrestSyncStateServiceMethods.ByName("SetRemoteClientConfig")), + connect.WithHandlerOptions(opts...), + ) return "/v1sync.BackrestSyncStateService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case BackrestSyncStateServiceGetPeerSyncStatesStreamProcedure: backrestSyncStateServiceGetPeerSyncStatesStreamHandler.ServeHTTP(w, r) + case BackrestSyncStateServiceSetRemoteClientConfigProcedure: + backrestSyncStateServiceSetRemoteClientConfigHandler.ServeHTTP(w, r) default: http.NotFound(w, r) } @@ -183,3 +210,7 @@ type UnimplementedBackrestSyncStateServiceHandler struct{} func (UnimplementedBackrestSyncStateServiceHandler) GetPeerSyncStatesStream(context.Context, *connect.Request[v1sync.SyncStateStreamRequest], *connect.ServerStream[v1sync.PeerState]) error { return connect.NewError(connect.CodeUnimplemented, errors.New("v1sync.BackrestSyncStateService.GetPeerSyncStatesStream is not implemented")) } + +func (UnimplementedBackrestSyncStateServiceHandler) SetRemoteClientConfig(context.Context, *connect.Request[v1sync.SetRemoteClientConfigRequest]) (*connect.Response[v1sync.SetRemoteClientConfigResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("v1sync.BackrestSyncStateService.SetRemoteClientConfig is not implemented")) +} diff --git a/internal/api/backresthandler.go b/internal/api/backresthandler.go index 496794f5..21a6f23d 100644 --- a/internal/api/backresthandler.go +++ b/internal/api/backresthandler.go @@ -71,26 +71,17 @@ func (s *BackrestHandler) GetConfig(ctx 
context.Context, req *connect.Request[em // SetConfig implements POST /v1/config func (s *BackrestHandler) SetConfig(ctx context.Context, req *connect.Request[v1.Config]) (*connect.Response[v1.Config], error) { - existing, err := s.config.Get() - if err != nil { - return nil, fmt.Errorf("failed to check current config: %w", err) - } - - // Compare and increment modno - if existing.Modno != req.Msg.Modno { - return nil, errors.New("config modno mismatch, reload and try again") - } - - // Rehydrate the network sanitized config - rehydratedConfig := config.RehydrateNetworkSanitizedConfig(req.Msg, existing) - - if err := config.ValidateConfig(rehydratedConfig); err != nil { - return nil, fmt.Errorf("validation error: %w", err) - } - - rehydratedConfig.Modno++ - - if err := s.config.Update(rehydratedConfig); err != nil { + if err := s.config.Transform(func(cfg *v1.Config) (*v1.Config, error) { + if cfg.Modno != req.Msg.Modno { + return nil, errors.New("config modno mismatch, reload and try again") + } + rehydrated := config.RehydrateNetworkSanitizedConfig(req.Msg, cfg) + if err := config.ValidateConfig(rehydrated); err != nil { + return nil, fmt.Errorf("validation error: %w", err) + } + rehydrated.Modno++ + return rehydrated, nil + }); err != nil { return nil, fmt.Errorf("failed to update config: %w", err) } @@ -98,7 +89,7 @@ func (s *BackrestHandler) SetConfig(ctx context.Context, req *connect.Request[v1 if err != nil { return nil, fmt.Errorf("failed to get newly set config: %w", err) } - return connect.NewResponse(newConfig), nil + return connect.NewResponse(config.SanitizeForNetwork(newConfig)), nil } func (s *BackrestHandler) CheckRepoExists(ctx context.Context, req *connect.Request[v1.CheckRepoExistsRequest]) (*connect.Response[v1.CheckRepoExistsResponse], error) { @@ -229,26 +220,23 @@ func (s *BackrestHandler) AddRepo(ctx context.Context, req *connect.Request[v1.A s.orchestrator.ScheduleTask(tasks.NewOneoffIndexSnapshotsTask(newRepo, time.Now()), 
tasks.TaskPriorityInteractive+tasks.TaskPriorityIndexSnapshots) zap.L().Debug("done add repo") - return connect.NewResponse(c), nil + return connect.NewResponse(config.SanitizeForNetwork(c)), nil } func (s *BackrestHandler) RemoveRepo(ctx context.Context, req *connect.Request[types.StringValue]) (*connect.Response[v1.Config], error) { - cfg, err := s.config.Get() - if err != nil { - return nil, fmt.Errorf("failed to get config: %w", err) - } - - // Remove the repo from the configuration - cfg.Repos = slices.DeleteFunc(cfg.Repos, func(r *v1.Repo) bool { - return r.Id == req.Msg.Value - }) - if err := s.config.Update(cfg); err != nil { + var instanceID string + if err := s.config.Transform(func(cfg *v1.Config) (*v1.Config, error) { + instanceID = cfg.Instance + cfg.Repos = slices.DeleteFunc(cfg.Repos, func(r *v1.Repo) bool { + return r.Id == req.Msg.Value + }) + return cfg, nil + }); err != nil { return nil, fmt.Errorf("failed to update config: %w", err) } // Query for all operations for the repo - q := oplog.Query{}. 
- SetInstanceID(cfg.Instance) + q := oplog.Query{}.SetInstanceID(instanceID) q.DeprecatedRepoID = &req.Msg.Value var opIDs []int64 if err := s.oplog.Query(q, func(op *v1.Operation) error { @@ -260,17 +248,18 @@ func (s *BackrestHandler) RemoveRepo(ctx context.Context, req *connect.Request[t // Delete operations referencing the repo from the oplog in batches for len(opIDs) > 0 { - batchSize := 256 - if batchSize > len(opIDs) { - batchSize = len(opIDs) - } + batchSize := min(256, len(opIDs)) if err := s.oplog.Delete(opIDs[:batchSize]...); err != nil { return nil, fmt.Errorf("failed to delete operations: %w", err) } opIDs = opIDs[batchSize:] } - return connect.NewResponse(cfg), nil + newConfig, err := s.config.Get() + if err != nil { + return nil, fmt.Errorf("failed to get config: %w", err) + } + return connect.NewResponse(config.SanitizeForNetwork(newConfig)), nil } // SetupSftp implements SetupSftp RPC @@ -454,13 +443,13 @@ func (s *BackrestHandler) GetOperationEvents(ctx context.Context, req *connect.R func (s *BackrestHandler) GetOperations(ctx context.Context, req *connect.Request[v1.GetOperationsRequest]) (*connect.Response[v1.OperationList], error) { q, err := protoutil.OpSelectorToQuery(req.Msg.Selector) + if err != nil { + return nil, err + } if req.Msg.LastN != 0 { q.Reversed = true q.Limit = int(req.Msg.LastN) } - if err != nil { - return nil, err - } var ops []*v1.Operation opCollector := func(op *v1.Operation) error { @@ -628,7 +617,9 @@ func (s *BackrestHandler) Restore(ctx context.Context, req *connect.Request[v1.R } at := time.Now() - s.orchestrator.ScheduleTask(tasks.NewOneoffRestoreTask(repo, req.Msg.PlanId, 0 /* flowID */, at, req.Msg.SnapshotId, req.Msg.Path, req.Msg.Target), tasks.TaskPriorityInteractive+tasks.TaskPriorityDefault) + if err := s.orchestrator.ScheduleTask(tasks.NewOneoffRestoreTask(repo, req.Msg.PlanId, 0 /* flowID */, at, req.Msg.SnapshotId, req.Msg.Path, req.Msg.Target), tasks.TaskPriorityInteractive+tasks.TaskPriorityDefault); 
err != nil { + return nil, fmt.Errorf("failed to schedule restore task: %w", err) + } return connect.NewResponse(&emptypb.Empty{}), nil } @@ -944,6 +935,52 @@ func (s *BackrestHandler) GetSummaryDashboard(ctx context.Context, req *connect. return connect.NewResponse(response), nil } +func (s *BackrestHandler) GeneratePairingToken(ctx context.Context, req *connect.Request[v1.GeneratePairingTokenRequest]) (*connect.Response[v1.GeneratePairingTokenResponse], error) { + // Generate the one-time secret before taking the lock. + secret, err := cryptoutil.GeneratePairingSecret() + if err != nil { + return nil, fmt.Errorf("failed to generate pairing secret: %w", err) + } + + now := time.Now().Unix() + var expiresAt int64 + if req.Msg.TtlSeconds > 0 { + expiresAt = now + req.Msg.TtlSeconds + } + + var tokenStr string + if err := s.config.Transform(func(cfg *v1.Config) (*v1.Config, error) { + if cfg.Instance == "" { + return nil, connect.NewError(connect.CodeFailedPrecondition, errors.New("instance name must be set before generating pairing tokens")) + } + + identity := cfg.GetMultihost().GetIdentity() + if identity == nil { + return nil, connect.NewError(connect.CodeFailedPrecondition, errors.New("multihost identity must be configured before generating pairing tokens")) + } + + cfg.Multihost.PairingTokens = append(cfg.Multihost.PairingTokens, &v1.Multihost_PairingToken{ + Secret: secret, + Label: req.Msg.Label, + CreatedAtUnix: now, + ExpiresAtUnix: expiresAt, + MaxUses: req.Msg.MaxUses, + Uses: 0, + Permissions: req.Msg.Permissions, + }) + cfg.Modno++ + + tokenStr = cryptoutil.FormatPairingToken(identity.Keyid, secret, cfg.Instance) + return cfg, nil + }); err != nil { + return nil, fmt.Errorf("failed to save pairing token: %w", err) + } + + return connect.NewResponse(&v1.GeneratePairingTokenResponse{ + Token: tokenStr, + }), nil +} + func sanitizeRepoFlags(repo *v1.Repo) { for i, flag := range repo.Flags { if strings.HasPrefix(flag, "--option=sftp.args=") { diff --git 
a/internal/api/syncapi/authmiddleware.go b/internal/api/syncapi/authmiddleware.go deleted file mode 100644 index 2070297d..00000000 --- a/internal/api/syncapi/authmiddleware.go +++ /dev/null @@ -1,245 +0,0 @@ -package syncapi - -import ( - "context" - "encoding/base64" - "encoding/binary" - "errors" - "fmt" - "net/http" - "slices" - "time" - - "connectrpc.com/connect" - v1 "github.com/garethgeorge/backrest/gen/go/v1" - "github.com/garethgeorge/backrest/gen/go/v1sync" - "github.com/garethgeorge/backrest/internal/config" - "github.com/garethgeorge/backrest/internal/cryptoutil" - "google.golang.org/protobuf/proto" -) - -var authTokenHeader = "Authorization" -var maxSignatureAge = 5 * time.Minute // Maximum age of a signature before it is considered invalid - -type peerContextKey string - -const PeerContextKey peerContextKey = "peer" - -func ContextWithPeer(ctx context.Context, peer *v1.Multihost_Peer) context.Context { - return context.WithValue(ctx, PeerContextKey, peer) -} - -func PeerFromContext(ctx context.Context) *v1.Multihost_Peer { - peer, ok := ctx.Value(PeerContextKey).(*v1.Multihost_Peer) - if !ok { - return nil - } - return peer -} - -func newAuthHandler(config *config.ConfigManager, next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - config, err := config.Get() - if err != nil { - http.Error(rw, "internal error", http.StatusInternalServerError) - return - } - - authHeaderValue, err := createAuthHeader(config) - if err != nil { - http.Error(rw, fmt.Sprintf("internal error: %v", err), http.StatusInternalServerError) - return - } - rw.Header().Set(authTokenHeader, authHeaderValue) - - peer, err := decodeAndVerifyAuthHeader(r, config.Instance, config.GetMultihost().GetAuthorizedClients()) - if err != nil { - http.Error(rw, fmt.Sprintf("unauthorized: %v", err), http.StatusUnauthorized) - return - } - - next.ServeHTTP(rw, r.WithContext(context.WithValue(r.Context(), PeerContextKey, peer))) - }) -} - 
-func createAuthHeader(config *v1.Config) (string, error) { - if config == nil || config.GetMultihost().GetIdentity() == nil { - return "", errors.New("config missing multihost.identity") - } - - privKey, err := cryptoutil.NewPrivateKey(config.GetMultihost().GetIdentity()) - if err != nil { - return "", fmt.Errorf("load private key: %w", err) - } - - signedMessage, err := createSignedMessage([]byte(config.Instance), privKey) - if err != nil { - return "", fmt.Errorf("create signed message: %w", err) - } - - authToken := &v1sync.AuthorizationToken{ - InstanceId: signedMessage, - PublicKey: privKey.PublicKeyProto(), - } - - tokenBytes, err := proto.Marshal(authToken) - if err != nil { - return "", fmt.Errorf("marshal auth token: %w", err) - } - - return base64.StdEncoding.EncodeToString(tokenBytes), nil -} - -type authHeaderClient struct { - configManager *config.ConfigManager - delegate connect.HTTPClient - wantPeer *v1.Multihost_Peer -} - -func (c *authHeaderClient) Do(req *http.Request) (*http.Response, error) { - // create the header - cfg, err := c.configManager.Get() - if err != nil { - return nil, fmt.Errorf("get config: %w", err) - } - authHeaderValue, err := createAuthHeader(cfg) - if err != nil { - return nil, fmt.Errorf("create auth header: %w", err) - } - req.Header.Set(authTokenHeader, authHeaderValue) - - resp, err := c.delegate.Do(req) - // verify the response header - if err != nil { - return nil, fmt.Errorf("HTTP request failed: %w", err) - } - if resp.StatusCode != http.StatusOK { - return resp, fmt.Errorf("HTTP request failed with status %d: %s", resp.StatusCode, resp.Status) - } - peer, err := decodeAndVerifyAuthHeader(req, cfg.Instance, cfg.GetMultihost().GetAuthorizedClients()) - if err != nil { - return resp, fmt.Errorf("verify auth header: %w", err) - } - - // Check the peer matches the expected one. 
- if c.wantPeer == nil || c.wantPeer.GetInstanceId() != peer.GetInstanceId() { - return resp, fmt.Errorf("peer instance ID mismatch: expected %s, got %s", c.wantPeer.GetInstanceId(), peer.GetInstanceId()) - } - if c.wantPeer.GetKeyid() != peer.GetKeyid() { - return resp, fmt.Errorf("peer key ID mismatch: expected %s, got %s", c.wantPeer.GetKeyid(), peer.GetKeyid()) - } - return resp, nil -} - -func newHTTPClientWithConfig(cfg *config.ConfigManager, delegate connect.HTTPClient) (connect.HTTPClient, error) { - return &authHeaderClient{ - configManager: cfg, - delegate: delegate, - }, nil -} - -func decodeAndVerifyAuthHeader(r *http.Request, localInstanceID string, peers []*v1.Multihost_Peer) (*v1.Multihost_Peer, error) { - authHeader := r.Header.Get(authTokenHeader) - if len(authHeader) == 0 { - return nil, errors.New("missing authorization header") - } - - // Decode the auth token from the header - tokenBytes, err := base64.StdEncoding.DecodeString(authHeader) - if err != nil { - return nil, errors.New("invalid authorization header format") - } - - var token v1sync.AuthorizationToken - if err := proto.Unmarshal(tokenBytes, &token); err != nil { - return nil, fmt.Errorf("unmarshal authorization token: %w", err) - } - - // Load the public key from the token - publicKey, err := cryptoutil.NewPublicKey(token.GetPublicKey()) - if err != nil { - return nil, fmt.Errorf("load public key: %w", err) - } - if publicKey.KeyID() != token.InstanceId.GetKeyid() { - return nil, fmt.Errorf("instance ID must be signed with public key in token: expected %s, got %s", token.InstanceId.GetKeyid(), publicKey.KeyID()) - } - - // Verify the signed message - if err := verifySignedMessage(token.GetInstanceId(), publicKey); err != nil { - return nil, fmt.Errorf("verify signed message: %w", err) - } - - // Now that we've validated that the peer was able to sign the message, we can look it up in the config - peerIdx := slices.IndexFunc(peers, func(peer *v1.Multihost_Peer) bool { - return 
peer.Keyid == publicKey.KeyID() - }) - if peerIdx == -1 { - return nil, fmt.Errorf("peer with key ID %s not found in authorized clients", publicKey.KeyID()) - } - - // Finally check that the instance ID in the token matches the one in the config - peer := peers[peerIdx] - tokenInstanceID := string(token.GetInstanceId().GetPayload()) - if peer.InstanceId != tokenInstanceID { - return nil, fmt.Errorf("instance ID mismatch: expected %s, got %s", peer.InstanceId, tokenInstanceID) - } - - return peer, nil -} - -func createSignedMessage(payload []byte, identity *cryptoutil.PrivateKey) (*v1.SignedMessage, error) { - if len(payload) == 0 { - return nil, errors.New("payload must not be empty") - } - - timestampMillis := time.Now().UnixMilli() - - payloadWithTimestamp := make([]byte, 0, len(payload)+8) - binary.BigEndian.AppendUint64(payloadWithTimestamp, uint64(timestampMillis)) - payloadWithTimestamp = append(payloadWithTimestamp, payload...) - - signature, err := identity.Sign(payloadWithTimestamp) - if err != nil { - return nil, fmt.Errorf("signing payload: %w", err) - } - - return &v1.SignedMessage{ - Payload: payload, - Signature: signature, - Keyid: identity.KeyID(), - TimestampMillis: timestampMillis, - }, nil -} - -func verifySignedMessage(msg *v1.SignedMessage, publicKey *cryptoutil.PublicKey) error { - if msg == nil { - return errors.New("signed message must not be nil") - } - if len(msg.GetPayload()) == 0 { - return errors.New("signed message payload must not be empty") - } - if len(msg.GetSignature()) == 0 { - return errors.New("signed message signature must not be empty") - } - if len(msg.GetKeyid()) == 0 { - return errors.New("signed message key ID must not be empty") - } - - if publicKey.KeyID() != msg.GetKeyid() { - return fmt.Errorf("public key ID mismatch: expected %s, got %s", publicKey.KeyID(), msg.GetKeyid()) - } - - payloadWithTimestamp := make([]byte, 0, len(msg.GetPayload())+8) - binary.BigEndian.AppendUint64(payloadWithTimestamp, 
uint64(msg.GetTimestampMillis())) - payloadWithTimestamp = append(payloadWithTimestamp, msg.GetPayload()...) - - if err := publicKey.Verify(payloadWithTimestamp, msg.GetSignature()); err != nil { - return fmt.Errorf("verifying signed message: %w", err) - } - - if time.Since(time.UnixMilli(msg.GetTimestampMillis())) > maxSignatureAge { - return fmt.Errorf("signature is too old, max age is %s. Is the clock out of sync?", maxSignatureAge) - } - - return nil -} diff --git a/internal/api/syncapi/authmiddleware_test.go b/internal/api/syncapi/authmiddleware_test.go deleted file mode 100644 index 18151198..00000000 --- a/internal/api/syncapi/authmiddleware_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package syncapi - -import ( - "encoding/base64" - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - v1 "github.com/garethgeorge/backrest/gen/go/v1" - "github.com/garethgeorge/backrest/gen/go/v1sync" - "github.com/garethgeorge/backrest/internal/config" - "github.com/garethgeorge/backrest/internal/config/migrations" - "github.com/garethgeorge/backrest/internal/cryptoutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" -) - -func TestAuthMiddleware(t *testing.T) { - serverPrivKey, err := cryptoutil.GeneratePrivateKey() - require.NoError(t, err) - - clientPrivKey, err := cryptoutil.GeneratePrivateKey() - require.NoError(t, err) - - // Create a mock config manager - cfgManager := &config.ConfigManager{ - Store: &config.MemoryStore{ - Config: &v1.Config{ - Version: migrations.CurrentVersion, - Instance: "test-instance", - Multihost: &v1.Multihost{ - Identity: serverPrivKey, - AuthorizedClients: []*v1.Multihost_Peer{ - { - InstanceId: "client-instance", - Keyid: clientPrivKey.Keyid, - }, - }, - }, - }, - }, - } - - // Create a mock handler - mockHandler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - peer := PeerFromContext(r.Context()) - require.NotNil(t, peer) - assert.Equal(t, 
"client-instance", peer.InstanceId) - rw.WriteHeader(http.StatusOK) - }) - - // Create the auth handler - authHandler := newAuthHandler(cfgManager, mockHandler) - - // Create a test server - server := httptest.NewServer(authHandler) - defer server.Close() - - t.Run("valid auth header", func(t *testing.T) { - // Create a request with a valid auth header - req, err := http.NewRequest("GET", server.URL, nil) - require.NoError(t, err) - - // Create a valid auth header - clientCfg := &v1.Config{ - Instance: "client-instance", - Multihost: &v1.Multihost{ - Identity: clientPrivKey, - }, - } - authHeader, err := createAuthHeader(clientCfg) - require.NoError(t, err) - req.Header.Set(authTokenHeader, authHeader) - - // Make the request - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - defer resp.Body.Close() - - // Check the response - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - - t.Run("missing auth header", func(t *testing.T) { - req, err := http.NewRequest("GET", server.URL, nil) - require.NoError(t, err) - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - defer resp.Body.Close() - - assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - }) - - t.Run("invalid auth header", func(t *testing.T) { - req, err := http.NewRequest("GET", server.URL, nil) - require.NoError(t, err) - req.Header.Set(authTokenHeader, "invalid") - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - defer resp.Body.Close() - - assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - }) - - t.Run("unauthorized peer", func(t *testing.T) { - unauthorizedPrivKey, err := cryptoutil.GeneratePrivateKey() - require.NoError(t, err) - - req, err := http.NewRequest("GET", server.URL, nil) - require.NoError(t, err) - - clientCfg := &v1.Config{ - Instance: "unauthorized-instance", - Multihost: &v1.Multihost{ - Identity: unauthorizedPrivKey, - }, - } - authHeader, err := createAuthHeader(clientCfg) - require.NoError(t, err) - 
req.Header.Set(authTokenHeader, authHeader) - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - defer resp.Body.Close() - - assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - }) - - t.Run("instance id mismatch", func(t *testing.T) { - req, err := http.NewRequest("GET", server.URL, nil) - require.NoError(t, err) - - clientCfg := &v1.Config{ - Instance: "wrong-instance", - Multihost: &v1.Multihost{ - Identity: clientPrivKey, - }, - } - authHeader, err := createAuthHeader(clientCfg) - require.NoError(t, err) - req.Header.Set(authTokenHeader, authHeader) - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - defer resp.Body.Close() - - assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - }) - - t.Run("signature too old", func(t *testing.T) { - req, err := http.NewRequest("GET", server.URL, nil) - require.NoError(t, err) - - clientCfg := &v1.Config{ - Instance: "client-instance", - Multihost: &v1.Multihost{ - Identity: clientPrivKey, - }, - } - - privKey, err := cryptoutil.NewPrivateKey(clientPrivKey) - require.NoError(t, err) - - // create a signed message with an old timestamp - signedMessage, err := createSignedMessage([]byte(clientCfg.Instance), privKey) - require.NoError(t, err) - signedMessage.TimestampMillis = time.Now().Add(-2 * maxSignatureAge).UnixMilli() - - // create the auth token - authToken, err := createAuthToken(signedMessage, privKey) - require.NoError(t, err) - - req.Header.Set(authTokenHeader, authToken) - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - defer resp.Body.Close() - - assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - }) -} - -func createAuthToken(signedMessage *v1.SignedMessage, privKey *cryptoutil.PrivateKey) (string, error) { - authToken := &v1sync.AuthorizationToken{ - InstanceId: signedMessage, - PublicKey: privKey.PublicKeyProto(), - } - - tokenBytes, err := proto.Marshal(authToken) - if err != nil { - return "", fmt.Errorf("marshal auth token: %w", 
err) - } - - return base64.StdEncoding.EncodeToString(tokenBytes), nil -} diff --git a/internal/api/syncapi/cmdstreamutil.go b/internal/api/syncapi/cmdstreamutil.go index c4f52340..3db793cb 100644 --- a/internal/api/syncapi/cmdstreamutil.go +++ b/internal/api/syncapi/cmdstreamutil.go @@ -27,7 +27,7 @@ type bidiSyncCommandStream struct { func newBidiSyncCommandStream() *bidiSyncCommandStream { return &bidiSyncCommandStream{ - sendChan: make(chan *v1sync.SyncStreamItem, 64), // Buffered channel to allow sending items without blocking + sendChan: make(chan *v1sync.SyncStreamItem, 256), // Buffered channel to allow sending items without blocking recvChan: make(chan *v1sync.SyncStreamItem, 1), terminateWithErrChan: make(chan error, 1), } @@ -74,15 +74,19 @@ func (s *bidiSyncCommandStream) ConnectStream(ctx context.Context, stream syncCo ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { - for ctx.Err() == nil { - if val, err := stream.Receive(); err != nil { + defer close(s.recvChan) + for { + val, err := stream.Receive() + if err != nil { s.SendErrorAndTerminate(NewSyncErrorDisconnected(fmt.Errorf("receiving item: %w", err))) - break - } else { - s.recvChan <- val + return + } + select { + case s.recvChan <- val: + case <-ctx.Done(): + return } } - close(s.recvChan) }() for { diff --git a/internal/api/syncapi/errors.go b/internal/api/syncapi/errors.go index 85530b3d..13e2ea80 100644 --- a/internal/api/syncapi/errors.go +++ b/internal/api/syncapi/errors.go @@ -28,34 +28,6 @@ func NewSyncErrorDisconnected(message error) *SyncError { } } -func NewSyncErrorUnknown(message error) *SyncError { - return &SyncError{ - State: v1sync.ConnectionState_CONNECTION_STATE_UNKNOWN, - Message: message, - } -} - -func NewSyncErrorPending(message error) *SyncError { - return &SyncError{ - State: v1sync.ConnectionState_CONNECTION_STATE_PENDING, - Message: message, - } -} - -func NewSyncErrorConnected(message error) *SyncError { - return &SyncError{ - State: 
v1sync.ConnectionState_CONNECTION_STATE_CONNECTED, - Message: message, - } -} - -func NewSyncErrorRetryWait(message error) *SyncError { - return &SyncError{ - State: v1sync.ConnectionState_CONNECTION_STATE_RETRY_WAIT, - Message: message, - } -} - func NewSyncErrorAuth(message error) *SyncError { return &SyncError{ State: v1sync.ConnectionState_CONNECTION_STATE_ERROR_AUTH, diff --git a/internal/api/syncapi/pairing_test.go b/internal/api/syncapi/pairing_test.go new file mode 100644 index 00000000..6cb197e1 --- /dev/null +++ b/internal/api/syncapi/pairing_test.go @@ -0,0 +1,416 @@ +package syncapi + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + v1 "github.com/garethgeorge/backrest/gen/go/v1" + "github.com/garethgeorge/backrest/gen/go/v1sync" + "github.com/garethgeorge/backrest/internal/config/migrations" + "github.com/garethgeorge/backrest/internal/cryptoutil" + "github.com/garethgeorge/backrest/internal/testutil" +) + +func TestValidatePairingSecret(t *testing.T) { + now := time.Unix(1000, 0) + + tokens := []*v1.Multihost_PairingToken{ + { + Secret: "valid-secret", + Label: "test-token", + CreatedAtUnix: 900, + ExpiresAtUnix: 2000, + MaxUses: 3, + Uses: 1, + }, + { + Secret: "unlimited-token", + Label: "unlimited", + CreatedAtUnix: 900, + ExpiresAtUnix: 0, // no expiry + MaxUses: 0, // unlimited uses + Uses: 100, + }, + } + + tests := []struct { + name string + secret string + tokens []*v1.Multihost_PairingToken + now time.Time + wantLabel string + wantErr bool + }{ + { + name: "valid secret", + secret: "valid-secret", + tokens: tokens, + now: now, + wantLabel: "test-token", + }, + { + name: "unlimited token", + secret: "unlimited-token", + tokens: tokens, + now: now, + wantLabel: "unlimited", + }, + { + name: "empty secret", + secret: "", + tokens: tokens, + now: now, + wantErr: true, + }, + { + name: "wrong secret", + secret: "wrong-secret", + tokens: tokens, + now: now, + wantErr: true, + }, + { + name: "expired token", + secret: "valid-secret", 
+ tokens: []*v1.Multihost_PairingToken{ + { + Secret: "valid-secret", + Label: "expired", + ExpiresAtUnix: 500, + MaxUses: 0, + }, + }, + now: now, + wantErr: true, + }, + { + name: "max uses reached", + secret: "valid-secret", + tokens: []*v1.Multihost_PairingToken{ + { + Secret: "valid-secret", + Label: "exhausted", + MaxUses: 2, + Uses: 2, + }, + }, + now: now, + wantErr: true, + }, + { + name: "nil tokens", + secret: "anything", + tokens: nil, + now: now, + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + token, err := ValidatePairingSecret(tc.secret, tc.tokens, tc.now) + if tc.wantErr { + if err == nil { + t.Fatal("expected error, got nil") + } + return + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if token.Label != tc.wantLabel { + t.Errorf("label = %q, want %q", token.Label, tc.wantLabel) + } + }) + } +} + +func TestPairingTokenFlow(t *testing.T) { + testutil.InstallZapLogger(t) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + peerHostAddr := testutil.AllocOpenBindAddr(t) + peerClientAddr := testutil.AllocOpenBindAddr(t) + + pairingSecret, err := cryptoutil.GeneratePairingSecret() + if err != nil { + t.Fatalf("failed to generate pairing secret: %v", err) + } + + // Host has a pairing token but NO authorized clients yet. + peerHostConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultHostID, + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity1, + AuthorizedClients: []*v1.Multihost_Peer{}, // empty — client not pre-authorized + PairingTokens: []*v1.Multihost_PairingToken{ + { + Secret: pairingSecret, + Label: "test-pairing", + CreatedAtUnix: time.Now().Unix(), + ExpiresAtUnix: time.Now().Add(1 * time.Hour).Unix(), + MaxUses: 1, + Uses: 0, + }, + }, + }, + } + + // Client knows about the host and has the pairing secret. 
+ peerClientConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultClientID, + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity2, + KnownHosts: []*v1.Multihost_Peer{ + { + Keyid: identity1.Keyid, + InstanceId: defaultHostID, + InstanceUrl: fmt.Sprintf("http://%s", peerHostAddr), + InitialPairingSecret: pairingSecret, + }, + }, + }, + } + + peerHost := newPeerUnderTest(t, peerHostConfig) + peerClient := newPeerUnderTest(t, peerClientConfig) + + startRunningSyncAPI(t, peerHost, peerHostAddr) + startRunningSyncAPI(t, peerClient, peerClientAddr) + + // The client should successfully connect via the pairing token. + tryConnect(t, ctx, peerClient, peerClientConfig.Multihost.KnownHosts[0]) + + // Verify the host now has the client in its authorized_clients. + hostConfig, err := peerHost.configMgr.Get() + if err != nil { + t.Fatalf("failed to get host config: %v", err) + } + if len(hostConfig.Multihost.AuthorizedClients) != 1 { + t.Fatalf("expected 1 authorized client, got %d", len(hostConfig.Multihost.AuthorizedClients)) + } + ac := hostConfig.Multihost.AuthorizedClients[0] + if ac.Keyid != identity2.Keyid { + t.Errorf("authorized client keyid = %q, want %q", ac.Keyid, identity2.Keyid) + } + if ac.InstanceId != defaultClientID { + t.Errorf("authorized client instance id = %q, want %q", ac.InstanceId, defaultClientID) + } + + // Verify the pairing token was consumed (max_uses=1, so it should be removed). 
+ if len(hostConfig.Multihost.PairingTokens) != 0 { + t.Errorf("expected 0 pairing tokens after consumption, got %d", len(hostConfig.Multihost.PairingTokens)) + } +} + +func TestPairingTokenExpiredRejected(t *testing.T) { + testutil.InstallZapLogger(t) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + peerHostAddr := testutil.AllocOpenBindAddr(t) + peerClientAddr := testutil.AllocOpenBindAddr(t) + + pairingSecret, _ := cryptoutil.GeneratePairingSecret() + + // Host has an expired pairing token. + peerHostConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultHostID, + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity1, + AuthorizedClients: []*v1.Multihost_Peer{}, + PairingTokens: []*v1.Multihost_PairingToken{ + { + Secret: pairingSecret, + Label: "expired-token", + CreatedAtUnix: time.Now().Add(-2 * time.Hour).Unix(), + ExpiresAtUnix: time.Now().Add(-1 * time.Hour).Unix(), // expired 1 hour ago + MaxUses: 0, + }, + }, + }, + } + + peerClientConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultClientID, + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity2, + KnownHosts: []*v1.Multihost_Peer{ + { + Keyid: identity1.Keyid, + InstanceId: defaultHostID, + InstanceUrl: fmt.Sprintf("http://%s", peerHostAddr), + InitialPairingSecret: pairingSecret, + }, + }, + }, + } + + peerHost := newPeerUnderTest(t, peerHostConfig) + peerClient := newPeerUnderTest(t, peerClientConfig) + + startRunningSyncAPI(t, peerHost, peerHostAddr) + startRunningSyncAPI(t, peerClient, peerClientAddr) + + // Connection should fail with auth error. + waitForConnectionState(t, ctx, peerClient, peerClientConfig.Multihost.KnownHosts[0], v1sync.ConnectionState_CONNECTION_STATE_ERROR_AUTH) + + // Host should still have no authorized clients. 
+ hostConfig, _ := peerHost.configMgr.Get() + if len(hostConfig.Multihost.AuthorizedClients) != 0 { + t.Errorf("expected 0 authorized clients, got %d", len(hostConfig.Multihost.AuthorizedClients)) + } +} + +func TestPairingTokenMaxUsesEnforced(t *testing.T) { + testutil.InstallZapLogger(t) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + peerHostAddr := testutil.AllocOpenBindAddr(t) + + pairingSecret, _ := cryptoutil.GeneratePairingSecret() + + identity3, _ := cryptoutil.GeneratePrivateKey() + + // Host has a pairing token with max_uses=1. + peerHostConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultHostID, + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity1, + AuthorizedClients: []*v1.Multihost_Peer{}, + PairingTokens: []*v1.Multihost_PairingToken{ + { + Secret: pairingSecret, + Label: "single-use", + CreatedAtUnix: time.Now().Unix(), + ExpiresAtUnix: time.Now().Add(1 * time.Hour).Unix(), + MaxUses: 1, + Uses: 0, + }, + }, + }, + } + + // First client pairs successfully. 
+ peerClient1Config := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: "client-1", + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity2, + KnownHosts: []*v1.Multihost_Peer{ + { + Keyid: identity1.Keyid, + InstanceId: defaultHostID, + InstanceUrl: fmt.Sprintf("http://%s", peerHostAddr), + InitialPairingSecret: pairingSecret, + }, + }, + }, + } + + peerHost := newPeerUnderTest(t, peerHostConfig) + peerClient1 := newPeerUnderTest(t, peerClient1Config) + + var wg sync.WaitGroup + syncCtx, cancelSync := context.WithCancel(ctx) + + wg.Add(2) + go func() { defer wg.Done(); runSyncAPIWithCtx(syncCtx, peerHost, peerHostAddr) }() + go func() { + defer wg.Done() + peerClient1Addr := testutil.AllocOpenBindAddr(t) + runSyncAPIWithCtx(syncCtx, peerClient1, peerClient1Addr) + }() + + tryConnect(t, ctx, peerClient1, peerClient1Config.Multihost.KnownHosts[0]) + + // Stop first client, start second client with same pairing secret. + cancelSync() + wg.Wait() + + peerClient2Config := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: "client-2", + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity3, + KnownHosts: []*v1.Multihost_Peer{ + { + Keyid: identity1.Keyid, + InstanceId: defaultHostID, + InstanceUrl: fmt.Sprintf("http://%s", peerHostAddr), + InitialPairingSecret: pairingSecret, + }, + }, + }, + } + + peerClient2 := newPeerUnderTest(t, peerClient2Config) + + startRunningSyncAPI(t, peerHost, peerHostAddr) + startRunningSyncAPI(t, peerClient2, testutil.AllocOpenBindAddr(t)) + + // Second client should fail — token is consumed. 
+ waitForConnectionState(t, ctx, peerClient2, peerClient2Config.Multihost.KnownHosts[0], v1sync.ConnectionState_CONNECTION_STATE_ERROR_AUTH) +} + +func TestNoPairingSecretRejected(t *testing.T) { + testutil.InstallZapLogger(t) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + peerHostAddr := testutil.AllocOpenBindAddr(t) + peerClientAddr := testutil.AllocOpenBindAddr(t) + + // Host has NO pairing tokens and NO authorized clients. + peerHostConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultHostID, + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity1, + AuthorizedClients: []*v1.Multihost_Peer{}, + }, + } + + // Client tries to connect without any pairing secret. + peerClientConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultClientID, + Repos: []*v1.Repo{}, + Multihost: &v1.Multihost{ + Identity: identity2, + KnownHosts: []*v1.Multihost_Peer{ + { + Keyid: identity1.Keyid, + InstanceId: defaultHostID, + InstanceUrl: fmt.Sprintf("http://%s", peerHostAddr), + }, + }, + }, + } + + peerHost := newPeerUnderTest(t, peerHostConfig) + peerClient := newPeerUnderTest(t, peerClientConfig) + + startRunningSyncAPI(t, peerHost, peerHostAddr) + startRunningSyncAPI(t, peerClient, peerClientAddr) + + waitForConnectionState(t, ctx, peerClient, peerClientConfig.Multihost.KnownHosts[0], v1sync.ConnectionState_CONNECTION_STATE_ERROR_AUTH) +} diff --git a/internal/api/syncapi/permissions/groups.go b/internal/api/syncapi/permissions/groups.go index 4f245dd2..5f708e2a 100644 --- a/internal/api/syncapi/permissions/groups.go +++ b/internal/api/syncapi/permissions/groups.go @@ -23,4 +23,8 @@ var ( PermsCanViewOperations = []v1.Multihost_Permission_Type{ v1.Multihost_Permission_PERMISSION_READ_OPERATIONS, } + + PermsCanReceiveSharedRepos = []v1.Multihost_Permission_Type{ + v1.Multihost_Permission_PERMISSION_RECEIVE_SHARED_REPOS, + } ) diff --git 
a/internal/api/syncapi/permissions/permissions.go b/internal/api/syncapi/permissions/permissions.go index 1c56a67a..e58990c2 100644 --- a/internal/api/syncapi/permissions/permissions.go +++ b/internal/api/syncapi/permissions/permissions.go @@ -96,6 +96,9 @@ type PermissionSet struct { // immutable after construction perms map[v1.Multihost_Permission_Type]ScopeSet + // scopelessPerms tracks permission types that were granted without scopes (e.g. PERMISSION_RECEIVE_SHARED_REPOS) + scopelessPerms map[v1.Multihost_Permission_Type]bool + // caches store computed permission checks per scope id and permission type // cache is best-effort and not bounded; PermissionSet is expected to be short-lived (per connection/request) mu sync.RWMutex @@ -105,13 +108,15 @@ type PermissionSet struct { func NewPermissionSet(perms []*v1.Multihost_Permission) (*PermissionSet, error) { permSet := &PermissionSet{ - perms: make(map[v1.Multihost_Permission_Type]ScopeSet), - planCache: make(map[string]map[v1.Multihost_Permission_Type]bool), - repoCache: make(map[string]map[v1.Multihost_Permission_Type]bool), + perms: make(map[v1.Multihost_Permission_Type]ScopeSet), + scopelessPerms: make(map[v1.Multihost_Permission_Type]bool), + planCache: make(map[string]map[v1.Multihost_Permission_Type]bool), + repoCache: make(map[string]map[v1.Multihost_Permission_Type]bool), } for _, perm := range perms { if perm.Scopes == nil { + permSet.scopelessPerms[perm.Type] = true continue } scopeSet, err := NewScopeSet(perm.Scopes) @@ -124,6 +129,20 @@ func NewPermissionSet(perms []*v1.Multihost_Permission) (*PermissionSet, error) return permSet, nil } +// HasPermissionType checks if any of the given permission types are granted, regardless of scopes. +// Use this for scope-less permissions like PERMISSION_RECEIVE_SHARED_REPOS. 
+func (p *PermissionSet) HasPermissionType(permTypes ...v1.Multihost_Permission_Type) bool { + for _, permType := range permTypes { + if _, ok := p.scopelessPerms[permType]; ok { + return true + } + if _, ok := p.perms[permType]; ok { + return true + } + } + return false +} + func (p *PermissionSet) CheckPermissionForPlan(planID string, permType ...v1.Multihost_Permission_Type) bool { for _, pt := range permType { if p.checkPlanSingle(planID, pt) { diff --git a/internal/api/syncapi/signing.go b/internal/api/syncapi/signing.go new file mode 100644 index 00000000..e149618a --- /dev/null +++ b/internal/api/syncapi/signing.go @@ -0,0 +1,70 @@ +package syncapi + +import ( + "encoding/binary" + "errors" + "fmt" + "time" + + v1 "github.com/garethgeorge/backrest/gen/go/v1" + "github.com/garethgeorge/backrest/internal/cryptoutil" +) + +const maxSignatureAge = 5 * time.Minute + +func createSignedMessage(payload []byte, identity *cryptoutil.PrivateKey) (*v1.SignedMessage, error) { + if len(payload) == 0 { + return nil, errors.New("payload must not be empty") + } + + timestampMillis := time.Now().UnixMilli() + + payloadWithTimestamp := make([]byte, 0, len(payload)+8) + payloadWithTimestamp = binary.BigEndian.AppendUint64(payloadWithTimestamp, uint64(timestampMillis)) + payloadWithTimestamp = append(payloadWithTimestamp, payload...) 
+ + signature, err := identity.Sign(payloadWithTimestamp) + if err != nil { + return nil, fmt.Errorf("signing payload: %w", err) + } + + return &v1.SignedMessage{ + Payload: payload, + Signature: signature, + Keyid: identity.KeyID(), + TimestampMillis: timestampMillis, + }, nil +} + +func verifySignedMessage(msg *v1.SignedMessage, publicKey *cryptoutil.PublicKey) error { + if msg == nil { + return errors.New("signed message must not be nil") + } + if len(msg.GetPayload()) == 0 { + return errors.New("signed message payload must not be empty") + } + if len(msg.GetSignature()) == 0 { + return errors.New("signed message signature must not be empty") + } + if len(msg.GetKeyid()) == 0 { + return errors.New("signed message key ID must not be empty") + } + + if publicKey.KeyID() != msg.GetKeyid() { + return fmt.Errorf("public key ID mismatch: expected %s, got %s", publicKey.KeyID(), msg.GetKeyid()) + } + + payloadWithTimestamp := make([]byte, 0, len(msg.GetPayload())+8) + payloadWithTimestamp = binary.BigEndian.AppendUint64(payloadWithTimestamp, uint64(msg.GetTimestampMillis())) + payloadWithTimestamp = append(payloadWithTimestamp, msg.GetPayload()...) + + if err := publicKey.Verify(payloadWithTimestamp, msg.GetSignature()); err != nil { + return fmt.Errorf("verifying signed message: %w", err) + } + + if time.Since(time.UnixMilli(msg.GetTimestampMillis())) > maxSignatureAge { + return fmt.Errorf("signature is too old, max age is %s. 
Is the clock out of sync?", maxSignatureAge) + } + + return nil +} diff --git a/internal/api/syncapi/syncapi_test.go b/internal/api/syncapi/syncapi_test.go index e7d2d25e..01ab300f 100644 --- a/internal/api/syncapi/syncapi_test.go +++ b/internal/api/syncapi/syncapi_test.go @@ -88,8 +88,7 @@ func TestConnectionSucceeds(t *testing.T) { AuthorizedClients: []*v1.Multihost_Peer{ { Keyid: identity2.Keyid, - KeyidVerified: true, - InstanceId: defaultClientID, + InstanceId: defaultClientID, }, }, }, @@ -192,8 +191,7 @@ func TestSyncConfigChange(t *testing.T) { AuthorizedClients: []*v1.Multihost_Peer{ { Keyid: identity2.Keyid, - KeyidVerified: true, - InstanceId: defaultClientID, + InstanceId: defaultClientID, Permissions: []*v1.Multihost_Permission{ { Type: v1.Multihost_Permission_PERMISSION_READ_CONFIG, @@ -292,8 +290,7 @@ func TestSimpleOperationSync(t *testing.T) { AuthorizedClients: []*v1.Multihost_Peer{ { Keyid: identity2.Keyid, - KeyidVerified: true, - InstanceId: defaultClientID, + InstanceId: defaultClientID, }, }, }, @@ -419,8 +416,7 @@ func TestSyncMutations(t *testing.T) { AuthorizedClients: []*v1.Multihost_Peer{ { Keyid: identity2.Keyid, - KeyidVerified: true, - InstanceId: defaultClientID, + InstanceId: defaultClientID, }, }, }, diff --git a/internal/api/syncapi/syncclient.go b/internal/api/syncapi/syncclient.go index 83553b8b..e76dbd71 100644 --- a/internal/api/syncapi/syncclient.go +++ b/internal/api/syncapi/syncclient.go @@ -109,37 +109,48 @@ func (c *SyncClient) RunSync(ctx context.Context) { cmdStream, syncSessionHandler, c.syncConfigSnapshot.config.GetMultihost().GetKnownHosts(), + c.peer.GetInitialPairingSecret(), + nil, // client never handles unknown peers ) cmdStream.SendErrorAndTerminate(err) }() - if err := cmdStream.ConnectStream(ctx, c.client.Sync(ctx)); err != nil { - c.l.Sugar().Infof("lost stream connection to peer %q (%s): %v", c.peer.InstanceId, c.peer.Keyid, err) + connectErr := cmdStream.ConnectStream(ctx, c.client.Sync(ctx)) + if 
connectErr != nil { + c.l.Sugar().Infof("lost stream connection to peer %q (%s): %v", c.peer.InstanceId, c.peer.Keyid, connectErr) var syncErr *SyncError state := c.mgr.peerStateManager.GetPeerState(c.peer.Keyid).Clone() if state == nil { state = newPeerState(c.peer.InstanceId, c.peer.Keyid) } state.LastHeartbeat = time.Now() - if errors.As(err, &syncErr) { + if errors.As(connectErr, &syncErr) { state.ConnectionState = syncErr.State state.ConnectionStateMessage = syncErr.Message.Error() } else { state.ConnectionState = v1sync.ConnectionState_CONNECTION_STATE_ERROR_INTERNAL - state.ConnectionStateMessage = err.Error() + state.ConnectionStateMessage = connectErr.Error() } c.mgr.peerStateManager.SetPeerState(c.peer.Keyid, state) - } else { - c.reconnectAttempts = 0 } wg.Wait() + // Reset reconnect backoff if the session lasted long enough to be considered a real success, + // rather than a handshake that failed immediately. Using reconnectDelay as the threshold means + // any session that ran at least one full retry window counts as stable. + if time.Since(lastConnect) >= c.reconnectDelay { + c.reconnectAttempts = 0 + } + delay := c.reconnectDelay - time.Since(lastConnect) if c.reconnectAttempts > 0 { backoff := time.Duration(1<= 0 && snapshotHosts[khIdx].GetInitialPairingSecret() != "" { + if err := c.mgr.configMgr.Transform(func(cfg *v1.Config) (*v1.Config, error) { + idx := slices.IndexFunc(cfg.GetMultihost().GetKnownHosts(), func(kh *v1.Multihost_Peer) bool { + return kh.GetKeyid() == peer.GetKeyid() + }) + if idx >= 0 { + cfg.GetMultihost().GetKnownHosts()[idx].InitialPairingSecret = "" + } + cfg.Modno++ + return cfg, nil + }); err != nil { + c.l.Sugar().Warnf("failed to clear pairing secret after successful pairing: %v", err) + } else { + c.l.Sugar().Infof("cleared pairing secret for peer %q after successful connection", peer.InstanceId) + } + } + // Send a heartbeat every interval to keep the connection alive. 
go sendHeartbeats(ctx, stream, env.MultihostHeartbeatInterval()) // Forward a view of our config (if the peer is allowed to see it). - if err := c.sendConfig(ctx, stream); err != nil { + repoCount, planCount, err := c.sendConfig(ctx, stream) + if err != nil { return fmt.Errorf("send config to peer %q: %w", peer.InstanceId, err) } // Forward a list of the resources we're making available to the peer - if err := c.sendResourceList(ctx, stream); err != nil { + resRepoCount, resPlanCount, err := c.sendResourceList(ctx, stream) + if err != nil { return fmt.Errorf("send resource list to peer %q: %w", peer.InstanceId, err) } @@ -305,16 +379,31 @@ func (c *syncSessionHandlerClient) OnConnectionEstablished(ctx context.Context, }, }) } - c.oplog.Subscribe(oplog.Query{}, &oplogSubscription) - go func() { - <-ctx.Done() - c.oplog.Unsubscribe(&oplogSubscription) - }() + c.oplogSubscription = &oplogSubscription + c.oplog.Subscribe(oplog.Query{}, c.oplogSubscription) + + // Send initial operation manifest to the server for reconciliation. 
+ opCount, err := c.sendManifest(stream) + if err != nil { + return fmt.Errorf("send manifest to peer %q: %w", peer.InstanceId, err) + } + + c.l.Sugar().Infof("sent initial state to server: %d operations, %d repos, %d plans (config); %d repos, %d plans (resources)", + opCount, repoCount, planCount, resRepoCount, resPlanCount) + return nil } +func (c *syncSessionHandlerClient) OnConnectionDisconnected() { + if c.oplogSubscription != nil { + c.oplog.Unsubscribe(c.oplogSubscription) + c.oplogSubscription = nil + } +} + func (c *syncSessionHandlerClient) HandleRequestResources(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionRequestResources) error { - return c.sendResourceList(ctx, stream) + _, _, err := c.sendResourceList(ctx, stream) + return err } func (c *syncSessionHandlerClient) HandleHeartbeat(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionHeartbeat) error { @@ -327,79 +416,46 @@ func (c *syncSessionHandlerClient) HandleHeartbeat(ctx context.Context, stream * return nil } -func (c *syncSessionHandlerClient) HandleRequestOperations(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionRequestOperations) error { - highModno := item.GetHighModno() - highOpid := item.GetHighOpid() - c.l.Sugar().Debugf("received operation request for high_modno: %d, high_opid: %d", highModno, highOpid) - - var batch []*v1.Operation - - send := func() error { - if len(batch) == 0 { - return nil - } - - // find new and updated operations - var newOps []*v1.Operation - var updatedOps []*v1.Operation - for _, op := range batch { - if op.GetId() > highOpid { - newOps = append(newOps, op) - } else { - updatedOps = append(updatedOps, op) - } - } - - // send new and updated operations - if len(newOps) > 0 { - stream.Send(&v1sync.SyncStreamItem{ - Action: &v1sync.SyncStreamItem_ReceiveOperations{ - ReceiveOperations: &v1sync.SyncStreamItem_SyncActionReceiveOperations{ - 
Event: &v1.OperationEvent{ - Event: &v1.OperationEvent_CreatedOperations{ - CreatedOperations: &v1.OperationList{Operations: batch}, - }, - }, - }, - }, - }) - } - if len(updatedOps) > 0 { - stream.Send(&v1sync.SyncStreamItem{ - Action: &v1sync.SyncStreamItem_ReceiveOperations{ - ReceiveOperations: &v1sync.SyncStreamItem_SyncActionReceiveOperations{ - Event: &v1.OperationEvent{ - Event: &v1.OperationEvent_UpdatedOperations{ - UpdatedOperations: &v1.OperationList{Operations: updatedOps}, - }, - }, - }, - }, - }) - } - - batch = batch[:0] - return nil - } - - c.oplog.Query(oplog.Query{}.SetModnoGte(highModno), func(op *v1.Operation) error { - if !c.canForwardOperation(op) { - return nil // skip operations that the peer is not allowed to read - } - - batch = append(batch, op) - if len(batch) >= 256 { - if err := send(); err != nil { - return err - } - } - return nil - }) - - if err := send(); err != nil { +func (c *syncSessionHandlerClient) HandleOperationManifest(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionOperationManifest) error { + // Server re-requested a manifest (e.g. after reconnect). Respond with a fresh one. 
+ opCount, err := c.sendManifest(stream) + if err != nil { return err } + c.l.Sugar().Debugf("re-sent operation manifest with %d operations", opCount) + return nil +} +func (c *syncSessionHandlerClient) HandleRequestOperationData(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionRequestOperationData) error { + var batch []*v1.Operation + send := func() { + if len(batch) == 0 { + return + } + stream.Send(&v1sync.SyncStreamItem{ + Action: &v1sync.SyncStreamItem_ReceiveOperations{ + ReceiveOperations: &v1sync.SyncStreamItem_SyncActionReceiveOperations{ + Event: &v1.OperationEvent{ + Event: &v1.OperationEvent_UpdatedOperations{ + UpdatedOperations: &v1.OperationList{Operations: batch}, + }, + }, + }, + }, + }) + batch = batch[:0] + } + for _, id := range item.GetOpIds() { + op, err := c.oplog.Get(id) + if err != nil { + continue // may have been deleted between manifest and request + } + batch = append(batch, op) + if len(batch) >= 256 { + send() + } + } + send() return nil } @@ -408,9 +464,7 @@ func (c *syncSessionHandlerClient) HandleReceiveOperations(ctx context.Context, } func (c *syncSessionHandlerClient) HandleReceiveResources(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionReceiveResources) error { - c.l.Debug("received resource list from server", - zap.Any("repos", item.GetRepos()), - zap.Any("plans", item.GetPlans())) + c.l.Sugar().Debugf("received resource list from server: %d repos, %d plans", len(item.GetRepos()), len(item.GetPlans())) peerState := c.mgr.peerStateManager.GetPeerState(c.peer.Keyid).Clone() if peerState == nil { return NewSyncErrorInternal(fmt.Errorf("peer state for %q not found", c.peer.Keyid)) @@ -429,7 +483,8 @@ func (c *syncSessionHandlerClient) HandleReceiveResources(ctx context.Context, s // Note unused: there isn't a situation where the host would send its config for information, the host will only call 'SetConfig' to update the config. 
func (c *syncSessionHandlerClient) HandleReceiveConfig(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionReceiveConfig) error { - c.l.Sugar().Debugf("received remote config update") + c.l.Sugar().Debugf("received remote config: %d repos, %d plans, modno=%d", + len(item.GetConfig().GetRepos()), len(item.GetConfig().GetPlans()), item.GetConfig().GetModno()) peerState := c.mgr.peerStateManager.GetPeerState(c.peer.Keyid).Clone() if peerState == nil { return NewSyncErrorInternal(fmt.Errorf("peer state for %q not found", c.peer.Keyid)) @@ -444,95 +499,129 @@ func (c *syncSessionHandlerClient) HandleReceiveConfig(ctx context.Context, stre } func (c *syncSessionHandlerClient) HandleSetConfig(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionSetConfig) error { - // Log the received config updates - c.l.Sugar().Debugf("received SetConfig request from peer %q") + return c.mgr.configMgr.Transform(func(cfg *v1.Config) (*v1.Config, error) { + snapshot := proto.Clone(cfg).(*v1.Config) // snapshot for change detection - // Fetch latest config from the config manager - latestConfig, err := c.mgr.configMgr.Get() - if err != nil { - return fmt.Errorf("fetch latest config: %w", err) - } + var plansNew, plansUpdated, plansUnchanged int + for _, plan := range item.GetPlans() { + if !c.permissions.CheckPermissionForPlan(plan.Id, permissions.PermsCanWriteConfiguration...) { + return nil, NewSyncErrorAuth(fmt.Errorf("peer %q is not allowed to update plan %q", c.peer.InstanceId, plan.Id)) + } - latestConfig = proto.Clone(latestConfig).(*v1.Config) // Clone to avoid modifying the original config - - for _, plan := range item.GetPlans() { - c.l.Sugar().Debugf("received plan update: %s", plan.Id) - if !c.permissions.CheckPermissionForPlan(plan.Id, permissions.PermsCanWriteConfiguration...) 
{ - return NewSyncErrorAuth(fmt.Errorf("peer %q is not allowed to update plan %q", c.peer.InstanceId, plan.Id)) + idx := slices.IndexFunc(cfg.Plans, func(p *v1.Plan) bool { + return p.Id == plan.Id + }) + if idx >= 0 { + if proto.Equal(cfg.Plans[idx], plan) { + c.l.Sugar().Debugf("received plan %s (unchanged)", plan.Id) + plansUnchanged++ + } else { + c.l.Sugar().Debugf("received plan %s (updated)", plan.Id) + plansUpdated++ + } + cfg.Plans[idx] = plan + } else { + c.l.Sugar().Debugf("received plan %s (new)", plan.Id) + plansNew++ + cfg.Plans = append(cfg.Plans, plan) + } } - // Update the plan in the local config - idx := slices.IndexFunc(latestConfig.Plans, func(p *v1.Plan) bool { - return p.Id == plan.Id - }) - if idx >= 0 { - latestConfig.Plans[idx] = plan - } else { - latestConfig.Plans = append(latestConfig.Plans, plan) - } - } + var reposNew, reposUpdated, reposUnchanged, reposSkipped int + for _, repo := range item.GetRepos() { + idx := slices.IndexFunc(cfg.Repos, func(r *v1.Repo) bool { + return r.Guid == repo.Guid + }) - for _, repo := range item.GetRepos() { - c.l.Sugar().Debugf("received repo update: %s", repo.Guid) - if !c.permissions.CheckPermissionForRepo(repo.Id, permissions.PermsCanWriteConfiguration...) { - return NewSyncErrorAuth(fmt.Errorf("peer %q is not allowed to update repo %q", c.peer.InstanceId, repo.Id)) + // Permission check: accept if we have RECEIVE_SHARED_REPOS and the repo + // is either new or already owned by this peer; otherwise require scoped write perms. + isNewOrOwnedByPeer := idx < 0 || cfg.Repos[idx].GetOriginInstanceId() == c.peer.InstanceId + allowed := (isNewOrOwnedByPeer && c.permissions.HasPermissionType(permissions.PermsCanReceiveSharedRepos...)) || + c.permissions.CheckPermissionForRepo(repo.Id, permissions.PermsCanWriteConfiguration...) 
+ if !allowed { + return nil, NewSyncErrorAuth(fmt.Errorf("peer %q is not allowed to update repo %q", c.peer.InstanceId, repo.Id)) + } + + if idx >= 0 { + if proto.Equal(cfg.Repos[idx], repo) { + c.l.Sugar().Debugf("received repo %s (unchanged)", repo.Id) + reposUnchanged++ + } else { + c.l.Sugar().Debugf("received repo %s (updated)", repo.Id) + reposUpdated++ + } + cfg.Repos[idx] = repo + } else { + conflictIdx := slices.IndexFunc(cfg.Repos, func(r *v1.Repo) bool { + return r.Id == repo.Id || r.Uri == repo.Uri + }) + if conflictIdx >= 0 { + c.l.Sugar().Warnf("received shared repo %q (guid %s) conflicts with existing local repo %q (guid %s), skipping", repo.Id, repo.Guid, cfg.Repos[conflictIdx].Id, cfg.Repos[conflictIdx].Guid) + reposSkipped++ + continue + } + c.l.Sugar().Debugf("received repo %s (new)", repo.Id) + reposNew++ + cfg.Repos = append(cfg.Repos, repo) + } } - // Update the repo in the local config - idx := slices.IndexFunc(latestConfig.Repos, func(r *v1.Repo) bool { - return r.Guid == repo.Guid - }) - if idx >= 0 { - latestConfig.Repos[idx] = repo - } else { - latestConfig.Repos = append(latestConfig.Repos, repo) - } - } + var plansDeleted int + for _, plan := range item.GetPlansToDelete() { + if !c.permissions.CheckPermissionForPlan(plan, permissions.PermsCanWriteConfiguration...) { + return nil, NewSyncErrorAuth(fmt.Errorf("peer %q is not allowed to delete plan %q", c.peer.InstanceId, plan)) + } - for _, plan := range item.GetPlansToDelete() { - c.l.Sugar().Debugf("received plan deletion request: %s", plan) - if !c.permissions.CheckPermissionForPlan(plan, permissions.PermsCanWriteConfiguration...) { - return NewSyncErrorAuth(fmt.Errorf("peer %q is not allowed to delete plan %q", c.peer.InstanceId, plan)) + idx := slices.IndexFunc(cfg.Plans, func(p *v1.Plan) bool { + return p.Id == plan + }) + if idx >= 0 { + c.l.Sugar().Debugf("received plan deletion: %s", plan) + plansDeleted++ + cfg.Plans = append(cfg.Plans[:idx], cfg.Plans[idx+1:]...) 
+ } else { + c.l.Sugar().Warnf("received plan deletion request for non-existent plan %q, ignoring", plan) + } } - // Remove the plan from the local config - idx := slices.IndexFunc(latestConfig.Plans, func(p *v1.Plan) bool { - return p.Id == plan - }) - if idx >= 0 { - latestConfig.Plans = append(latestConfig.Plans[:idx], latestConfig.Plans[idx+1:]...) - } else { - c.l.Sugar().Warnf("received plan deletion request for non-existent plan %q, ignoring", plan) - } - } + var reposDeleted int + for _, repoID := range item.GetReposToDelete() { + if !c.permissions.CheckPermissionForRepo(repoID, permissions.PermsCanWriteConfiguration...) { + return nil, NewSyncErrorAuth(fmt.Errorf("peer %q is not allowed to delete repo %q", c.peer.InstanceId, repoID)) + } - for _, repoID := range item.GetReposToDelete() { - c.l.Sugar().Debugf("received repo deletion request: %s", repoID) - if !c.permissions.CheckPermissionForRepo(repoID, permissions.PermsCanWriteConfiguration...) { - return NewSyncErrorAuth(fmt.Errorf("peer %q is not allowed to delete repo %q", c.peer.InstanceId, repoID)) + idx := slices.IndexFunc(cfg.Repos, func(r *v1.Repo) bool { + return r.Id == repoID + }) + if idx >= 0 { + c.l.Sugar().Debugf("received repo deletion: %s", repoID) + reposDeleted++ + cfg.Repos = append(cfg.Repos[:idx], cfg.Repos[idx+1:]...) + } else { + c.l.Sugar().Warnf("received repo deletion request for non-existent repo %q, ignoring", repoID) + } } - // Remove the repo from the local config - idx := slices.IndexFunc(latestConfig.Repos, func(r *v1.Repo) bool { - return r.Id == repoID - }) - if idx >= 0 { - latestConfig.Repos = append(latestConfig.Repos[:idx], latestConfig.Repos[idx+1:]...) - } else { - c.l.Sugar().Warnf("received repo deletion request for non-existent repo %q, ignoring", repoID) + // Skip the update if nothing actually changed to avoid triggering a reconnect loop. 
+ hasChanges := !proto.Equal(cfg, snapshot) + if hasChanges { + cfg.Modno++ } - } - // Update the local config with the new changes - latestConfig.Modno++ - if err := c.mgr.configMgr.Update(latestConfig); err != nil { - return fmt.Errorf("set updated config: %w", err) - } + c.l.Sugar().Debugf("SetConfig from peer %q: repos(%d new, %d updated, %d unchanged, %d skipped, %d deleted) plans(%d new, %d updated, %d unchanged, %d deleted) — config %s", + c.peer.GetInstanceId(), + reposNew, reposUpdated, reposUnchanged, reposSkipped, reposDeleted, + plansNew, plansUpdated, plansUnchanged, plansDeleted, + map[bool]string{true: "updated", false: "unchanged"}[hasChanges]) - return nil + if !hasChanges { + return nil, nil + } + return cfg, nil + }) } -func (c *syncSessionHandlerClient) sendConfig(ctx context.Context, stream *bidiSyncCommandStream) error { +func (c *syncSessionHandlerClient) sendConfig(ctx context.Context, stream *bidiSyncCommandStream) (int, int, error) { localConfig := c.syncConfigSnapshot.config remoteConfig := &v1sync.RemoteConfig{ Version: localConfig.Version, @@ -540,7 +629,7 @@ func (c *syncSessionHandlerClient) sendConfig(ctx context.Context, stream *bidiS } for _, repo := range localConfig.Repos { - if c.permissions.CheckPermissionForRepo(repo.Guid, permissions.PermsCanViewConfiguration...) { + if c.permissions.CheckPermissionForRepo(repo.Id, permissions.PermsCanViewConfiguration...) 
{ remoteConfig.Repos = append(remoteConfig.Repos, repo) } } @@ -559,10 +648,10 @@ func (c *syncSessionHandlerClient) sendConfig(ctx context.Context, stream *bidiS }, }) - return nil + return len(remoteConfig.Repos), len(remoteConfig.Plans), nil } -func (c *syncSessionHandlerClient) sendResourceList(ctx context.Context, stream *bidiSyncCommandStream) error { +func (c *syncSessionHandlerClient) sendResourceList(ctx context.Context, stream *bidiSyncCommandStream) (int, int, error) { repoMetadatas := []*v1sync.RepoMetadata{} planMetadatas := []*v1sync.PlanMetadata{} @@ -592,5 +681,5 @@ func (c *syncSessionHandlerClient) sendResourceList(ctx context.Context, stream }, }) - return nil + return len(repoMetadatas), len(planMetadatas), nil } diff --git a/internal/api/syncapi/synccommon.go b/internal/api/syncapi/synccommon.go index 471f47b0..30974f83 100644 --- a/internal/api/syncapi/synccommon.go +++ b/internal/api/syncapi/synccommon.go @@ -16,6 +16,12 @@ import ( lru "github.com/hashicorp/golang-lru/v2" ) +// onUnknownPeerFunc is called when a peer is not found in the known peers list during handshake. +// It receives the handshake item and may return a peer definition to authorize the connection +// (e.g. by validating a pairing token and adding the peer to the config). +// If it returns nil, the connection is rejected. +type onUnknownPeerFunc func(handshake *v1sync.SyncStreamItem) (*v1.Multihost_Peer, error) + func runSync( ctx context.Context, localInstanceID string, @@ -23,9 +29,17 @@ func runSync( commandStream *bidiSyncCommandStream, handler syncSessionHandler, knownPeers []*v1.Multihost_Peer, // could be known hosts or authorized clients, doesn't matter. This is used to verify the handshake packet, authorization comes later. + pairingSecret string, // optional one-time pairing secret to send during the handshake + onUnknownPeer onUnknownPeerFunc, // optional callback for handling unknown peers (e.g. 
pairing), nil to reject all unknown peers ) error { + // Session-scoped context: cancelled when this runSync invocation returns. Any per-session + // goroutines the handler spawns (heartbeats, watchers, etc.) should use this ctx so they + // die with the session rather than outliving it into the next reconnect cycle. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // send the initial handshake packet to the peer to establish the connection. - handshakePacket, err := createHandshakePacket(localInstanceID, localKey) + handshakePacket, err := createHandshakePacket(localInstanceID, localKey, pairingSecret) if err != nil { return NewSyncErrorAuth(fmt.Errorf("creating handshake packet: %w", err)) } @@ -47,7 +61,14 @@ func runSync( }) if peerIdx >= 0 { peer = knownPeers[peerIdx] - } else { + } else if onUnknownPeer != nil { + // Peer not in known list — try the onUnknownPeer callback (e.g. pairing token validation). + peer, err = onUnknownPeer(handshake) + if err != nil { + return NewSyncErrorAuth(fmt.Errorf("pairing failed: %w", err)) + } + } + if peer == nil { return NewSyncErrorAuth(fmt.Errorf("peer public key ID %s (instance ID %s) not found in known peers", handshake.GetHandshake().GetPublicKey().GetKeyid(), string(handshake.GetHandshake().GetInstanceId().GetPayload()))) } @@ -55,6 +76,8 @@ func runSync( return NewSyncErrorAuth(fmt.Errorf("authorizing handshake as peer: %w", err)) } + defer handler.OnConnectionDisconnected() + if err := handler.OnConnectionEstablished(ctx, commandStream, peer); err != nil { return err } @@ -65,9 +88,13 @@ func runSync( if err := handler.HandleHeartbeat(ctx, commandStream, item.GetHeartbeat()); err != nil { return fmt.Errorf("handling heartbeat: %w", err) } - case *v1sync.SyncStreamItem_RequestOperations: - if err := handler.HandleRequestOperations(ctx, commandStream, item.GetRequestOperations()); err != nil { - return fmt.Errorf("handling request operations: %w", err) + case *v1sync.SyncStreamItem_OperationManifest: + if 
err := handler.HandleOperationManifest(ctx, commandStream, item.GetOperationManifest()); err != nil { + return fmt.Errorf("handling operation manifest: %w", err) + } + case *v1sync.SyncStreamItem_RequestOperationData: + if err := handler.HandleRequestOperationData(ctx, commandStream, item.GetRequestOperationData()); err != nil { + return fmt.Errorf("handling request operation data: %w", err) } case *v1sync.SyncStreamItem_ReceiveOperations: if err := handler.HandleReceiveOperations(ctx, commandStream, item.GetReceiveOperations()); err != nil { @@ -108,7 +135,7 @@ func runSync( return nil } -func createHandshakePacket(instanceID string, identity *cryptoutil.PrivateKey) (*v1sync.SyncStreamItem, error) { +func createHandshakePacket(instanceID string, identity *cryptoutil.PrivateKey, pairingSecret string) (*v1sync.SyncStreamItem, error) { signedMessage, err := createSignedMessage([]byte(instanceID), identity) if err != nil { return nil, fmt.Errorf("signing instance ID: %w", err) @@ -120,6 +147,7 @@ func createHandshakePacket(instanceID string, identity *cryptoutil.PrivateKey) ( ProtocolVersion: SyncProtocolVersion, InstanceId: signedMessage, PublicKey: identity.PublicKeyProto(), + PairingSecret: pairingSecret, }, }, }, nil @@ -198,10 +226,15 @@ func sendHeartbeats(ctx context.Context, stream *bidiSyncCommandStream, interval // syncSessionHandler is a stateful handler for the messages within the context of a sync stream session. // the handler does not need to be thread safe as it is guaranteed to be called from a single thread. +// +// The ctx passed to every method is scoped to the session: it is cancelled when runSync returns. +// Goroutines spawned by the handler should use this ctx so they don't leak across reconnect cycles. 
type syncSessionHandler interface { OnConnectionEstablished(ctx context.Context, stream *bidiSyncCommandStream, peer *v1.Multihost_Peer) error + OnConnectionDisconnected() HandleHeartbeat(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionHeartbeat) error - HandleRequestOperations(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionRequestOperations) error + HandleOperationManifest(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionOperationManifest) error + HandleRequestOperationData(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionRequestOperationData) error HandleReceiveOperations(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionReceiveOperations) error HandleReceiveConfig(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionReceiveConfig) error HandleSetConfig(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionSetConfig) error @@ -220,12 +253,18 @@ func (h *unimplementedSyncSessionHandler) OnConnectionEstablished(ctx context.Co return NewSyncErrorProtocol(fmt.Errorf("OnConnectionEstablished not implemented")) } +func (h *unimplementedSyncSessionHandler) OnConnectionDisconnected() {} + func (h *unimplementedSyncSessionHandler) HandleHeartbeat(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionHeartbeat) error { return NewSyncErrorProtocol(fmt.Errorf("HandleHeartbeat not implemented")) } -func (h *unimplementedSyncSessionHandler) HandleRequestOperations(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionRequestOperations) error { - return NewSyncErrorProtocol(fmt.Errorf("HandleRequestOperations not implemented")) +func (h *unimplementedSyncSessionHandler) HandleOperationManifest(ctx context.Context, stream 
*bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionOperationManifest) error { + return NewSyncErrorProtocol(fmt.Errorf("HandleOperationManifest not implemented")) +} + +func (h *unimplementedSyncSessionHandler) HandleRequestOperationData(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionRequestOperationData) error { + return NewSyncErrorProtocol(fmt.Errorf("HandleRequestOperationData not implemented")) } func (h *unimplementedSyncSessionHandler) HandleReceiveOperations(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionReceiveOperations) error { @@ -306,20 +345,26 @@ func (om *remoteOpIDMapper) translateOpID(originalInstanceKeyid string, original return translatedID, nil } - // Cache miss - query the database - op, err := om.oplog.FindOneMetadata(oplog.Query{ + // Cache miss - query the database. Use QueryMetadata directly to handle + // the case where duplicates already exist (return the first match). + var translatedID int64 + err := om.oplog.QueryMetadata(oplog.Query{ OriginalInstanceKeyid: &originalInstanceKeyid, OriginalID: &originalOpId, + }, func(op oplog.OpMetadata) error { + if translatedID == 0 { + translatedID = op.ID + } + return oplog.ErrStopIteration }) if err != nil { - if errors.Is(err, oplog.ErrNoResults) { - return 0, nil // No results means the ID is not found - } - return 0, err // Other errors should be propagated + return 0, err + } + if translatedID == 0 { + return 0, nil // No results means the ID is not found } // Cache the result and return - translatedID := op.ID om.opIDLru.Add(cacheKey, translatedID) return translatedID, nil } @@ -340,20 +385,26 @@ func (om *remoteOpIDMapper) translateFlowID(originalInstanceKeyid string, origin return translatedID, nil } - // Cache miss - query the database - op, err := om.oplog.FindOneMetadata(oplog.Query{ + // Cache miss - query the database. 
Use QueryMetadata directly to handle + // the case where duplicates already exist (return the first match). + var translatedID int64 + err := om.oplog.QueryMetadata(oplog.Query{ OriginalInstanceKeyid: &originalInstanceKeyid, OriginalFlowID: &originalFlowId, + }, func(op oplog.OpMetadata) error { + if translatedID == 0 { + translatedID = op.FlowID + } + return oplog.ErrStopIteration }) if err != nil { - if errors.Is(err, oplog.ErrNoResults) { - return 0, nil // No results means the ID is not found - } - return 0, err // Other errors should be propagated + return 0, err + } + if translatedID == 0 { + return 0, nil // No results means the ID is not found } // Cache the result and return - translatedID := op.FlowID om.flowIDLru.Add(cacheKey, translatedID) return translatedID, nil } diff --git a/internal/api/syncapi/syncmanager.go b/internal/api/syncapi/syncmanager.go index a6b6f411..47b594d1 100644 --- a/internal/api/syncapi/syncmanager.go +++ b/internal/api/syncapi/syncmanager.go @@ -10,13 +10,32 @@ import ( v1 "github.com/garethgeorge/backrest/gen/go/v1" "github.com/garethgeorge/backrest/gen/go/v1sync" + "github.com/garethgeorge/backrest/internal/api/syncapi/permissions" "github.com/garethgeorge/backrest/internal/config" "github.com/garethgeorge/backrest/internal/cryptoutil" "github.com/garethgeorge/backrest/internal/oplog" "github.com/garethgeorge/backrest/internal/orchestrator" "go.uber.org/zap" + "google.golang.org/protobuf/proto" ) +// connectedPeerHandle represents a connected peer's stream and metadata. +// It allows the API layer to send messages to a specific connected peer. +type connectedPeerHandle struct { + stream *bidiSyncCommandStream + peer *v1.Multihost_Peer + permissions *permissions.PermissionSet +} + +// SendSetConfig sends a SyncActionSetConfig message to the connected peer. 
+func (h *connectedPeerHandle) SendSetConfig(setConfig *v1sync.SyncStreamItem_SyncActionSetConfig) { + h.stream.Send(&v1sync.SyncStreamItem{ + Action: &v1sync.SyncStreamItem_SetConfig{ + SetConfig: setConfig, + }, + }) +} + type SyncManager struct { configMgr *config.ConfigManager orchestrator *orchestrator.Orchestrator @@ -31,6 +50,10 @@ type SyncManager struct { syncClients map[string]*SyncClient // current sync clients, protected by mu + // connectedPeers tracks connected authorized client peers by key ID. + // This allows the API layer to send messages to specific connected peers. + connectedPeers map[string]*connectedPeerHandle + peerStateManager PeerStateManager } @@ -66,6 +89,7 @@ func NewSyncManager(configMgr *config.ConfigManager, oplog *oplog.OpLog, orchest syncClientRetryDelay: 60 * time.Second, syncClients: make(map[string]*SyncClient), + connectedPeers: make(map[string]*connectedPeerHandle), peerStateManager: peerStateManager, } @@ -89,7 +113,7 @@ func (m *SyncManager) RunSync(ctx context.Context) { zap.L().Info("syncmanager exited") }() - runSyncWithNewConfig := func() { + startSync := func(config *v1.Config) { m.mu.Lock() defer m.mu.Unlock() @@ -103,12 +127,6 @@ func (m *SyncManager) RunSync(ctx context.Context) { syncCtx, cancel := context.WithCancel(ctx) cancelLastSync = cancel - config, err := m.configMgr.Get() - if err != nil { - zap.S().Errorf("syncmanager failed to refresh config with latest changes so sync is stopped: %v", err) - return - } - if config.Multihost.GetIdentity() == nil { zap.S().Info("syncmanager no identity key configured, sync feature is disabled.") m.snapshot = nil // Clear the snapshot to indicate sync is disabled @@ -152,14 +170,68 @@ func (m *SyncManager) RunSync(ctx context.Context) { } } - runSyncWithNewConfig() + // lastConfig tracks the config that sync is currently running with. + // We only restart sync when the config changes in a meaningful way + // (i.e. ignoring the Modno field which increments on every write). 
+ var lastConfig *v1.Config + + syncConfigEqual := func(a, b *v1.Config) bool { + if a == nil || b == nil { + return a == b + } + // Compare ignoring Modno which changes on every config write. + ac := proto.Clone(a).(*v1.Config) + bc := proto.Clone(b).(*v1.Config) + ac.Modno = 0 + bc.Modno = 0 + return proto.Equal(ac, bc) + } + + restartSyncIfChanged := func() { + config, err := m.configMgr.Get() + if err != nil { + zap.S().Errorf("syncmanager failed to refresh config with latest changes so sync is stopped: %v", err) + return + } + if syncConfigEqual(config, lastConfig) { + zap.L().Debug("syncmanager config changed but sync-relevant config is unchanged, skipping restart") + return + } + lastConfig = proto.Clone(config).(*v1.Config) + startSync(config) + } + + restartSyncIfChanged() + + // Clock jump detection: if the ticker fires much later than expected + // (e.g. after system sleep), force a reconnect to recover dead streams. + clockJumpInterval := 1 * time.Minute + clockJumpGrace := 30 * time.Second + clockJumpTicker := time.NewTicker(clockJumpInterval) + defer clockJumpTicker.Stop() + lastTickTime := time.Now() for { select { case <-ctx.Done(): return case <-configWatchCh: - runSyncWithNewConfig() + restartSyncIfChanged() + case <-clockJumpTicker.C: + delta := time.Since(lastTickTime) - clockJumpInterval + lastTickTime = time.Now() + if delta < 0 { + delta = -delta + } + if delta > clockJumpGrace { + zap.S().Warnf("syncmanager detected clock jump of %v, forcing reconnection", delta) + config, err := m.configMgr.Get() + if err != nil { + zap.S().Errorf("syncmanager failed to get config after clock jump: %v", err) + continue + } + startSync(config) + } } } } @@ -178,19 +250,45 @@ func (m *SyncManager) runSyncWithPeerInternal(ctx context.Context, config *v1.Co return fmt.Errorf("creating sync client: %w", err) } m.mu.Lock() - m.syncClients[knownHostPeer.InstanceId] = newClient + m.syncClients[knownHostPeer.Keyid] = newClient m.mu.Unlock() go func() { 
newClient.RunSync(ctx) m.mu.Lock() - delete(m.syncClients, knownHostPeer.InstanceId) + // Only remove the entry if it still points at us. On reconfiguration the new + // client may have already inserted itself under the same key; deleting blindly + // would wipe the replacement. + if m.syncClients[knownHostPeer.Keyid] == newClient { + delete(m.syncClients, knownHostPeer.Keyid) + } m.mu.Unlock() }() return nil } +// registerConnectedPeer registers a connected peer's stream handle. +func (m *SyncManager) registerConnectedPeer(keyID string, handle *connectedPeerHandle) { + m.mu.Lock() + defer m.mu.Unlock() + m.connectedPeers[keyID] = handle +} + +// unregisterConnectedPeer removes a connected peer's stream handle. +func (m *SyncManager) unregisterConnectedPeer(keyID string) { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.connectedPeers, keyID) +} + +// GetConnectedPeer returns the handle for a connected peer, or nil if not connected. +func (m *SyncManager) GetConnectedPeer(keyID string) *connectedPeerHandle { + m.mu.Lock() + defer m.mu.Unlock() + return m.connectedPeers[keyID] +} + type syncConfigSnapshot struct { config *v1.Config identityKey *cryptoutil.PrivateKey // the local instance's identity key, used for signing sync messages diff --git a/internal/api/syncapi/syncoperations_test.go b/internal/api/syncapi/syncoperations_test.go new file mode 100644 index 00000000..b15497b2 --- /dev/null +++ b/internal/api/syncapi/syncoperations_test.go @@ -0,0 +1,397 @@ +package syncapi + +import ( + "context" + "fmt" + "math/rand" + "sync" + "testing" + "time" + + v1 "github.com/garethgeorge/backrest/gen/go/v1" + "github.com/garethgeorge/backrest/internal/config/migrations" + "github.com/garethgeorge/backrest/internal/cryptoutil" + "github.com/garethgeorge/backrest/internal/oplog" + "github.com/garethgeorge/backrest/internal/testutil" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + + "github.com/google/go-cmp/cmp" +) + +// 
TestFuzzOperationSync exercises the sync protocol with randomized operation +// mutations (add, update, delete) interleaved with connection drops and +// reconnections. After every round the test asserts that the host's view of +// the client's operations exactly matches the client's local state. +func TestFuzzOperationSync(t *testing.T) { + testutil.InstallZapLogger(t) + + const ( + numRounds = 10 // rounds of mutations + opsPerRound = 20 // mutations per round + reconnectEvery = 3 // force reconnect every N rounds + testTimeout = 60 * time.Second + ) + + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) + defer cancel() + + peerHostAddr := testutil.AllocOpenBindAddr(t) + peerClientAddr := testutil.AllocOpenBindAddr(t) + + repoGUID := cryptoutil.MustRandomID(cryptoutil.DefaultIDBits) + + peerHostConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultHostID, + Repos: []*v1.Repo{ + {Id: defaultRepoID, Guid: repoGUID, Uri: "test-uri"}, + }, + Multihost: &v1.Multihost{ + Identity: identity1, + AuthorizedClients: []*v1.Multihost_Peer{ + {Keyid: identity2.Keyid, InstanceId: defaultClientID}, + }, + }, + } + + peerClientConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultClientID, + Repos: []*v1.Repo{ + {Id: defaultRepoID, Guid: repoGUID, Uri: "backrest://" + defaultHostID}, + }, + Multihost: &v1.Multihost{ + Identity: identity2, + KnownHosts: []*v1.Multihost_Peer{ + { + Keyid: identity1.Keyid, + InstanceId: defaultHostID, + InstanceUrl: fmt.Sprintf("http://%s", peerHostAddr), + Permissions: []*v1.Multihost_Permission{ + { + Type: v1.Multihost_Permission_PERMISSION_READ_OPERATIONS, + Scopes: []string{"repo:" + defaultRepoID}, + }, + }, + }, + }, + }, + } + + peerHost := newPeerUnderTest(t, peerHostConfig) + peerClient := newPeerUnderTest(t, peerClientConfig) + + opTempl := &v1.Operation{ + InstanceId: defaultClientID, + RepoId: defaultRepoID, + RepoGuid: repoGUID, + PlanId: defaultPlanID, + 
UnixTimeStartMs: time.Now().UnixMilli() - 1000, + UnixTimeEndMs: time.Now().UnixMilli(), + Status: v1.OperationStatus_STATUS_SUCCESS, + Op: &v1.Operation_OperationBackup{}, + } + + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + query := oplog.Query{}.SetInstanceID(defaultClientID).SetRepoGUID(repoGUID) + + // Track live operations on the client by their ID. + liveOps := map[int64]*v1.Operation{} + + // Start sync infrastructure. + syncCtx, cancelSync := context.WithCancel(ctx) + var syncWg sync.WaitGroup + startSync := func() { + syncCtx, cancelSync = context.WithCancel(ctx) + syncWg.Add(2) + go func() { defer syncWg.Done(); runSyncAPIWithCtx(syncCtx, peerHost, peerHostAddr) }() + go func() { defer syncWg.Done(); runSyncAPIWithCtx(syncCtx, peerClient, peerClientAddr) }() + tryConnect(t, ctx, peerClient, peerClientConfig.Multihost.KnownHosts[0]) + } + stopSync := func() { + cancelSync() + syncWg.Wait() + } + + startSync() + + for round := 0; round < numRounds; round++ { + t.Logf("=== Round %d: %d live ops ===", round, len(liveOps)) + + // Reconnect periodically to exercise the RequestOperations catch-up path. 
+ if round > 0 && round%reconnectEvery == 0 { + t.Logf("--- reconnecting ---") + stopSync() + startSync() + } + + for i := 0; i < opsPerRound; i++ { + action := rng.Intn(10) + switch { + case action < 5: // 50%: add a new operation + op := proto.Clone(opTempl).(*v1.Operation) + op.DisplayMessage = fmt.Sprintf("r%d-op%d", round, i) + op.UnixTimeStartMs = time.Now().UnixMilli() - int64(rng.Intn(10000)) + op.UnixTimeEndMs = op.UnixTimeStartMs + int64(rng.Intn(5000)) + statuses := []v1.OperationStatus{ + v1.OperationStatus_STATUS_PENDING, + v1.OperationStatus_STATUS_INPROGRESS, + v1.OperationStatus_STATUS_SUCCESS, + v1.OperationStatus_STATUS_ERROR, + } + op.Status = statuses[rng.Intn(len(statuses))] + if err := peerClient.oplog.Add(op); err != nil { + t.Fatalf("round %d: add: %v", round, err) + } + liveOps[op.Id] = op + + case action < 8 && len(liveOps) > 0: // 30%: update a random op + op := pickRandom(rng, liveOps) + op = proto.Clone(op).(*v1.Operation) + op.DisplayMessage = fmt.Sprintf("r%d-op%d-updated", round, i) + op.Status = v1.OperationStatus_STATUS_SUCCESS + if err := peerClient.oplog.Update(op); err != nil { + t.Fatalf("round %d: update: %v", round, err) + } + liveOps[op.Id] = op + + case len(liveOps) > 0: // 20%: delete a random op + op := pickRandom(rng, liveOps) + if err := peerClient.oplog.Delete(op.Id); err != nil { + t.Fatalf("round %d: delete: %v", round, err) + } + delete(liveOps, op.Id) + } + } + + // Wait for the host to converge with the client. + assertOpsConverge(t, ctx, peerClient, peerHost, query, + fmt.Sprintf("round %d: ops should converge", round)) + } + + // Final reconnect to exercise one more catch-up after all mutations. + t.Logf("=== Final reconnect ===") + stopSync() + startSync() + assertOpsConverge(t, ctx, peerClient, peerHost, query, "final: ops should converge after reconnect") + + // Assert no duplicates on the host. 
+ assertNoDuplicateOriginalIDs(t, peerHost, query) + + stopSync() +} + +// TestFuzzOperationSyncOfflineMutations creates operations, syncs, disconnects, +// mutates heavily offline, then reconnects and verifies convergence. +func TestFuzzOperationSyncOfflineMutations(t *testing.T) { + testutil.InstallZapLogger(t) + + const ( + initialOps = 20 + offlineOps = 40 + testTimeout = 15 * time.Second + ) + + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) + defer cancel() + + peerHostAddr := testutil.AllocOpenBindAddr(t) + peerClientAddr := testutil.AllocOpenBindAddr(t) + + repoGUID := cryptoutil.MustRandomID(cryptoutil.DefaultIDBits) + + peerHostConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultHostID, + Repos: []*v1.Repo{{Id: defaultRepoID, Guid: repoGUID, Uri: "test-uri"}}, + Multihost: &v1.Multihost{ + Identity: identity1, + AuthorizedClients: []*v1.Multihost_Peer{{Keyid: identity2.Keyid, InstanceId: defaultClientID}}, + }, + } + + peerClientConfig := &v1.Config{ + Version: migrations.CurrentVersion, + Instance: defaultClientID, + Repos: []*v1.Repo{{Id: defaultRepoID, Guid: repoGUID, Uri: "backrest://" + defaultHostID}}, + Multihost: &v1.Multihost{ + Identity: identity2, + KnownHosts: []*v1.Multihost_Peer{{ + Keyid: identity1.Keyid, + InstanceId: defaultHostID, + InstanceUrl: fmt.Sprintf("http://%s", peerHostAddr), + Permissions: []*v1.Multihost_Permission{{ + Type: v1.Multihost_Permission_PERMISSION_READ_OPERATIONS, + Scopes: []string{"repo:" + defaultRepoID}, + }}, + }}, + }, + } + + peerHost := newPeerUnderTest(t, peerHostConfig) + peerClient := newPeerUnderTest(t, peerClientConfig) + + opTempl := &v1.Operation{ + InstanceId: defaultClientID, + RepoId: defaultRepoID, + RepoGuid: repoGUID, + PlanId: defaultPlanID, + UnixTimeStartMs: time.Now().UnixMilli(), + UnixTimeEndMs: time.Now().UnixMilli(), + Status: v1.OperationStatus_STATUS_SUCCESS, + Op: &v1.Operation_OperationBackup{}, + } + + rng := 
rand.New(rand.NewSource(42)) // deterministic seed + query := oplog.Query{}.SetInstanceID(defaultClientID).SetRepoGUID(repoGUID) + liveOps := map[int64]*v1.Operation{} + + // Phase 1: add initial operations while connected + syncCtx, cancelSync := context.WithCancel(ctx) + var syncWg sync.WaitGroup + syncWg.Add(2) + go func() { defer syncWg.Done(); runSyncAPIWithCtx(syncCtx, peerHost, peerHostAddr) }() + go func() { defer syncWg.Done(); runSyncAPIWithCtx(syncCtx, peerClient, peerClientAddr) }() + tryConnect(t, ctx, peerClient, peerClientConfig.Multihost.KnownHosts[0]) + + for i := 0; i < initialOps; i++ { + op := proto.Clone(opTempl).(*v1.Operation) + op.DisplayMessage = fmt.Sprintf("init-%d", i) + if err := peerClient.oplog.Add(op); err != nil { + t.Fatalf("init add: %v", err) + } + liveOps[op.Id] = op + } + + assertOpsConverge(t, ctx, peerClient, peerHost, query, "initial sync") + + // Phase 2: disconnect and mutate heavily + cancelSync() + syncWg.Wait() + + for i := 0; i < offlineOps; i++ { + action := rng.Intn(10) + switch { + case action < 5: // 50%: add + op := proto.Clone(opTempl).(*v1.Operation) + op.DisplayMessage = fmt.Sprintf("offline-add-%d", i) + if err := peerClient.oplog.Add(op); err != nil { + t.Fatalf("offline add: %v", err) + } + liveOps[op.Id] = op + case action < 8 && len(liveOps) > 0: // 30%: update + op := pickRandom(rng, liveOps) + op = proto.Clone(op).(*v1.Operation) + op.DisplayMessage = fmt.Sprintf("offline-upd-%d", i) + if err := peerClient.oplog.Update(op); err != nil { + t.Fatalf("offline update: %v", err) + } + liveOps[op.Id] = op + case len(liveOps) > 0: // 20%: delete + op := pickRandom(rng, liveOps) + if err := peerClient.oplog.Delete(op.Id); err != nil { + t.Fatalf("offline delete: %v", err) + } + delete(liveOps, op.Id) + } + } + + // Phase 3: reconnect and verify convergence + syncCtx, cancelSync = context.WithCancel(ctx) + syncWg.Add(2) + go func() { defer syncWg.Done(); runSyncAPIWithCtx(syncCtx, peerHost, peerHostAddr) }() + go 
func() { defer syncWg.Done(); runSyncAPIWithCtx(syncCtx, peerClient, peerClientAddr) }() + tryConnect(t, ctx, peerClient, peerClientConfig.Multihost.KnownHosts[0]) + + assertOpsConverge(t, ctx, peerClient, peerHost, query, "after offline mutations") + assertNoDuplicateOriginalIDs(t, peerHost, query) + + cancelSync() + syncWg.Wait() +} + +// --- helpers --- + +func pickRandom(rng *rand.Rand, m map[int64]*v1.Operation) *v1.Operation { + keys := make([]int64, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + return m[keys[rng.Intn(len(keys))]] +} + +func assertOpsConverge(t *testing.T, ctx context.Context, client, host *peerUnderTest, query oplog.Query, msg string) { + t.Helper() + err := testutil.Retry(t, ctx, func() error { + clientOps := getOperations(t, client.oplog, query) + hostOps := getOperations(t, host.oplog, query) + + // Normalize: clear locally-assigned fields that differ between peers. + normalize := func(ops []*v1.Operation) []*v1.Operation { + out := make([]*v1.Operation, len(ops)) + for i, op := range ops { + c := proto.Clone(op).(*v1.Operation) + c.Id = 0 + c.FlowId = 0 + c.OriginalId = 0 + c.OriginalFlowId = 0 + c.OriginalInstanceKeyid = "" + c.Modno = 0 + out[i] = c + } + return out + } + + cn := normalize(clientOps) + hn := normalize(hostOps) + + sortByMessage := func(a, b *v1.Operation) int { + if a.DisplayMessage < b.DisplayMessage { + return -1 + } + if a.DisplayMessage > b.DisplayMessage { + return 1 + } + return 0 + } + + sortByMessageStable(cn, sortByMessage) + sortByMessageStable(hn, sortByMessage) + + if len(cn) == 0 && len(hn) == 0 { + return nil // both empty is fine + } + if diff := cmp.Diff(cn, hn, protocmp.Transform()); diff != "" { + return fmt.Errorf("not converged (client has %d, host has %d): %s", len(clientOps), len(hostOps), diff) + } + return nil + }) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } +} + +func sortByMessageStable(ops []*v1.Operation, cmp func(a, b *v1.Operation) int) { + for i := 1; i < 
len(ops); i++ { + for j := i; j > 0 && cmp(ops[j-1], ops[j]) > 0; j-- { + ops[j-1], ops[j] = ops[j], ops[j-1] + } + } +} + +func assertNoDuplicateOriginalIDs(t *testing.T, peer *peerUnderTest, query oplog.Query) { + t.Helper() + ops := getOperations(t, peer.oplog, query) + seen := map[int64]bool{} + for _, op := range ops { + origID := op.OriginalId + if origID == 0 { + continue + } + if seen[origID] { + t.Errorf("duplicate original_id %d found on host", origID) + } + seen[origID] = true + } +} diff --git a/internal/api/syncapi/syncserver.go b/internal/api/syncapi/syncserver.go index 876787b2..c4f6e795 100644 --- a/internal/api/syncapi/syncserver.go +++ b/internal/api/syncapi/syncserver.go @@ -15,6 +15,7 @@ import ( "github.com/garethgeorge/backrest/internal/env" "github.com/garethgeorge/backrest/internal/oplog" "go.uber.org/zap" + "google.golang.org/protobuf/proto" ) const SyncProtocolVersion = 1 @@ -28,7 +29,10 @@ type BackrestSyncHandler struct { var _ v1syncconnect.BackrestSyncServiceHandler = &BackrestSyncHandler{} func NewBackrestSyncHandler(mgr *SyncManager) *BackrestSyncHandler { - mapper, _ := newRemoteOpIDMapper(mgr.oplog, 4096) // error can be ignored, it just checks for valid size + mapper, err := newRemoteOpIDMapper(mgr.oplog, 4096) + if err != nil { + panic(fmt.Errorf("syncapi: constructing remote op ID mapper: %w", err)) + } return &BackrestSyncHandler{ mgr: mgr, mapper: mapper, @@ -54,6 +58,8 @@ func (h *BackrestSyncHandler) Sync(ctx context.Context, stream *connect.BidiStre cmdStream, sessionHandler, snapshot.config.GetMultihost().GetAuthorizedClients(), + "", // server never sends a pairing secret + h.handleUnknownPeerPairing(snapshot), ) cmdStream.SendErrorAndTerminate(err) }() @@ -141,10 +147,6 @@ func (h *syncSessionHandlerServer) OnConnectionEstablished(ctx context.Context, return NewSyncErrorInternal(fmt.Errorf("failed to create permission set for client %q: %w", peer.InstanceId, err)) } - if !h.peer.KeyidVerified { - return 
NewSyncErrorAuth(fmt.Errorf("client %q is not visually verified, please verify the key ID %q", peer.InstanceId, h.peer.Keyid)) - } - // Configure the state for the connected peer. peerState := newPeerState(peer.InstanceId, h.peer.Keyid) peerState.ConnectionStateMessage = "connected" @@ -154,6 +156,13 @@ func (h *syncSessionHandlerServer) OnConnectionEstablished(ctx context.Context, h.l.Sugar().Infof("accepted a connection from client instance ID %q", h.peer.InstanceId) + // Register this peer's stream handle so the API layer can send messages to it. + h.mgr.registerConnectedPeer(h.peer.Keyid, &connectedPeerHandle{ + stream: stream, + peer: h.peer, + permissions: h.permissions, + }) + // start a heartbeat thread go sendHeartbeats(ctx, stream, env.MultihostHeartbeatInterval()) @@ -164,27 +173,66 @@ func (h *syncSessionHandlerServer) OnConnectionEstablished(ctx context.Context, for { select { case <-configWatchCh: - h.l.Sugar().Infof("disconnecting client due to configuration change") - stream.SendErrorAndTerminate(nil) // terminate so client reconnects and gets new config - return + newConfig, err := h.mgr.configMgr.Get() + if err != nil { + h.l.Sugar().Warnf("failed to get config on change: %v, disconnecting client", err) + stream.SendErrorAndTerminate(nil) + return + } + + // Check if this peer is still authorized + peerIdx := slices.IndexFunc(newConfig.Multihost.GetAuthorizedClients(), func(p *v1.Multihost_Peer) bool { + return p.InstanceId == h.peer.InstanceId && p.Keyid == h.peer.Keyid + }) + if peerIdx == -1 { + h.l.Sugar().Infof("disconnecting client %q: no longer authorized", h.peer.InstanceId) + stream.SendErrorAndTerminate(nil) + return + } + + // Check if permissions changed by comparing the proto peer definition + updatedPeer := newConfig.Multihost.AuthorizedClients[peerIdx] + if !proto.Equal(h.peer, updatedPeer) { + h.l.Sugar().Infof("disconnecting client %q: peer configuration changed", h.peer.InstanceId) + stream.SendErrorAndTerminate(nil) + return + } 
+ + // Permissions unchanged — send updated config and shared repos to client + configRepos, configPlans, err := h.sendConfigToClient(stream, newConfig) + if err != nil { + h.l.Sugar().Warnf("failed to send updated config to client %q: %v", h.peer.InstanceId, err) + } else { + sharedRepos := h.sendSharedReposToClient(stream, newConfig) + h.l.Sugar().Debugf("config changed, sent update to client %q: %d repos, %d plans (config); %d shared repos pushed", + h.peer.InstanceId, configRepos, configPlans, sharedRepos) + } case <-ctx.Done(): return } } }() - if err := h.sendConfigToClient(stream, h.snapshot.config); err != nil { + configRepos, configPlans, err := h.sendConfigToClient(stream, h.snapshot.config) + if err != nil { return NewSyncErrorInternal(fmt.Errorf("sending initial config to client: %w", err)) } - // send initial request for operation sync - if err := h.sendOperationSyncRequest(stream); err != nil { - return NewSyncErrorInternal(fmt.Errorf("sending initial operation sync request: %w", err)) - } + // Push shared repos to the client + sharedRepoCount := h.sendSharedReposToClient(stream, h.snapshot.config) + + h.l.Sugar().Infof("sent initial state to client %q: %d repos, %d plans (config); %d shared repos pushed", + h.peer.InstanceId, configRepos, configPlans, sharedRepoCount) return nil } +func (h *syncSessionHandlerServer) OnConnectionDisconnected() { + if h.peer != nil { + h.mgr.unregisterConnectedPeer(h.peer.Keyid) + } +} + func (h *syncSessionHandlerServer) HandleHeartbeat(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionHeartbeat) error { peerState := h.mgr.peerStateManager.GetPeerState(h.peer.Keyid).Clone() if peerState == nil { @@ -266,18 +314,9 @@ func (h *syncSessionHandlerServer) insertOrUpdate(op *v1.Operation, isUpdate boo op.OriginalFlowId = op.FlowId op.Id = localOpID op.FlowId = localFlowID - if op.Id == 0 { - if isUpdate { - h.l.Sugar().Warnf("received update for non-existent operation %+v, inserting 
instead", op) - } - op.Modno = 0 - return h.mgr.oplog.Add(op) - } else { - if !isUpdate { - h.l.Sugar().Warnf("received insert for existing operation %+v, updating instead", op) - } - return h.mgr.oplog.Update(op) - } + // Use Set which handles both insert (Id==0) and update (Id!=0), + // preserving the operation's Modno from the client. + return h.mgr.oplog.Set(oplog.SetOptions{}, op) } func (h *syncSessionHandlerServer) deleteByOriginalID(originalID int64) error { @@ -294,14 +333,12 @@ func (h *syncSessionHandlerServer) deleteByOriginalID(originalID int64) error { return h.mgr.oplog.Delete(foundOp.ID) } -func (h *syncSessionHandlerServer) sendConfigToClient(stream *bidiSyncCommandStream, config *v1.Config) error { +func (h *syncSessionHandlerServer) sendConfigToClient(stream *bidiSyncCommandStream, config *v1.Config) (int, int, error) { remoteConfig := &v1sync.RemoteConfig{ Version: config.Version, Modno: config.Modno, } resourceListMsg := &v1sync.SyncStreamItem_SyncActionReceiveResources{} - var allowedRepoIDs []string - var allowedPlanIDs []string for _, repo := range config.Repos { if h.permissions.CheckPermissionForRepo(repo.Id, permissions.PermsCanViewConfiguration...) { remoteConfig.Repos = append(remoteConfig.Repos, repo) @@ -309,7 +346,6 @@ func (h *syncSessionHandlerServer) sendConfigToClient(stream *bidiSyncCommandStr Id: repo.Id, Guid: repo.Guid, }) - allowedRepoIDs = append(allowedRepoIDs, repo.Id) } } for _, plan := range config.Plans { @@ -318,10 +354,8 @@ func (h *syncSessionHandlerServer) sendConfigToClient(stream *bidiSyncCommandStr resourceListMsg.Plans = append(resourceListMsg.Plans, &v1sync.PlanMetadata{ Id: plan.Id, }) - allowedPlanIDs = append(allowedPlanIDs, plan.Id) } } - h.l.Sugar().Debugf("determined client %v is allowlisted to read configs for repos %v and plans %v", h.peer.InstanceId, allowedRepoIDs, allowedPlanIDs) // Send the config, this is the first meaningful packet the client will receive. 
stream.Send(&v1sync.SyncStreamItem{ @@ -338,22 +372,184 @@ func (h *syncSessionHandlerServer) sendConfigToClient(stream *bidiSyncCommandStr ReceiveResources: resourceListMsg, }, }) - return nil + return len(remoteConfig.Repos), len(remoteConfig.Plans), nil } -func (h *syncSessionHandlerServer) sendOperationSyncRequest(stream *bidiSyncCommandStream) error { - highestID, highestModno, err := h.mgr.oplog.GetHighestOpIDAndModno(oplog.Query{}.SetOriginalInstanceKeyid(h.peer.Keyid)) - if err != nil { - return fmt.Errorf("getting highest opid and modno: %w", err) +// sendSharedReposToClient sends repos marked as shared to the client via SetConfig. +// This pushes repo configurations to the client so they are added to the client's local config. +// Returns the number of shared repos sent. +func (h *syncSessionHandlerServer) sendSharedReposToClient(stream *bidiSyncCommandStream, config *v1.Config) int { + var sharedRepos []*v1.Repo + for _, repo := range config.Repos { + if repo.GetShared() { + repoCopy := proto.Clone(repo).(*v1.Repo) + repoCopy.OriginInstanceId = config.Instance + sharedRepos = append(sharedRepos, repoCopy) + } } + + if len(sharedRepos) == 0 { + return 0 + } + stream.Send(&v1sync.SyncStreamItem{ - Action: &v1sync.SyncStreamItem_RequestOperations{ - RequestOperations: &v1sync.SyncStreamItem_SyncActionRequestOperations{ - HighOpid: highestID, - HighModno: highestModno, + Action: &v1sync.SyncStreamItem_SetConfig{ + SetConfig: &v1sync.SyncStreamItem_SyncActionSetConfig{ + Repos: sharedRepos, }, }, }) - h.l.Sugar().Debugf("requested operations from client starting at opID %d and modno %d", highestID, highestModno) + return len(sharedRepos) +} + +// ValidatePairingSecret checks a pairing secret against a list of pairing tokens. +// Returns the matching token if valid, or an error explaining why validation failed. +// This is a pure function with no side effects, making it easy to test exhaustively. 
+func ValidatePairingSecret(secret string, tokens []*v1.Multihost_PairingToken, now time.Time) (*v1.Multihost_PairingToken, error) { + if secret == "" { + return nil, fmt.Errorf("empty pairing secret") + } + for _, token := range tokens { + if token.Secret != secret { + continue + } + if token.ExpiresAtUnix > 0 && now.Unix() > token.ExpiresAtUnix { + return nil, fmt.Errorf("pairing token %q has expired", token.Label) + } + if token.MaxUses > 0 && token.Uses >= token.MaxUses { + return nil, fmt.Errorf("pairing token %q has reached its maximum number of uses (%d)", token.Label, token.MaxUses) + } + return token, nil + } + return nil, fmt.Errorf("no matching pairing token found") +} + +// handleUnknownPeerPairing returns an onUnknownPeerFunc that validates a pairing secret +// from the handshake, adds the client to authorized_clients in the config, and consumes the token. +// The peer is added to the config BEFORE runSync proceeds with its normal authorization check, +// ensuring that runSync's hard gate (peer must be in authorized_clients) is never bypassed. +func (h *BackrestSyncHandler) handleUnknownPeerPairing(snapshot *syncConfigSnapshot) onUnknownPeerFunc { + return func(handshake *v1sync.SyncStreamItem) (*v1.Multihost_Peer, error) { + pairingSecret := handshake.GetHandshake().GetPairingSecret() + if pairingSecret == "" { + return nil, fmt.Errorf("unknown peer and no pairing secret provided") + } + + // Defense-in-depth: re-verify the handshake signature to ensure the client + // holds the private key for the public key it presents. This is already checked + // by verifyHandshakePacket in runSync, but we verify again here since this is + // a security-critical path that adds a new authorized client. 
+ if _, err := verifyHandshakePacket(handshake); err != nil { + return nil, fmt.Errorf("handshake signature verification failed: %w", err) + } + + peerKeyID := handshake.GetHandshake().GetPublicKey().GetKeyid() + peerInstanceID := string(handshake.GetHandshake().GetInstanceId().GetPayload()) + + // Atomically validate the pairing secret and add the client. + var newPeer *v1.Multihost_Peer + if err := h.mgr.configMgr.Transform(func(cfg *v1.Config) (*v1.Config, error) { + token, err := ValidatePairingSecret(pairingSecret, cfg.GetMultihost().GetPairingTokens(), time.Now()) + if err != nil { + zap.S().Warnf("rejected pairing attempt from %q (%s): %v", peerInstanceID, peerKeyID, err) + return nil, err + } + + newPeer = &v1.Multihost_Peer{ + InstanceId: peerInstanceID, + Keyid: peerKeyID, + Permissions: token.Permissions, + } + cfg.Multihost.AuthorizedClients = append(cfg.Multihost.AuthorizedClients, newPeer) + + // Consume the token: increment uses, remove if exhausted. + token.Uses++ + if token.MaxUses > 0 && token.Uses >= token.MaxUses { + cfg.Multihost.PairingTokens = slices.DeleteFunc(cfg.Multihost.PairingTokens, func(t *v1.Multihost_PairingToken) bool { + return t.Secret == token.Secret + }) + } + + cfg.Modno++ + return cfg, nil + }); err != nil { + return nil, fmt.Errorf("failed to save paired client: %w", err) + } + + zap.S().Infof("successfully paired client %q (%s)", peerInstanceID, peerKeyID) + return newPeer, nil + } +} + +func (h *syncSessionHandlerServer) HandleOperationManifest(ctx context.Context, stream *bidiSyncCommandStream, item *v1sync.SyncStreamItem_SyncActionOperationManifest) error { + h.l.Sugar().Debugf("received operation manifest with %d operations", len(item.GetOpIds())) + // Build local state: original_id → {localID, modno} + type localOp struct { + localID int64 + modno int64 + } + localState := map[int64]localOp{} + if err := h.mgr.oplog.QueryMetadata( + oplog.Query{}.SetOriginalInstanceKeyid(h.peer.Keyid), + func(meta oplog.OpMetadata) 
error { + localState[meta.OriginalID] = localOp{localID: meta.ID, modno: meta.Modno} + return nil + }, + ); err != nil { + return fmt.Errorf("querying local operation metadata: %w", err) + } + h.l.Sugar().Debugf("local state has %d operations from this peer", len(localState)) + + // Build remote set from manifest + if len(item.GetOpIds()) != len(item.GetModnos()) { + return NewSyncErrorProtocol(fmt.Errorf("operation manifest has mismatched OpIds (%d) and Modnos (%d) lengths", len(item.GetOpIds()), len(item.GetModnos()))) + } + remoteSet := make(map[int64]int64, len(item.GetOpIds())) + for i, id := range item.GetOpIds() { + remoteSet[id] = item.GetModnos()[i] + } + + // Delete ops not in manifest + var toDelete []int64 + for origID, local := range localState { + if _, exists := remoteSet[origID]; !exists { + toDelete = append(toDelete, local.localID) + } + } + if len(toDelete) > 0 { + h.l.Sugar().Debugf("deleting %d stale operations", len(toDelete)) + if err := h.mgr.oplog.Delete(toDelete...); err != nil { + h.l.Sugar().Warnf("failed to delete stale operations: %v", err) + } + } + + // Find ops we need (new or changed modno), preserving manifest order + opIDs := item.GetOpIds() + modnos := item.GetModnos() + var needIDs []int64 + for i, id := range opIDs { + modno := modnos[i] + local, exists := localState[id] + if !exists || local.modno != modno { + needIDs = append(needIDs, id) + } + } + h.l.Sugar().Debugf("need %d operations (new or changed), local state comparison: remoteSet=%v localState=%v", len(needIDs), remoteSet, localState) + + // Request the ops we need + if len(needIDs) > 0 { + stream.Send(&v1sync.SyncStreamItem{ + Action: &v1sync.SyncStreamItem_RequestOperationData{ + RequestOperationData: &v1sync.SyncStreamItem_SyncActionRequestOperationData{ + OpIds: needIDs, + }, + }, + }) + } + return nil } + +func (h *syncSessionHandlerServer) HandleRequestOperationData(ctx context.Context, stream *bidiSyncCommandStream, item 
*v1sync.SyncStreamItem_SyncActionRequestOperationData) error { + return NewSyncErrorProtocol(fmt.Errorf("server should not receive RequestOperationData")) +} diff --git a/internal/api/syncapi/syncstatehandler.go b/internal/api/syncapi/syncstatehandler.go index ba36f46c..18d9a521 100644 --- a/internal/api/syncapi/syncstatehandler.go +++ b/internal/api/syncapi/syncstatehandler.go @@ -2,6 +2,7 @@ package syncapi import ( "context" + "fmt" "time" "connectrpc.com/connect" @@ -23,6 +24,27 @@ func NewBackrestSyncStateHandler(mgr *SyncManager) *BackrestSyncStateHandler { } } +func (h *BackrestSyncStateHandler) SetRemoteClientConfig(ctx context.Context, req *connect.Request[v1sync.SetRemoteClientConfigRequest]) (*connect.Response[v1sync.SetRemoteClientConfigResponse], error) { + peerKeyID := req.Msg.GetPeerKeyid() + if peerKeyID == "" { + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("peer_keyid is required")) + } + + handle := h.mgr.GetConnectedPeer(peerKeyID) + if handle == nil { + return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("peer %q is not connected", peerKeyID)) + } + + handle.SendSetConfig(&v1sync.SyncStreamItem_SyncActionSetConfig{ + Repos: req.Msg.GetRepos(), + Plans: req.Msg.GetPlans(), + ReposToDelete: req.Msg.GetReposToDelete(), + PlansToDelete: req.Msg.GetPlansToDelete(), + }) + + return connect.NewResponse(&v1sync.SetRemoteClientConfigResponse{}), nil +} + func (h *BackrestSyncStateHandler) GetPeerSyncStatesStream(ctx context.Context, req *connect.Request[v1sync.SyncStateStreamRequest], stream *connect.ServerStream[v1sync.PeerState]) error { ctx, cancel := context.WithCancelCause(ctx) defer cancel(nil) diff --git a/internal/api/syncapi/uriutil.go b/internal/api/syncapi/uriutil.go deleted file mode 100644 index fef31c5c..00000000 --- a/internal/api/syncapi/uriutil.go +++ /dev/null @@ -1,60 +0,0 @@ -package syncapi - -import ( - "errors" - "net/url" -) - -var ErrNotBackrestURI = errors.New("not a backrest URI") - -func 
CreateRemoteRepoURI(instanceUrl string) (string, error) { - u, err := url.Parse(instanceUrl) - if err != nil { - return "", err - } - - if u.Scheme == "http" { - u.Scheme = "backrest" - } else if u.Scheme == "https" { - u.Scheme = "sbackrest" - } else { - return "", errors.New("unsupported scheme") - } - - return u.String(), nil -} - -func IsBackrestRemoteRepoURI(repoUri string) bool { - u, err := url.Parse(repoUri) - if err != nil { - return false - } - - return u.Scheme == "backrest" -} - -func InstanceForBackrestURI(repoUri string) (string, error) { - u, err := url.Parse(repoUri) - if err != nil { - return "", err - } - - if u.Scheme != "backrest" { - return "", errors.New("not a backrest URI") - } - - return u.Hostname(), nil -} - -func RepoForBackrestURI(repoUri string) (string, error) { - u, err := url.Parse(repoUri) - if err != nil { - return "", err - } - - if u.Scheme != "backrest" { - return "", errors.New("not a backrest URI") - } - - return u.Path, nil -} diff --git a/internal/config/config.go b/internal/config/config.go index 4b877583..d2d43a52 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -10,6 +10,7 @@ import ( "github.com/garethgeorge/backrest/internal/cryptoutil" "github.com/garethgeorge/backrest/internal/eventemitter" "go.uber.org/zap" + "google.golang.org/protobuf/proto" ) var ErrConfigNotFound = fmt.Errorf("config not found") @@ -57,7 +58,12 @@ func (m *ConfigManager) migrate(config *v1.Config) error { func (m *ConfigManager) Get() (*v1.Config, error) { m.cachedMu.Lock() defer m.cachedMu.Unlock() + return m.getCachedLocked() +} +// getCachedLocked returns the cached config, loading from the store if necessary. +// Must be called with cachedMu held. 
+func (m *ConfigManager) getCachedLocked() (*v1.Config, error) { if m.cached != nil { return m.cached, nil } @@ -107,9 +113,45 @@ func (m *ConfigManager) Update(config *v1.Config) error { return nil } +// Transform atomically reads the current config, passes a deep clone to fn, +// and saves the result. If fn returns a nil config, no update is performed. +// The caller should not call Get/Update on the ConfigManager from within fn. +func (m *ConfigManager) Transform(fn func(cfg *v1.Config) (*v1.Config, error)) error { + m.cachedMu.Lock() + defer m.cachedMu.Unlock() + + current, err := m.getCachedLocked() + if err != nil { + return err + } + + cloned := proto.Clone(current).(*v1.Config) + result, err := fn(cloned) + if err != nil { + return err + } + if result == nil { + return nil // no update requested + } + + if err := ValidateConfig(result); err != nil { + return err + } + + if err := m.Store.Update(result); err != nil { + return err + } + m.cached = result + m.OnChange.Emit(struct{}{}) + return nil +} + type ConfigStore interface { Get() (*v1.Config, error) Update(config *v1.Config) error + // Transform atomically reads the current config, passes a deep clone to fn, + // and if fn returns a non-nil config, saves it. Returning (nil, nil) skips the update. 
+ Transform(fn func(cfg *v1.Config) (*v1.Config, error)) error } func NewDefaultConfig() *v1.Config { diff --git a/internal/config/jsonstore.go b/internal/config/jsonstore.go index 2cbfe6ef..d7ef16d2 100644 --- a/internal/config/jsonstore.go +++ b/internal/config/jsonstore.go @@ -12,6 +12,7 @@ import ( v1 "github.com/garethgeorge/backrest/gen/go/v1" "github.com/natefinch/atomic" "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) var ( @@ -28,7 +29,37 @@ var _ ConfigStore = &JsonFileStore{} func (f *JsonFileStore) Get() (*v1.Config, error) { f.mu.Lock() defer f.mu.Unlock() + return f.get() +} +func (f *JsonFileStore) Update(config *v1.Config) error { + f.mu.Lock() + defer f.mu.Unlock() + return f.update(config) +} + +func (f *JsonFileStore) Transform(fn func(cfg *v1.Config) (*v1.Config, error)) error { + f.mu.Lock() + defer f.mu.Unlock() + + current, err := f.get() + if err != nil { + return err + } + + cloned := proto.Clone(current).(*v1.Config) + result, err := fn(cloned) + if err != nil { + return err + } + if result == nil { + return nil + } + return f.update(result) +} + +// get reads the config from disk. Must be called with mu held. +func (f *JsonFileStore) get() (*v1.Config, error) { data, err := os.ReadFile(f.Path) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -45,10 +76,8 @@ func (f *JsonFileStore) Get() (*v1.Config, error) { return &config, nil } -func (f *JsonFileStore) Update(config *v1.Config) error { - f.mu.Lock() - defer f.mu.Unlock() - +// update writes the config to disk. Must be called with mu held. 
+func (f *JsonFileStore) update(config *v1.Config) error { data, err := protojson.MarshalOptions{ Indent: " ", Multiline: true, diff --git a/internal/config/memstore.go b/internal/config/memstore.go index 3d03b008..a8b8e097 100644 --- a/internal/config/memstore.go +++ b/internal/config/memstore.go @@ -4,6 +4,7 @@ import ( "sync" v1 "github.com/garethgeorge/backrest/gen/go/v1" + "google.golang.org/protobuf/proto" ) type MemoryStore struct { @@ -25,3 +26,18 @@ func (c *MemoryStore) Update(config *v1.Config) error { c.Config = config return nil } + +func (c *MemoryStore) Transform(fn func(cfg *v1.Config) (*v1.Config, error)) error { + c.mu.Lock() + defer c.mu.Unlock() + + cloned := proto.Clone(c.Config).(*v1.Config) + result, err := fn(cloned) + if err != nil { + return err + } + if result != nil { + c.Config = result + } + return nil +} diff --git a/internal/config/networksanitize_test.go b/internal/config/networksanitize_test.go index 72cca5d0..341e4e4a 100644 --- a/internal/config/networksanitize_test.go +++ b/internal/config/networksanitize_test.go @@ -25,8 +25,8 @@ func TestSanitizeForNetwork(t *testing.T) { Multihost: &v1.Multihost{ Identity: &v1.PrivateKey{ Keyid: "test-key-id", - Ed25519Priv: "test-private-key", - Ed25519Pub: "test-public-key", + EcdsaPriv: "test-private-key", + EcdsaPub: "test-public-key", }, }, }, @@ -34,8 +34,8 @@ func TestSanitizeForNetwork(t *testing.T) { Multihost: &v1.Multihost{ Identity: &v1.PrivateKey{ Keyid: "test-key-id", - Ed25519Priv: "", - Ed25519Pub: "", + EcdsaPriv: "", + EcdsaPub: "", }, }, }, @@ -85,8 +85,8 @@ func TestSanitizeForNetwork(t *testing.T) { Multihost: &v1.Multihost{ Identity: &v1.PrivateKey{ Keyid: "test-key-id", - Ed25519Priv: "secret-key", - Ed25519Pub: "public-key", + EcdsaPriv: "secret-key", + EcdsaPub: "public-key", }, }, Auth: &v1.Auth{ @@ -104,8 +104,8 @@ func TestSanitizeForNetwork(t *testing.T) { Multihost: &v1.Multihost{ Identity: &v1.PrivateKey{ Keyid: "test-key-id", - Ed25519Priv: "", - Ed25519Pub: 
"", + EcdsaPriv: "", + EcdsaPub: "", }, }, Auth: &v1.Auth{ @@ -174,8 +174,8 @@ func TestRehydrateNetworkSanitizedConfig(t *testing.T) { Multihost: &v1.Multihost{ Identity: &v1.PrivateKey{ Keyid: "test-key-id", - Ed25519Priv: "secret-key-data", - Ed25519Pub: "public-key-data", + EcdsaPriv: "secret-key-data", + EcdsaPub: "public-key-data", }, }, }, @@ -183,8 +183,8 @@ func TestRehydrateNetworkSanitizedConfig(t *testing.T) { Multihost: &v1.Multihost{ Identity: &v1.PrivateKey{ Keyid: "test-key-id", - Ed25519Priv: "secret-key-data", - Ed25519Pub: "public-key-data", + EcdsaPriv: "secret-key-data", + EcdsaPub: "public-key-data", }, }, }, diff --git a/internal/config/validate.go b/internal/config/validate.go index 6c170957..d3f4ee32 100644 --- a/internal/config/validate.go +++ b/internal/config/validate.go @@ -13,7 +13,6 @@ import ( "github.com/garethgeorge/backrest/internal/protoutil" "github.com/hashicorp/go-multierror" "go.uber.org/zap" - "google.golang.org/protobuf/proto" ) func ValidateConfig(c *v1.Config) error { @@ -31,6 +30,9 @@ func ValidateConfig(c *v1.Config) error { err = multierror.Append(err, fmt.Errorf("auth: %w", e)) } + // Remove orphaned remote repos and plans before validating them. 
+ cleanupOrphanedRemoteReposAndPlans(c) + repos := make(map[string]*v1.Repo) if c.Repos != nil { for _, repo := range c.Repos { @@ -111,6 +113,19 @@ func validateRepo(repo *v1.Repo) error { } } + if repo.ForgetPolicy != nil { + if repo.ForgetPolicy.GetSchedule() != nil { + if e := protoutil.ValidateSchedule(repo.ForgetPolicy.GetSchedule()); e != nil { + err = multierror.Append(err, fmt.Errorf("forget policy schedule: %w", e)) + } + } + if repo.ForgetPolicy.GetRetention() == nil { + err = multierror.Append(err, errors.New("forget policy must specify a retention policy")) + } else if e := protoutil.ValidateRetentionPolicy(repo.ForgetPolicy.GetRetention()); e != nil { + err = multierror.Append(err, fmt.Errorf("forget policy: %w", e)) + } + } + for _, env := range repo.Env { if !strings.Contains(env, "=") { err = multierror.Append(err, fmt.Errorf("invalid env var %s, must take format KEY=VALUE", env)) @@ -152,11 +167,9 @@ func validatePlan(plan *v1.Plan, repos map[string]*v1.Repo) error { err = multierror.Append(err, fmt.Errorf("repo %q not found", plan.Repo)) } - if plan.Retention != nil && plan.Retention.Policy == nil { - err = multierror.Append(err, errors.New("retention policy must be nil or must specify a policy")) - } else if policyTimeBucketed, ok := plan.Retention.GetPolicy().(*v1.RetentionPolicy_PolicyTimeBucketed); ok { - if proto.Equal(policyTimeBucketed.PolicyTimeBucketed, &v1.RetentionPolicy_TimeBucketedCounts{}) { - err = multierror.Append(err, errors.New("time bucketed policy must specify a non-empty bucket")) + if plan.Retention != nil { + if e := protoutil.ValidateRetentionPolicy(plan.Retention); e != nil { + err = multierror.Append(err, fmt.Errorf("retention: %w", e)) } } @@ -202,7 +215,7 @@ func validateMultihost(config *v1.Config) (err error) { seenInstanceIDs := make(map[string]struct{}) seenInstanceIDs[config.Instance] = struct{}{} - assertIDNew := func(id string) error { + assertInstanceIDNew := func(id string) error { if _, ok := 
seenInstanceIDs[id]; ok { return fmt.Errorf("instance ID %q is already used by another peer, an instance ID can only appear once as either a known host OR authorized client of the instance", id) } @@ -210,11 +223,26 @@ func validateMultihost(config *v1.Config) (err error) { return nil } + seenKeyIDs := make(map[string]struct{}) + if keyid := multihost.GetIdentity().GetKeyid(); keyid != "" { + seenKeyIDs[keyid] = struct{}{} + } + assertKeyIDNew := func(keyid string) error { + if _, ok := seenKeyIDs[keyid]; ok { + return fmt.Errorf("key ID %q is already used by another peer, a key ID can only appear once across all peers", keyid) + } + seenKeyIDs[keyid] = struct{}{} + return nil + } + for _, peer := range multihost.GetAuthorizedClients() { if e := validatePeer(peer, false); e != nil { err = multierror.Append(err, fmt.Errorf("authorized client %q: %w", peer.GetInstanceId(), e)) } - if e := assertIDNew(peer.GetInstanceId()); e != nil { + if e := assertInstanceIDNew(peer.GetInstanceId()); e != nil { + err = multierror.Append(err, fmt.Errorf("authorized client %q: %w", peer.GetInstanceId(), e)) + } + if e := assertKeyIDNew(peer.GetKeyid()); e != nil { err = multierror.Append(err, fmt.Errorf("authorized client %q: %w", peer.GetInstanceId(), e)) } } @@ -223,7 +251,10 @@ func validateMultihost(config *v1.Config) (err error) { if e := validatePeer(peer, true); e != nil { err = multierror.Append(err, fmt.Errorf("known host %q: %w", peer.GetInstanceId(), e)) } - if e := assertIDNew(peer.GetInstanceId()); e != nil { + if e := assertInstanceIDNew(peer.GetInstanceId()); e != nil { + err = multierror.Append(err, fmt.Errorf("known host %q: %w", peer.GetInstanceId(), e)) + } + if e := assertKeyIDNew(peer.GetKeyid()); e != nil { err = multierror.Append(err, fmt.Errorf("known host %q: %w", peer.GetInstanceId(), e)) } } @@ -246,10 +277,6 @@ func validatePeer(peer *v1.Multihost_Peer, isKnownHost bool) error { } } - if peer.KeyidVerified && peer.GetKeyid() == "" { - return 
errors.New("public key cannot be marked as verified if it is unset, the keyid must be specified at a minimum") - } - _, err := permissions.NewPermissionSet(peer.GetPermissions()) if err != nil { return fmt.Errorf("peer permissions: %w", err) @@ -257,3 +284,43 @@ func validatePeer(peer *v1.Multihost_Peer, isKnownHost bool) error { return nil } + +// cleanupOrphanedRemoteReposAndPlans removes repos whose originInstanceId no +// longer matches any peer, then removes plans that reference those deleted repos. +func cleanupOrphanedRemoteReposAndPlans(c *v1.Config) { + // Collect all peer instance IDs + peerIDs := make(map[string]struct{}) + for _, peer := range c.GetMultihost().GetAuthorizedClients() { + peerIDs[peer.GetInstanceId()] = struct{}{} + } + for _, peer := range c.GetMultihost().GetKnownHosts() { + peerIDs[peer.GetInstanceId()] = struct{}{} + } + + // Remove repos whose origin instance is no longer a peer + removedRepos := make(map[string]struct{}) + c.Repos = slices.DeleteFunc(c.Repos, func(r *v1.Repo) bool { + if r.OriginInstanceId == "" { + return false + } + if _, ok := peerIDs[r.OriginInstanceId]; ok { + return false + } + zap.S().Infof("removing orphaned remote repo %q (origin instance %q is no longer a peer)", r.Id, r.OriginInstanceId) + removedRepos[r.Id] = struct{}{} + return true + }) + + if len(removedRepos) == 0 { + return + } + + // Remove plans that reference deleted repos + c.Plans = slices.DeleteFunc(c.Plans, func(p *v1.Plan) bool { + if _, ok := removedRepos[p.Repo]; ok { + zap.S().Infof("removing plan %q referencing orphaned remote repo %q", p.Id, p.Repo) + return true + } + return false + }) +} diff --git a/internal/config/validate_test.go b/internal/config/validate_test.go new file mode 100644 index 00000000..eddb8c5b --- /dev/null +++ b/internal/config/validate_test.go @@ -0,0 +1,235 @@ +package config + +import ( + "testing" + + v1 "github.com/garethgeorge/backrest/gen/go/v1" +) + +func TestCleanupOrphanedRemoteReposAndPlans(t *testing.T) 
{ + tests := []struct { + name string + config *v1.Config + wantRepoIDs []string + wantPlanIDs []string + }{ + { + name: "no remote repos, nothing removed", + config: &v1.Config{ + Repos: []*v1.Repo{ + {Id: "local-repo", Uri: "file:///tmp/repo", Guid: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + }, + Plans: []*v1.Plan{ + {Id: "plan1", Repo: "local-repo", Paths: []string{"/data"}}, + }, + }, + wantRepoIDs: []string{"local-repo"}, + wantPlanIDs: []string{"plan1"}, + }, + { + name: "remote repo with valid peer is kept", + config: &v1.Config{ + Repos: []*v1.Repo{ + {Id: "local-repo", Uri: "file:///tmp/repo", Guid: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + {Id: "remote-repo", Uri: "file:///tmp/remote", Guid: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", OriginInstanceId: "server-a"}, + }, + Plans: []*v1.Plan{ + {Id: "plan1", Repo: "local-repo", Paths: []string{"/data"}}, + {Id: "plan2", Repo: "remote-repo", Paths: []string{"/data"}}, + }, + Multihost: &v1.Multihost{ + KnownHosts: []*v1.Multihost_Peer{ + {InstanceId: "server-a", Keyid: "key-a", InstanceUrl: "http://server-a:9898"}, + }, + }, + }, + wantRepoIDs: []string{"local-repo", "remote-repo"}, + wantPlanIDs: []string{"plan1", "plan2"}, + }, + { + name: "remote repo orphaned when peer removed", + config: &v1.Config{ + Repos: []*v1.Repo{ + {Id: "local-repo", Uri: "file:///tmp/repo", Guid: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + {Id: "remote-repo", Uri: "file:///tmp/remote", Guid: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", OriginInstanceId: "server-a"}, + }, + Plans: []*v1.Plan{ + {Id: "plan1", Repo: "local-repo", Paths: []string{"/data"}}, + {Id: "plan2", Repo: "remote-repo", Paths: []string{"/data"}}, + }, + Multihost: &v1.Multihost{}, + }, + wantRepoIDs: []string{"local-repo"}, + wantPlanIDs: []string{"plan1"}, + }, + { + name: "authorized client peer keeps remote repo", + config: 
&v1.Config{ + Repos: []*v1.Repo{ + {Id: "remote-repo", Uri: "file:///tmp/remote", Guid: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", OriginInstanceId: "client-b"}, + }, + Plans: []*v1.Plan{ + {Id: "plan1", Repo: "remote-repo", Paths: []string{"/data"}}, + }, + Multihost: &v1.Multihost{ + AuthorizedClients: []*v1.Multihost_Peer{ + {InstanceId: "client-b", Keyid: "key-b"}, + }, + }, + }, + wantRepoIDs: []string{"remote-repo"}, + wantPlanIDs: []string{"plan1"}, + }, + { + name: "multiple orphaned repos and plans cleaned up", + config: &v1.Config{ + Repos: []*v1.Repo{ + {Id: "local", Uri: "file:///tmp/repo", Guid: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + {Id: "remote-a", Uri: "file:///tmp/a", Guid: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", OriginInstanceId: "gone-server"}, + {Id: "remote-b", Uri: "file:///tmp/b", Guid: "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", OriginInstanceId: "gone-server"}, + }, + Plans: []*v1.Plan{ + {Id: "local-plan", Repo: "local", Paths: []string{"/data"}}, + {Id: "plan-a", Repo: "remote-a", Paths: []string{"/data"}}, + {Id: "plan-b", Repo: "remote-b", Paths: []string{"/data"}}, + }, + Multihost: &v1.Multihost{}, + }, + wantRepoIDs: []string{"local"}, + wantPlanIDs: []string{"local-plan"}, + }, + { + name: "plan referencing local repo not removed even if remote repos cleaned", + config: &v1.Config{ + Repos: []*v1.Repo{ + {Id: "local", Uri: "file:///tmp/repo", Guid: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + {Id: "remote", Uri: "file:///tmp/remote", Guid: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", OriginInstanceId: "gone"}, + }, + Plans: []*v1.Plan{ + {Id: "kept-plan", Repo: "local", Paths: []string{"/data"}}, + {Id: "removed-plan", Repo: "remote", Paths: []string{"/data"}}, + }, + Multihost: &v1.Multihost{}, + }, + wantRepoIDs: []string{"local"}, + wantPlanIDs: []string{"kept-plan"}, + }, + 
} + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cleanupOrphanedRemoteReposAndPlans(tc.config) + + gotRepoIDs := make([]string, len(tc.config.Repos)) + for i, r := range tc.config.Repos { + gotRepoIDs[i] = r.Id + } + + gotPlanIDs := make([]string, len(tc.config.Plans)) + for i, p := range tc.config.Plans { + gotPlanIDs[i] = p.Id + } + + if !sliceEqual(gotRepoIDs, tc.wantRepoIDs) { + t.Errorf("repos = %v, want %v", gotRepoIDs, tc.wantRepoIDs) + } + if !sliceEqual(gotPlanIDs, tc.wantPlanIDs) { + t.Errorf("plans = %v, want %v", gotPlanIDs, tc.wantPlanIDs) + } + }) + } +} + +func TestValidateRepoForgetPolicy(t *testing.T) { + validGUID := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + baseConfig := func(repo *v1.Repo) *v1.Config { + return &v1.Config{Instance: "test", Repos: []*v1.Repo{repo}} + } + + tests := []struct { + name string + repo *v1.Repo + wantErr bool + }{ + { + name: "no forget policy is valid", + repo: &v1.Repo{Id: "repo1", Uri: "file:///tmp/repo", Guid: validGUID}, + }, + { + name: "valid forget policy", + repo: &v1.Repo{ + Id: "repo1", Uri: "file:///tmp/repo", Guid: validGUID, + ForgetPolicy: &v1.ForgetPolicy{ + Schedule: &v1.Schedule{Schedule: &v1.Schedule_MaxFrequencyDays{MaxFrequencyDays: 1}}, + Retention: &v1.RetentionPolicy{Policy: &v1.RetentionPolicy_PolicyKeepLastN{PolicyKeepLastN: 5}}, + }, + }, + }, + { + name: "forget policy with nil retention", + repo: &v1.Repo{ + Id: "repo1", Uri: "file:///tmp/repo", Guid: validGUID, + ForgetPolicy: &v1.ForgetPolicy{ + Schedule: &v1.Schedule{Schedule: &v1.Schedule_MaxFrequencyDays{MaxFrequencyDays: 1}}, + }, + }, + wantErr: true, + }, + { + name: "forget policy with empty retention", + repo: &v1.Repo{ + Id: "repo1", Uri: "file:///tmp/repo", Guid: validGUID, + ForgetPolicy: &v1.ForgetPolicy{ + Schedule: &v1.Schedule{Schedule: &v1.Schedule_MaxFrequencyDays{MaxFrequencyDays: 1}}, + Retention: &v1.RetentionPolicy{}, + }, + }, + wantErr: true, + }, + { + name: 
"forget policy with invalid schedule", + repo: &v1.Repo{ + Id: "repo1", Uri: "file:///tmp/repo", Guid: validGUID, + ForgetPolicy: &v1.ForgetPolicy{ + Schedule: &v1.Schedule{Schedule: &v1.Schedule_Cron{Cron: "bad cron"}}, + Retention: &v1.RetentionPolicy{Policy: &v1.RetentionPolicy_PolicyKeepLastN{PolicyKeepLastN: 5}}, + }, + }, + wantErr: true, + }, + { + name: "forget policy with empty time bucketed retention", + repo: &v1.Repo{ + Id: "repo1", Uri: "file:///tmp/repo", Guid: validGUID, + ForgetPolicy: &v1.ForgetPolicy{ + Schedule: &v1.Schedule{Schedule: &v1.Schedule_MaxFrequencyDays{MaxFrequencyDays: 1}}, + Retention: &v1.RetentionPolicy{Policy: &v1.RetentionPolicy_PolicyTimeBucketed{PolicyTimeBucketed: &v1.RetentionPolicy_TimeBucketedCounts{}}}, + }, + }, + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := ValidateConfig(baseConfig(tc.repo)) + if tc.wantErr && err == nil { + t.Error("expected error, got nil") + } else if !tc.wantErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func sliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/internal/cryptoutil/identity.go b/internal/cryptoutil/identity.go index bb5c1fda..307d3726 100644 --- a/internal/cryptoutil/identity.go +++ b/internal/cryptoutil/identity.go @@ -16,7 +16,7 @@ import ( ) var ( - curve = elliptic.P256() // ed25519 + curve = elliptic.P256() ) type PublicKey struct { @@ -25,7 +25,7 @@ type PublicKey struct { } func NewPublicKey(pubkey *v1.PublicKey) (*PublicKey, error) { - pubKeyBlock, _ := pem.Decode([]byte(pubkey.Ed25519Pub)) + pubKeyBlock, _ := pem.Decode([]byte(pubkey.EcdsaPub)) if pubKeyBlock == nil { return nil, errors.New("no public key found in pem") } @@ -74,7 +74,7 @@ type PrivateKey struct { } func NewPrivateKey(privkey *v1.PrivateKey) (*PrivateKey, error) { - privKeyBlock, _ := 
pem.Decode([]byte(privkey.Ed25519Priv)) + privKeyBlock, _ := pem.Decode([]byte(privkey.EcdsaPriv)) if privKeyBlock == nil { return nil, errors.New("no private key found in pem") } @@ -86,7 +86,7 @@ func NewPrivateKey(privkey *v1.PrivateKey) (*PrivateKey, error) { pubKey, err := NewPublicKey(&v1.PublicKey{ Keyid: privkey.Keyid, - Ed25519Pub: privkey.Ed25519Pub, + EcdsaPub: privkey.EcdsaPub, }) if err != nil { return nil, err @@ -123,8 +123,8 @@ func GeneratePrivateKey() (*v1.PrivateKey, error) { return &v1.PrivateKey{ Keyid: deriveKeyId(&privKey.PublicKey), - Ed25519Priv: string(pemPrivateKeyBytes), - Ed25519Pub: string(pemPublicKeyBytes), + EcdsaPriv: string(pemPrivateKeyBytes), + EcdsaPub: string(pemPublicKeyBytes), }, nil } diff --git a/internal/cryptoutil/identity_test.go b/internal/cryptoutil/identity_test.go index 2ecec15c..cb19ebd5 100644 --- a/internal/cryptoutil/identity_test.go +++ b/internal/cryptoutil/identity_test.go @@ -10,11 +10,11 @@ func TestGenerateKeypair(t *testing.T) { t.Fatalf("failed to generate key pair: %v", err) } - if len(privateKey.Ed25519Priv) == 0 { + if len(privateKey.EcdsaPriv) == 0 { t.Fatalf("must populate private key") } - if len(privateKey.Ed25519Pub) == 0 { + if len(privateKey.EcdsaPub) == 0 { t.Fatalf("must populate public key") } } diff --git a/internal/cryptoutil/pairingtoken.go b/internal/cryptoutil/pairingtoken.go new file mode 100644 index 00000000..38fa85d2 --- /dev/null +++ b/internal/cryptoutil/pairingtoken.go @@ -0,0 +1,70 @@ +package cryptoutil + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "strings" +) + +// PairingToken format: ":#" +// The secret is a random hex string that the client sends during the sync handshake +// to prove it holds a valid pairing token. + +const pairingSecretBytes = 32 + +// GeneratePairingSecret generates a cryptographically random secret for use in a pairing token. 
+func GeneratePairingSecret() (string, error) { + b := make([]byte, pairingSecretBytes) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("generate pairing secret: %w", err) + } + return hex.EncodeToString(b), nil +} + +// FormatPairingToken formats the components into a pairing token string. +func FormatPairingToken(keyID, secret, instanceID string) string { + return fmt.Sprintf("%s:%s#%s", keyID, secret, instanceID) +} + +// ParsedPairingToken holds the parsed components of a pairing token. +type ParsedPairingToken struct { + KeyID string + Secret string + InstanceID string +} + +// ParsePairingToken parses a pairing token string into its components. +func ParsePairingToken(token string) (*ParsedPairingToken, error) { + // Split on '#' to get the instance ID suffix + hashIdx := strings.LastIndex(token, "#") + if hashIdx == -1 { + return nil, fmt.Errorf("invalid pairing token: missing '#' separator") + } + instanceID := token[hashIdx+1:] + remainder := token[:hashIdx] + + // Split remainder on ':' to get key ID and secret + colonIdx := strings.Index(remainder, ":") + if colonIdx == -1 { + return nil, fmt.Errorf("invalid pairing token: missing ':' separator") + } + keyID := remainder[:colonIdx] + secret := remainder[colonIdx+1:] + + if keyID == "" { + return nil, fmt.Errorf("invalid pairing token: empty key ID") + } + if secret == "" { + return nil, fmt.Errorf("invalid pairing token: empty secret") + } + if instanceID == "" { + return nil, fmt.Errorf("invalid pairing token: empty instance ID") + } + + return &ParsedPairingToken{ + KeyID: keyID, + Secret: secret, + InstanceID: instanceID, + }, nil +} diff --git a/internal/cryptoutil/pairingtoken_test.go b/internal/cryptoutil/pairingtoken_test.go new file mode 100644 index 00000000..90a3fcdd --- /dev/null +++ b/internal/cryptoutil/pairingtoken_test.go @@ -0,0 +1,117 @@ +package cryptoutil + +import ( + "testing" +) + +func TestFormatAndParsePairingToken(t *testing.T) { + keyID := "ecdsa.abc123" + 
secret := "deadbeef1234567890abcdef" + instanceID := "my-server" + + token := FormatPairingToken(keyID, secret, instanceID) + want := "ecdsa.abc123:deadbeef1234567890abcdef#my-server" + if token != want { + t.Fatalf("FormatPairingToken() = %q, want %q", token, want) + } + + parsed, err := ParsePairingToken(token) + if err != nil { + t.Fatalf("ParsePairingToken() error: %v", err) + } + if parsed.KeyID != keyID { + t.Errorf("KeyID = %q, want %q", parsed.KeyID, keyID) + } + if parsed.Secret != secret { + t.Errorf("Secret = %q, want %q", parsed.Secret, secret) + } + if parsed.InstanceID != instanceID { + t.Errorf("InstanceID = %q, want %q", parsed.InstanceID, instanceID) + } +} + +func TestParsePairingTokenWithColonsInKeyID(t *testing.T) { + // Key IDs contain base64url which shouldn't have colons, but test robustness + token := "ecdsa.key:secretvalue#server-1" + parsed, err := ParsePairingToken(token) + if err != nil { + t.Fatalf("ParsePairingToken() error: %v", err) + } + if parsed.KeyID != "ecdsa.key" { + t.Errorf("KeyID = %q, want %q", parsed.KeyID, "ecdsa.key") + } + if parsed.Secret != "secretvalue" { + t.Errorf("Secret = %q, want %q", parsed.Secret, "secretvalue") + } + if parsed.InstanceID != "server-1" { + t.Errorf("InstanceID = %q, want %q", parsed.InstanceID, "server-1") + } +} + +func TestParsePairingTokenWithHashInInstanceID(t *testing.T) { + // Instance ID with a '#' — we use LastIndex so the last '#' is the delimiter + token := "ecdsa.key:secret#inst#2" + parsed, err := ParsePairingToken(token) + if err != nil { + t.Fatalf("ParsePairingToken() error: %v", err) + } + if parsed.InstanceID != "2" { + t.Errorf("InstanceID = %q, want %q", parsed.InstanceID, "2") + } +} + +func TestParsePairingTokenErrors(t *testing.T) { + tests := []struct { + name string + token string + }{ + {"missing hash", "ecdsa.key:secret"}, + {"missing colon", "ecdsa.keysecret#server"}, + {"empty key ID", ":secret#server"}, + {"empty secret", "ecdsa.key:#server"}, + {"empty instance 
ID", "ecdsa.key:secret#"}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + _, err := ParsePairingToken(tc.token) + if err == nil { + t.Errorf("ParsePairingToken(%q) should have returned error", tc.token) + } + }) + } +} + +func TestGeneratePairingSecret(t *testing.T) { + secret, err := GeneratePairingSecret() + if err != nil { + t.Fatalf("GeneratePairingSecret() error: %v", err) + } + if len(secret) != pairingSecretBytes*2 { // hex encoding doubles length + t.Errorf("secret length = %d, want %d", len(secret), pairingSecretBytes*2) + } + + // Ensure two secrets are different (probabilistic but extremely reliable) + secret2, _ := GeneratePairingSecret() + if secret == secret2 { + t.Error("two generated secrets should not be equal") + } +} + +func TestRoundTrip(t *testing.T) { + secret, err := GeneratePairingSecret() + if err != nil { + t.Fatalf("GeneratePairingSecret() error: %v", err) + } + + keyID := "ecdsa.test-key-id-1234" + instanceID := "my-backrest-server" + + token := FormatPairingToken(keyID, secret, instanceID) + parsed, err := ParsePairingToken(token) + if err != nil { + t.Fatalf("ParsePairingToken() error: %v", err) + } + if parsed.KeyID != keyID || parsed.Secret != secret || parsed.InstanceID != instanceID { + t.Errorf("round trip failed: got %+v", parsed) + } +} diff --git a/internal/oplog/memstore/memstore.go b/internal/oplog/memstore/memstore.go index 6ca00c16..ca5e30a6 100644 --- a/internal/oplog/memstore/memstore.go +++ b/internal/oplog/memstore/memstore.go @@ -82,6 +82,9 @@ func (m *MemStore) Query(q oplog.Query, f func(*v1.Operation) error) error { } func (m *MemStore) QueryMetadata(q oplog.Query, f func(meta oplog.OpMetadata) error) error { + m.mu.Lock() + defer m.mu.Unlock() + for _, id := range m.idsForQuery(q) { op := m.operations[id] if err := f(oplog.OpMetadata{ @@ -91,6 +94,9 @@ func (m *MemStore) QueryMetadata(q oplog.Query, f func(meta oplog.OpMetadata) er OriginalID: op.OriginalId, OriginalFlowID: 
op.OriginalFlowId, Status: op.Status, + RepoID: op.RepoId, + RepoGUID: op.RepoGuid, + PlanID: op.PlanId, }); err != nil { return err } @@ -165,12 +171,53 @@ func (m *MemStore) Delete(opID ...int64) ([]*v1.Operation, error) { defer m.mu.Unlock() ops := make([]*v1.Operation, 0, len(opID)) for _, id := range opID { - ops = append(ops, m.operations[id]) + if op, ok := m.operations[id]; ok { + ops = append(ops, op) + } delete(m.operations, id) } return ops, nil } +func (m *MemStore) Set(opts oplog.SetOptions, op ...*v1.Operation) error { + m.mu.Lock() + defer m.mu.Unlock() + for _, o := range op { + if o.Id == 0 { + m.nextID++ + o.Id = m.nextID + if o.Modno == 0 { + m.nextModno++ + o.Modno = m.nextModno + } else if o.Modno > m.nextModno { + m.nextModno = o.Modno + } + if o.FlowId == 0 { + o.FlowId = o.Id + } + if err := protoutil.ValidateOperation(o); err != nil { + return err + } + m.operations[o.Id] = o + } else { + if o.Modno == 0 { + m.nextModno++ + o.Modno = m.nextModno + } else if o.Modno > m.nextModno { + m.nextModno = o.Modno + } + if err := protoutil.ValidateOperation(o); err != nil { + return err + } + if _, ok := m.operations[o.Id]; !ok { + return oplog.ErrNotExist + } + m.operations[o.Id] = o + } + } + return nil +} + func (m *MemStore) Update(op ...*v1.Operation) error { m.mu.Lock() defer m.mu.Unlock() diff --git a/internal/oplog/oplog.go b/internal/oplog/oplog.go index 179c36d4..f04e2bef 100644 --- a/internal/oplog/oplog.go +++ b/internal/oplog/oplog.go @@ -2,6 +2,7 @@ package oplog import ( "errors" + "fmt" "slices" "sync" @@ -162,6 +163,43 @@ func (o *OpLog) Update(ops ...*v1.Operation) error { return nil } +// SetOptions configures the behavior of Set. +type SetOptions struct { + InsertOnly bool // If true, only insert; fail if the operation already exists (Id != 0). + UpdateOnly bool // If true, only update; fail if the operation does not exist (Id == 0). + AllocateID bool // If true, allocate a new Id for operations with Id == 0. 
+} + +func (o *OpLog) Set(opts SetOptions, ops ...*v1.Operation) error { + if opts.InsertOnly && opts.UpdateOnly { + return errors.New("InsertOnly and UpdateOnly are mutually exclusive") + } + for _, op := range ops { + if opts.InsertOnly && op.Id != 0 { + return fmt.Errorf("InsertOnly but operation already has Id %d", op.Id) + } + if opts.UpdateOnly && op.Id == 0 { + return errors.New("UpdateOnly but operation has no Id") + } + } + + isNew := make([]bool, len(ops)) + for i, op := range ops { + isNew[i] = op.Id == 0 + } + if err := o.store.Set(opts, ops...); err != nil { + return err + } + for i, op := range ops { + if isNew[i] { + o.notify([]*v1.Operation{op}, OPERATION_ADDED) + } else { + o.notify([]*v1.Operation{op}, OPERATION_UPDATED) + } + } + return nil +} + func (o *OpLog) Delete(opID ...int64) error { removedOps, err := o.store.Delete(opID...) if err != nil { @@ -194,6 +232,10 @@ type OpStore interface { Add(op ...*v1.Operation) error // Update updates the given operations in the store. Update(op ...*v1.Operation) error + // Set inserts or updates operations. Zero-valued fields (Id, Modno, FlowId) are + // allocated automatically (like Add/Update), but non-zero values provided by the + // caller are preserved. If Id is non-zero, it updates; if Id is zero, it inserts. + Set(opts SetOptions, op ...*v1.Operation) error // Delete removes the operations with the given IDs from the store, and returns the removed operations. Delete(opID ...int64) ([]*v1.Operation, error) // Transform applies the given function to each operation that matches the query. 
@@ -212,4 +254,7 @@ type OpMetadata struct { OriginalID int64 OriginalFlowID int64 Status v1.OperationStatus + RepoID string + RepoGUID string + PlanID string } diff --git a/internal/oplog/query.go b/internal/oplog/query.go index ea92c987..3a9566b8 100644 --- a/internal/oplog/query.go +++ b/internal/oplog/query.go @@ -136,6 +136,10 @@ func (q *Query) Match(op *v1.Operation) bool { return false } + if q.OriginalInstanceKeyid != nil && op.OriginalInstanceKeyid != *q.OriginalInstanceKeyid { + return false + } + if q.ModnoGte != nil && op.Modno < *q.ModnoGte { return false } diff --git a/internal/oplog/sqlitestore/sqlitestore.go b/internal/oplog/sqlitestore/sqlitestore.go index cb9f495a..7248601c 100644 --- a/internal/oplog/sqlitestore/sqlitestore.go +++ b/internal/oplog/sqlitestore/sqlitestore.go @@ -342,7 +342,7 @@ func (m *SqliteStore) Query(q oplog.Query, f func(*v1.Operation) error) error { func (m *SqliteStore) QueryMetadata(q oplog.Query, f func(oplog.OpMetadata) error) error { where, args := m.buildQueryWhereClause(q, false) - rows, err := m.dbpool.QueryContext(context.Background(), "SELECT operations.id, operations.modno, operations.original_id, operations.flow_id, operations.original_flow_id, operations.status FROM operations JOIN operation_groups ON operations.ogid = operation_groups.ogid WHERE "+where, args...) + rows, err := m.dbpool.QueryContext(context.Background(), "SELECT operations.id, operations.modno, operations.original_id, operations.flow_id, operations.original_flow_id, operations.status, operations.ogid, operation_groups.repo_id, operation_groups.repo_guid, operation_groups.plan_id FROM operations JOIN operation_groups ON operations.ogid = operation_groups.ogid WHERE "+where, args...) 
if err != nil { return fmt.Errorf("query metadata: %v", err) } @@ -350,7 +350,8 @@ func (m *SqliteStore) QueryMetadata(q oplog.Query, f func(oplog.OpMetadata) erro for rows.Next() { var meta oplog.OpMetadata - if err := rows.Scan(&meta.ID, &meta.Modno, &meta.OriginalID, &meta.FlowID, &meta.OriginalFlowID, &meta.Status); err != nil { + var ogid int64 + if err := rows.Scan(&meta.ID, &meta.Modno, &meta.OriginalID, &meta.FlowID, &meta.OriginalFlowID, &meta.Status, &ogid, &meta.RepoID, &meta.RepoGUID, &meta.PlanID); err != nil { return fmt.Errorf("query metadata: scan: %v", err) } if err := f(meta); err != nil { @@ -427,7 +428,7 @@ func (m *SqliteStore) Transform(q oplog.Query, f func(*v1.Operation) (*v1.Operat continue } - if err := m.updateInternal(tx, newOp); err != nil { + if err := m.updateInternal(tx, false, newOp); err != nil { return err } } @@ -490,6 +491,52 @@ func (m *SqliteStore) Add(op ...*v1.Operation) error { return tx.Commit() } +func (m *SqliteStore) Set(opts oplog.SetOptions, op ...*v1.Operation) error { + tx, err := m.dbpool.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelSerializable}) + if err != nil { + return fmt.Errorf("set operation: begin tx: %v", err) + } + defer tx.Rollback() + + for _, o := range op { + if o.Id == 0 { + // Insert path: allocate Id if requested or by default + if opts.AllocateID || true { + o.Id = m.highestOpID.Add(1) + } + if o.Modno == 0 { + o.Modno = m.highestModno.Add(1) + } else if o.Modno > m.highestModno.Load() { + m.highestModno.Store(o.Modno) + } + if o.FlowId == 0 { + o.FlowId = o.Id + } + if err := protoutil.ValidateOperation(o); err != nil { + return err + } + if err := m.addInternal(tx, o); err != nil { + return err + } + } else { + // Update path: preserve Id, allocate modno only if zero + if o.Modno == 0 { + o.Modno = m.highestModno.Add(1) + } else if o.Modno > m.highestModno.Load() { + m.highestModno.Store(o.Modno) + } + if err := protoutil.ValidateOperation(o); err != nil { + return err + } + 
if err := m.updateInternal(tx, true, o); err != nil { + return err + } + } + } + + return tx.Commit() +} + func (m *SqliteStore) Update(op ...*v1.Operation) error { tx, err := m.dbpool.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelSerializable}) if err != nil { @@ -497,16 +544,18 @@ func (m *SqliteStore) Update(op ...*v1.Operation) error { } defer tx.Rollback() - if err := m.updateInternal(tx, op...); err != nil { + if err := m.updateInternal(tx, false, op...); err != nil { return err } return tx.Commit() } -func (m *SqliteStore) updateInternal(tx *sql.Tx, op ...*v1.Operation) error { +func (m *SqliteStore) updateInternal(tx *sql.Tx, preserveModno bool, op ...*v1.Operation) error { for _, o := range op { - o.Modno = m.highestModno.Add(1) + if !preserveModno { + o.Modno = m.highestModno.Add(1) + } if err := protoutil.ValidateOperation(o); err != nil { return err } diff --git a/internal/oplog/storetests/storecontract_test.go b/internal/oplog/storetests/storecontract_test.go index 57f835af..a0c69568 100644 --- a/internal/oplog/storetests/storecontract_test.go +++ b/internal/oplog/storetests/storecontract_test.go @@ -789,6 +789,9 @@ func TestQueryMetadata(t *testing.T) { OriginalID: 3, OriginalFlowID: 4, Status: v1.OperationStatus_STATUS_INPROGRESS, + RepoID: "repo1", + RepoGUID: "repo1-guid", + PlanID: "plan1", }); diff != "" { t.Errorf("unexpected diff: %v", diff) } diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index 3b9d9520..97d33b11 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -168,9 +168,13 @@ func (o *Orchestrator) autoInitReposIfNeeded(resticBin string) error { if err != nil { return fmt.Errorf("get config: %w", err) } - cfg = proto.Clone(cfg).(*v1.Config) - initializedRepo := false + // Perform network I/O (repo init) outside the config lock, collecting results. 
+ type initResult struct { + repoID string + guid string + } + var results []initResult for _, r := range cfg.Repos { if !r.AutoInitialize { continue @@ -192,14 +196,23 @@ func (o *Orchestrator) autoInitReposIfNeeded(resticBin string) error { if err != nil { return fmt.Errorf("get repo %q guid: %w", r.Id, err) } - r.Guid = guid - r.AutoInitialize = false - initializedRepo = true + results = append(results, initResult{repoID: r.Id, guid: guid}) } - if initializedRepo && fullErr == nil { - cfg.Modno++ - if err := o.configMgr.Update(cfg); err != nil { + if len(results) > 0 && fullErr == nil { + if err := o.configMgr.Transform(func(cfg *v1.Config) (*v1.Config, error) { + for _, res := range results { + for _, r := range cfg.Repos { + if r.Id == res.repoID { + r.Guid = res.guid + r.AutoInitialize = false + break + } + } + } + cfg.Modno++ + return cfg, nil + }); err != nil { return fmt.Errorf("update config: %w", err) } } @@ -275,6 +288,12 @@ func (o *Orchestrator) ScheduleDefaultTasks(config *v1.Config) error { } for _, repo := range config.Repos { + // Skip repos managed by a remote instance; the remote instance's + // orchestrator owns prune/check scheduling for those. 
+ if repo.GetOriginInstanceId() != "" { + continue + } + // Schedule a prune task for the repo t := tasks.NewPruneTask(repo, tasks.PlanForSystemTasks, false) if err := o.ScheduleTask(t, tasks.TaskPriorityPrune); err != nil { @@ -286,6 +305,12 @@ func (o *Orchestrator) ScheduleDefaultTasks(config *v1.Config) error { if err := o.ScheduleTask(t, tasks.TaskPriorityCheck); err != nil { return fmt.Errorf("schedule check task for repo %q: %w", repo.GetId(), err) } + + // Schedule a scheduled forget task for the repo + t = tasks.NewScheduledForgetTask(repo, tasks.PlanForSystemTasks, false) + if err := o.ScheduleTask(t, tasks.TaskPriorityForget); err != nil { + return fmt.Errorf("schedule scheduled forget task for repo %q: %w", repo.GetId(), err) + } } return nil diff --git a/internal/orchestrator/repo/repo.go b/internal/orchestrator/repo/repo.go index 82b2355e..5dcdafc7 100644 --- a/internal/orchestrator/repo/repo.go +++ b/internal/orchestrator/repo/repo.go @@ -213,24 +213,22 @@ func (r *RepoOrchestrator) ListSnapshotFiles(ctx context.Context, snapshotId str return lsEnts, nil } -func (r *RepoOrchestrator) Forget(ctx context.Context, plan *v1.Plan, tags []string) ([]*v1.ResticSnapshot, error) { +func (r *RepoOrchestrator) Forget(ctx context.Context, policy *v1.RetentionPolicy, opts ...restic.GenericOption) ([]*v1.ResticSnapshot, error) { r.mu.Lock() defer r.mu.Unlock() ctx, flush := forwardResticLogs(ctx) defer flush() - policy := plan.Retention if policy == nil { - return nil, fmt.Errorf("plan %q has no retention policy", plan.Id) + return nil, fmt.Errorf("repo %q: forget called with nil retention policy", r.repoConfig.Id) } result, err := r.repo.Forget( - ctx, protoutil.RetentionPolicyFromProto(plan.Retention), - restic.WithFlags("--tag", strings.Join(tags, ",")), - restic.WithFlags("--group-by", ""), + ctx, protoutil.RetentionPolicyFromProto(policy), + opts..., ) if err != nil { - return nil, fmt.Errorf("forget snapshots for repo %v: %w", r.repoConfig.Id, err) + return 
nil, fmt.Errorf("forget for repo %v: %w", r.repoConfig.Id, err) } var forgotten []*v1.ResticSnapshot @@ -242,7 +240,7 @@ func (r *RepoOrchestrator) Forget(ctx context.Context, plan *v1.Plan, tags []str forgotten = append(forgotten, snapshotProto) } - r.logger(ctx).Debug("forget snapshots", zap.String("plan", plan.Id), zap.Int("count", len(forgotten)), zap.Any("policy", policy)) + r.logger(ctx).Debug("forget snapshots", zap.Int("count", len(forgotten)), zap.Any("policy", policy)) return forgotten, nil } diff --git a/internal/orchestrator/taskrunnerimpl.go b/internal/orchestrator/taskrunnerimpl.go index 116900b6..934e8f7f 100644 --- a/internal/orchestrator/taskrunnerimpl.go +++ b/internal/orchestrator/taskrunnerimpl.go @@ -11,7 +11,6 @@ import ( "github.com/garethgeorge/backrest/internal/hook" "github.com/garethgeorge/backrest/internal/oplog" "github.com/garethgeorge/backrest/internal/orchestrator/logging" - "github.com/garethgeorge/backrest/internal/orchestrator/repo" "github.com/garethgeorge/backrest/internal/orchestrator/tasks" "github.com/google/uuid" "go.uber.org/zap" @@ -78,10 +77,6 @@ func (t *taskRunnerImpl) DeleteOperation(id ...int64) error { return t.orchestrator.OpLog.Delete(id...) 
} -func (t *taskRunnerImpl) Orchestrator() *Orchestrator { - return t.orchestrator -} - func (t *taskRunnerImpl) QueryOperations(q oplog.Query, fn func(*v1.Operation) error) error { return t.orchestrator.OpLog.Query(q, fn) } @@ -151,7 +146,7 @@ func (t *taskRunnerImpl) GetPlan(planID string) (*v1.Plan, error) { return t.orchestrator.GetPlan(planID) } -func (t *taskRunnerImpl) GetRepoOrchestrator(repoID string) (*repo.RepoOrchestrator, error) { +func (t *taskRunnerImpl) GetRepoOrchestrator(repoID string) (tasks.RepoOrchestrator, error) { return t.orchestrator.GetRepoOrchestrator(repoID) } diff --git a/internal/orchestrator/tasks/task.go b/internal/orchestrator/tasks/task.go index eac2471c..be48a72c 100644 --- a/internal/orchestrator/tasks/task.go +++ b/internal/orchestrator/tasks/task.go @@ -10,11 +10,27 @@ import ( v1 "github.com/garethgeorge/backrest/gen/go/v1" "github.com/garethgeorge/backrest/internal/config" "github.com/garethgeorge/backrest/internal/oplog" - "github.com/garethgeorge/backrest/internal/orchestrator/repo" + "github.com/garethgeorge/backrest/pkg/restic" "go.uber.org/zap" "google.golang.org/protobuf/proto" ) +// RepoOrchestrator is the interface for repo operations that tasks depend on. +// The concrete implementation is in the repo package. 
+type RepoOrchestrator interface { + UnlockIfAutoEnabled(ctx context.Context) error + Backup(ctx context.Context, plan *v1.Plan, dryRun bool, progressCallback func(event *restic.BackupProgressEntry)) (*restic.BackupProgressEntry, error) + Forget(ctx context.Context, policy *v1.RetentionPolicy, opts ...restic.GenericOption) ([]*v1.ResticSnapshot, error) + ForgetSnapshot(ctx context.Context, snapshotId string) error + Prune(ctx context.Context, output io.Writer) error + Check(ctx context.Context, output io.Writer) error + Stats(ctx context.Context) (*v1.RepoStats, error) + Restore(ctx context.Context, snapshotId string, snapshotPath string, target string, progressCallback func(event *v1.RestoreProgressEntry)) (*v1.RestoreProgressEntry, error) + Snapshots(ctx context.Context) ([]*restic.Snapshot, error) + AddTags(ctx context.Context, snapshotIDs []string, tags []string) error + RunCommand(ctx context.Context, command string, writer io.Writer) error +} + var NeverScheduledTask = ScheduledTask{} const ( @@ -52,7 +68,7 @@ type TaskRunner interface { // GetPlan returns the plan with the given ID. GetPlan(planID string) (*v1.Plan, error) // GetRepoOrchestrator returns the orchestrator for the repo with the given ID. - GetRepoOrchestrator(repoID string) (*repo.RepoOrchestrator, error) + GetRepoOrchestrator(repoID string) (RepoOrchestrator, error) // ScheduleTask schedules a task to run at a specific time. ScheduleTask(task Task, priority int) error // Config returns the current config. @@ -178,8 +194,24 @@ func curTimeMillis() int64 { } type testTaskRunner struct { - config *v1.Config // the config to use for the task runner. 
+ config *v1.Config oplog *oplog.OpLog + + // Configurable for Run() testing + orchestrator RepoOrchestrator + hookCalls []hookCall + scheduledTasks []scheduledTaskCall + onExecuteHooks func(ctx context.Context, events []v1.Hook_Condition, vars HookVars) error +} + +type hookCall struct { + Events []v1.Hook_Condition + Vars HookVars +} + +type scheduledTaskCall struct { + Task Task + Priority int } var _ TaskRunner = &testTaskRunner{} @@ -224,7 +256,11 @@ func (t *testTaskRunner) DeleteOperation(id ...int64) error { } func (t *testTaskRunner) ExecuteHooks(ctx context.Context, events []v1.Hook_Condition, vars HookVars) error { - panic("not implemented") + t.hookCalls = append(t.hookCalls, hookCall{Events: events, Vars: vars}) + if t.onExecuteHooks != nil { + return t.onExecuteHooks(ctx, events, vars) + } + return nil } func (t *testTaskRunner) QueryOperations(q oplog.Query, fn func(*v1.Operation) error) error { @@ -250,12 +286,16 @@ func (t *testTaskRunner) GetPlan(planID string) (*v1.Plan, error) { return cfg, nil } -func (t *testTaskRunner) GetRepoOrchestrator(repoID string) (*repo.RepoOrchestrator, error) { - panic("not implemented") +func (t *testTaskRunner) GetRepoOrchestrator(repoID string) (RepoOrchestrator, error) { + if t.orchestrator == nil { + return nil, errors.New("no repo orchestrator configured") + } + return t.orchestrator, nil } func (t *testTaskRunner) ScheduleTask(task Task, priority int) error { - panic("not implemented") + t.scheduledTasks = append(t.scheduledTasks, scheduledTaskCall{Task: task, Priority: priority}) + return nil } func (t *testTaskRunner) Config() *v1.Config { @@ -266,6 +306,12 @@ func (t *testTaskRunner) Logger(ctx context.Context) *zap.Logger { return zap.L() } -func (t *testTaskRunner) LogrefWriter() (id string, w io.WriteCloser, err error) { - panic("not implemented") +type nopWriteCloser struct { + io.Writer +} + +func (nopWriteCloser) Close() error { return nil } + +func (t *testTaskRunner) LogrefWriter() (id string, w 
io.WriteCloser, err error) { + return "test-logref", &nopWriteCloser{io.Discard}, nil } diff --git a/internal/orchestrator/tasks/taskbackup.go b/internal/orchestrator/tasks/taskbackup.go index 70eeb560..d2b3dc5f 100644 --- a/internal/orchestrator/tasks/taskbackup.go +++ b/internal/orchestrator/tasks/taskbackup.go @@ -265,9 +265,13 @@ func (t *BackupTask) Run(ctx context.Context, st ScheduledTask, runner TaskRunne if !skipFollowUpTasks { // schedule followup tasks if a snapshot was added at := time.Now() - if _, ok := plan.Retention.GetPolicy().(*v1.RetentionPolicy_PolicyKeepAll); plan.Retention != nil && !ok { - if err := runner.ScheduleTask(NewOneoffForgetTask(t.Repo(), t.PlanID(), op.FlowId, at), TaskPriorityForget); err != nil { - return fmt.Errorf("failed to schedule forget task: %w", err) + // Skip per-plan forget if the repo has a scheduled forget policy + repoConfig, _ := runner.GetRepo(t.RepoID()) + if repoConfig.GetForgetPolicy().GetSchedule() == nil { + if _, ok := plan.Retention.GetPolicy().(*v1.RetentionPolicy_PolicyKeepAll); plan.Retention != nil && !ok { + if err := runner.ScheduleTask(NewOneoffForgetTask(t.Repo(), t.PlanID(), op.FlowId, at), TaskPriorityForget); err != nil { + return fmt.Errorf("failed to schedule forget task: %w", err) + } } } if err := runner.ScheduleTask(NewOneoffIndexSnapshotsTask(t.Repo(), at), TaskPriorityIndexSnapshots); err != nil { diff --git a/internal/orchestrator/tasks/taskcollectgarbage.go b/internal/orchestrator/tasks/taskcollectgarbage.go index 7c1decdb..8f3e81c5 100644 --- a/internal/orchestrator/tasks/taskcollectgarbage.go +++ b/internal/orchestrator/tasks/taskcollectgarbage.go @@ -48,6 +48,11 @@ var gcSettings = map[reflect.Type]gcSettingsForType{ keepMin: 1, keepMax: 12, }, + reflect.TypeOf(&v1.Operation_OperationForget{}): { + maxAge: 30 * 24 * time.Hour, + keepMin: 1, + keepMax: 30, + }, } var defaultGcSettings = gcSettingsForType{ diff --git a/internal/orchestrator/tasks/taskforget.go 
b/internal/orchestrator/tasks/taskforget.go index 4c3fac13..11243746 100644 --- a/internal/orchestrator/tasks/taskforget.go +++ b/internal/orchestrator/tasks/taskforget.go @@ -2,23 +2,29 @@ package tasks import ( "context" + "errors" "fmt" + "strings" "time" v1 "github.com/garethgeorge/backrest/gen/go/v1" "github.com/garethgeorge/backrest/internal/oplog" "github.com/garethgeorge/backrest/internal/orchestrator/repo" + "github.com/garethgeorge/backrest/internal/protoutil" + "github.com/garethgeorge/backrest/pkg/restic" "github.com/hashicorp/go-multierror" "go.uber.org/zap" ) -func NewOneoffForgetTask(repo *v1.Repo, planID string, flowID int64, at time.Time) Task { +// NewOneoffForgetTask creates a per-plan forget task that runs once after a backup. +// It applies the plan's retention policy scoped to snapshots tagged for that plan. +func NewOneoffForgetTask(repoProto *v1.Repo, planID string, flowID int64, at time.Time) Task { return &GenericOneoffTask{ OneoffTask: OneoffTask{ BaseTask: BaseTask{ TaskType: "forget", - TaskName: fmt.Sprintf("forget for plan %q in repo %q", planID, repo.Id), - TaskRepo: repo, + TaskName: fmt.Sprintf("forget for plan %q in repo %q", planID, repoProto.Id), + TaskRepo: repoProto, TaskPlanID: planID, }, FlowID: flowID, @@ -27,109 +33,278 @@ func NewOneoffForgetTask(repo *v1.Repo, planID string, flowID int64, at time.Tim Op: &v1.Operation_OperationForget{}, }, }, - Do: func(ctx context.Context, st ScheduledTask, taskRunner TaskRunner) error { - op := st.Op - forgetOp := op.GetOperationForget() - if forgetOp == nil { + Do: func(ctx context.Context, st ScheduledTask, runner TaskRunner) error { + if st.Op.GetOperationForget() == nil { panic("forget task with non-forget operation") } - return forgetHelper(ctx, st, taskRunner) + t := st.Task + l := runner.Logger(ctx) + + plan, err := runner.GetPlan(t.PlanID()) + if err != nil { + return fmt.Errorf("get plan %q: %w", t.PlanID(), err) + } + + tags := []string{repo.TagForPlan(t.PlanID())} + if 
compat, err := UseLegacyCompatMode(l, runner, t.Repo().GetGuid(), t.PlanID()); err != nil { + return fmt.Errorf("check legacy compat mode: %w", err) + } else if !compat { + tags = append(tags, repo.TagForInstance(runner.Config().Instance)) + } else { + l.Warn("forgetting snapshots without instance ID, using legacy behavior (e.g. --tags not including instance ID)") + l.Sugar().Warnf("to avoid this warning, tag all snapshots with the instance ID e.g. by running: \r\n"+ + "restic tag --set '%s' --set '%s' --tag '%s'", repo.TagForPlan(t.PlanID()), repo.TagForInstance(runner.Config().Instance), repo.TagForPlan(t.PlanID())) + } + + return forgetHelper(ctx, st, runner, plan.Retention, + restic.WithFlags("--tag", strings.Join(tags, ",")), + restic.WithFlags("--group-by", ""), + ) }, } } -func forgetHelper(ctx context.Context, st ScheduledTask, taskRunner TaskRunner) error { - t := st.Task - l := taskRunner.Logger(ctx) +// ScheduledForgetTask is a repo-level forget task that runs on a schedule. +// It applies the repo's forget policy retention to all snapshots, grouped by tags. 
+type ScheduledForgetTask struct { + BaseTask + force bool + didRun bool +} - // Helper to notify of errors - notifyError := func(err error) error { - return NotifyError(ctx, taskRunner, t.Name(), err, v1.Hook_CONDITION_FORGET_ERROR) +func NewScheduledForgetTask(repoProto *v1.Repo, planID string, force bool) Task { + return &ScheduledForgetTask{ + BaseTask: BaseTask{ + TaskType: "scheduled_forget", + TaskName: fmt.Sprintf("scheduled forget for repo %q", repoProto.Id), + TaskRepo: repoProto, + TaskPlanID: planID, + }, + force: force, + } +} + +func (t *ScheduledForgetTask) Next(now time.Time, runner TaskRunner) (ScheduledTask, error) { + if t.force { + if t.didRun { + return NeverScheduledTask, nil + } + t.didRun = true + return ScheduledTask{ + Task: t, + RunAt: now, + Op: &v1.Operation{ + Op: &v1.Operation_OperationForget{}, + }, + }, nil } - r, err := taskRunner.GetRepoOrchestrator(t.RepoID()) + repoProto, err := runner.GetRepo(t.RepoID()) + if err != nil { + return ScheduledTask{}, fmt.Errorf("get repo %v: %w", t.RepoID(), err) + } + + if repoProto.GetForgetPolicy().GetSchedule() == nil { + return NeverScheduledTask, nil + } + + var lastRan time.Time + var foundBackup bool + if err := runner.QueryOperations(oplog.Query{}. + SetInstanceID(runner.InstanceID()). + SetRepoGUID(repoProto.GetGuid()). + SetPlanID(PlanForSystemTasks). 
+ SetReversed(true), func(op *v1.Operation) error { + if op.Status == v1.OperationStatus_STATUS_PENDING || op.Status == v1.OperationStatus_STATUS_SYSTEM_CANCELLED { + return nil + } + if _, ok := op.Op.(*v1.Operation_OperationForget); ok && op.UnixTimeEndMs != 0 { + lastRan = time.Unix(0, op.UnixTimeEndMs*int64(time.Millisecond)) + return oplog.ErrStopIteration + } + if _, ok := op.Op.(*v1.Operation_OperationBackup); ok { + foundBackup = true + } + return nil + }); err != nil { + return NeverScheduledTask, fmt.Errorf("finding last scheduled forget run time: %w", err) + } else if !foundBackup { + lastRan = now + } + + runAt, err := protoutil.ResolveSchedule(repoProto.GetForgetPolicy().GetSchedule(), lastRan, now) + if errors.Is(err, protoutil.ErrScheduleDisabled) { + return NeverScheduledTask, nil + } else if err != nil { + return NeverScheduledTask, fmt.Errorf("resolve schedule: %w", err) + } + + return ScheduledTask{ + Task: t, + RunAt: runAt, + Op: &v1.Operation{ + Op: &v1.Operation_OperationForget{}, + }, + }, nil +} + +// shouldSkip returns true if there are no new successful backups since the last scheduled forget. +func (t *ScheduledForgetTask) shouldSkip(runner TaskRunner, repoProto *v1.Repo) bool { + var lastForgetEndMs int64 + var hasNewBackup bool + + _ = runner.QueryOperations(oplog.Query{}. + SetInstanceID(runner.InstanceID()). + SetRepoGUID(repoProto.GetGuid()). + SetPlanID(PlanForSystemTasks). + SetReversed(true), func(op *v1.Operation) error { + if op.Status != v1.OperationStatus_STATUS_SUCCESS { + return nil + } + if _, ok := op.Op.(*v1.Operation_OperationForget); ok && op.UnixTimeEndMs != 0 { + lastForgetEndMs = op.UnixTimeEndMs + return oplog.ErrStopIteration + } + return nil + }) + + if lastForgetEndMs == 0 { + return false // no previous forget, don't skip + } + + // Check if any backup completed after the last forget. + // Intentionally not scoped by instance ID: in a sync setup the server receives + // backup operations from remote clients. 
We want forget to run whenever new + // snapshots appear in the repo regardless of which instance created them. + _ = runner.QueryOperations(oplog.Query{}. + SetRepoGUID(repoProto.GetGuid()). + SetReversed(true), func(op *v1.Operation) error { + if op.UnixTimeEndMs < lastForgetEndMs { + return oplog.ErrStopIteration // older than last forget, stop looking + } + if op.Status == v1.OperationStatus_STATUS_SUCCESS { + if _, ok := op.Op.(*v1.Operation_OperationBackup); ok { + hasNewBackup = true + return oplog.ErrStopIteration + } + } + return nil + }) + + return !hasNewBackup +} + +func (t *ScheduledForgetTask) Run(ctx context.Context, st ScheduledTask, runner TaskRunner) error { + op := st.Op + + repoProto, err := runner.GetRepo(t.RepoID()) + if err != nil { + return NotifyError(ctx, runner, t.Name(), fmt.Errorf("get repo %q: %w", t.RepoID(), err), v1.Hook_CONDITION_FORGET_ERROR) + } + + // Skip if no new backups since last forget run. + // Mark as system-cancelled so it doesn't count as a successful run + // and the next schedule is computed from the last actual forget. + if t.shouldSkip(runner, repoProto) { + op.Op = &v1.Operation_OperationForget{ + OperationForget: &v1.OperationForget{}, + } + op.Status = v1.OperationStatus_STATUS_SYSTEM_CANCELLED + op.DisplayMessage = "Skipped: no new backups since last forget" + return nil + } + + err = forgetHelper(ctx, st, runner, repoProto.GetForgetPolicy().GetRetention(), + restic.WithFlags("--group-by", "tags"), + ) + if err != nil { + return err + } + + // Schedule a stats task after successful forget + if e := runner.ScheduleTask(NewStatsTask(t.Repo(), PlanForSystemTasks, false), TaskPriorityStats); e != nil { + zap.L().Error("schedule stats task", zap.Error(e)) + } + + return nil +} + +// forgetHelper contains the shared logic for running a forget operation. +// It handles unlock, hooks, calling restic forget, and marking forgotten snapshots in the oplog. 
+func forgetHelper(ctx context.Context, st ScheduledTask, runner TaskRunner, policy *v1.RetentionPolicy, opts ...restic.GenericOption) error { + t := st.Task + + notifyError := func(err error) error { + return NotifyError(ctx, runner, t.Name(), err, v1.Hook_CONDITION_FORGET_ERROR) + } + + r, err := runner.GetRepoOrchestrator(t.RepoID()) if err != nil { return notifyError(fmt.Errorf("get repo %q: %w", t.RepoID(), err)) } - err = r.UnlockIfAutoEnabled(ctx) - if err != nil { + if err := r.UnlockIfAutoEnabled(ctx); err != nil { return notifyError(fmt.Errorf("auto unlock repo %q: %w", t.RepoID(), err)) } - plan, err := taskRunner.GetPlan(t.PlanID()) - if err != nil { - return notifyError(fmt.Errorf("get plan %q: %w", t.PlanID(), err)) - } - - // execute hooks - if err := taskRunner.ExecuteHooks(ctx, []v1.Hook_Condition{ + if err := runner.ExecuteHooks(ctx, []v1.Hook_Condition{ v1.Hook_CONDITION_FORGET_START, - }, HookVars{Plan: plan}); err != nil { + }, HookVars{}); err != nil { return notifyError(fmt.Errorf("forget start hook: %w", err)) } - tags := []string{repo.TagForPlan(t.PlanID())} - if compat, err := useLegacyCompatMode(l, taskRunner, t.Repo().GetGuid(), t.PlanID()); err != nil { - return notifyError(fmt.Errorf("check legacy compat mode: %w", err)) - } else if !compat { - tags = append(tags, repo.TagForInstance(taskRunner.Config().Instance)) - } else { - l.Warn("forgetting snapshots without instance ID, using legacy behavior (e.g. --tags not including instance ID)") - l.Sugar().Warnf("to avoid this warning, tag all snapshots with the instance ID e.g. by running: \r\n"+ - "restic tag --set '%s' --set '%s' --tag '%s'", repo.TagForPlan(t.PlanID()), repo.TagForInstance(taskRunner.Config().Instance), repo.TagForPlan(t.PlanID())) - } - - // check if any other instance IDs exist in the repo (unassociated don't count) - forgot, err := r.Forget(ctx, plan, tags) + forgot, err := r.Forget(ctx, policy, opts...) 
forgetOp := &v1.Operation_OperationForget{ - OperationForget: &v1.OperationForget{}, + OperationForget: &v1.OperationForget{ + Forget: forgot, + Policy: policy, + }, } st.Op.Op = forgetOp - forgetOp.OperationForget.Forget = append(forgetOp.OperationForget.Forget, forgot...) - forgetOp.OperationForget.Policy = plan.Retention - + // Mark forgotten snapshots in the oplog var ops []*v1.Operation - for _, forgot := range forgot { - if e := taskRunner.QueryOperations(oplog.Query{}. + for _, f := range forgot { + if e := runner.QueryOperations(oplog.Query{}. SetRepoGUID(t.Repo().GetGuid()). - SetSnapshotID(forgot.Id), func(op *v1.Operation) error { + SetSnapshotID(f.Id), func(op *v1.Operation) error { ops = append(ops, op) return nil }); e != nil { - err = multierror.Append(err, fmt.Errorf("cleanup snapshot %v: %w", forgot.Id, e)) + err = multierror.Append(err, fmt.Errorf("lookup snapshot %v: %w", f.Id, e)) } } + l := runner.Logger(ctx) l.Sugar().Debugf("found %v snapshots were forgotten, marking this in oplog", len(ops)) for _, op := range ops { if indexOp, ok := op.Op.(*v1.Operation_OperationIndexSnapshot); ok { indexOp.OperationIndexSnapshot.Forgot = true - if e := taskRunner.UpdateOperation(op); err != nil { + if e := runner.UpdateOperation(op); e != nil { err = multierror.Append(err, fmt.Errorf("mark index snapshot %v as forgotten: %w", op.Id, e)) - continue } } } if err != nil { return notifyError(fmt.Errorf("forget: %w", err)) - } else if e := taskRunner.ExecuteHooks(ctx, []v1.Hook_Condition{ + } + + if e := runner.ExecuteHooks(ctx, []v1.Hook_Condition{ v1.Hook_CONDITION_FORGET_SUCCESS, }, HookVars{}); e != nil { return fmt.Errorf("forget end hook: %w", e) } - return err + return nil } -// useLegacyCompatMode checks if there are any snapshots that were created without a `created-by` tag still exist in the repo. +// UseLegacyCompatMode checks if there are any snapshots that were created without a `created-by` tag still exist in the repo. 
// The property is overridden if mixed `created-by` tag values are found. -func useLegacyCompatMode(l *zap.Logger, taskRunner TaskRunner, repoGUID, planID string) (bool, error) { +func UseLegacyCompatMode(l *zap.Logger, taskRunner TaskRunner, repoGUID, planID string) (bool, error) { instanceIDs := make(map[string]struct{}) if err := taskRunner.QueryOperations(oplog.Query{}.SetRepoGUID(repoGUID).SetPlanID(planID).SetReversed(true), func(op *v1.Operation) error { if snapshotOp, ok := op.Op.(*v1.Operation_OperationIndexSnapshot); ok && !snapshotOp.OperationIndexSnapshot.GetForgot() { diff --git a/internal/orchestrator/tasks/taskindexsnapshots.go b/internal/orchestrator/tasks/taskindexsnapshots.go index 1a36c654..0a5931a7 100644 --- a/internal/orchestrator/tasks/taskindexsnapshots.go +++ b/internal/orchestrator/tasks/taskindexsnapshots.go @@ -3,15 +3,12 @@ package tasks import ( "context" "fmt" - "slices" - "strings" "time" v1 "github.com/garethgeorge/backrest/gen/go/v1" "github.com/garethgeorge/backrest/internal/oplog" "github.com/garethgeorge/backrest/internal/orchestrator/repo" "github.com/garethgeorge/backrest/internal/protoutil" - "github.com/garethgeorge/backrest/pkg/restic" "go.uber.org/zap" ) @@ -166,42 +163,3 @@ func instanceIDForSnapshot(snapshot *v1.ResticSnapshot) string { return InstanceIDForUnassociatedOperations } -// tryMigrate checks if the snapshots use the latest backrest tag set and migrates them if necessary. -func tryMigrate(ctx context.Context, repo *repo.RepoOrchestrator, config *v1.Config, snapshots []*restic.Snapshot) (bool, error) { - if config.Instance == "" { - zap.S().Warnf("Instance ID not set. 
Skipping migration.") - return false, nil - } - - planIDs := make(map[string]struct{}) - for _, plan := range config.Plans { - planIDs[plan.Id] = struct{}{} - } - - needsCreatedBy := []string{} - for _, snapshot := range snapshots { - // Check if snapshot is already tagged with `created-by:`` - if idx := slices.IndexFunc(snapshot.Tags, func(tag string) bool { - return strings.HasPrefix(tag, "created-by:") - }); idx != -1 { - continue - } - // Check that snapshot is included in a plan for this instance. Backrest will not take ownership of snapshots belonging to it isn't aware of. - if _, ok := planIDs[planForSnapshot(protoutil.SnapshotToProto(snapshot))]; !ok { - continue - } - needsCreatedBy = append(needsCreatedBy, snapshot.Id) - } - - if len(needsCreatedBy) == 0 { - return false, nil - } - - zap.S().Warnf("Found %v snapshots without created-by tag but included in a plan for this instance. Taking ownership and adding created-by tag.", len(needsCreatedBy)) - - if err := repo.AddTags(ctx, needsCreatedBy, []string{fmt.Sprintf("created-by:%v", config.Instance)}); err != nil { - return false, fmt.Errorf("add created-by tag to snapshots: %w", err) - } - - return true, nil -} diff --git a/internal/orchestrator/tasks/taskrun_test.go b/internal/orchestrator/tasks/taskrun_test.go new file mode 100644 index 00000000..62b083bf --- /dev/null +++ b/internal/orchestrator/tasks/taskrun_test.go @@ -0,0 +1,648 @@ +package tasks + +import ( + "context" + "fmt" + "testing" + "time" + + v1 "github.com/garethgeorge/backrest/gen/go/v1" + "github.com/garethgeorge/backrest/internal/oplog" + "github.com/garethgeorge/backrest/internal/oplog/sqlitestore" + "github.com/garethgeorge/backrest/pkg/restic" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testSnapshotID = "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" + +func newTestConfig(repo *v1.Repo, plans ...*v1.Plan) *v1.Config { + return &v1.Config{ + Instance: "test-instance", + 
Repos: []*v1.Repo{repo}, + Plans: plans, + } +} + +func setupTestRunner(t *testing.T, cfg *v1.Config, fake *fakeRepoOrchestrator) *testTaskRunner { + t.Helper() + opstore, err := sqlitestore.NewMemorySqliteStore(t) + require.NoError(t, err) + ol, err := oplog.NewOpLog(opstore) + require.NoError(t, err) + runner := newTestTaskRunner(t, cfg, ol) + runner.orchestrator = fake + return runner +} + +func nextAndCreate(t *testing.T, task Task, runner *testTaskRunner) ScheduledTask { + t.Helper() + st, err := task.Next(time.Now(), runner) + require.NoError(t, err) + st.Task = task + if st.Op != nil { + // Populate fields the orchestrator normally sets before storing. + if st.Op.RepoId == "" && task.Repo() != nil { + st.Op.RepoId = task.Repo().Id + } + if st.Op.RepoGuid == "" && task.Repo() != nil { + st.Op.RepoGuid = task.Repo().Guid + } + if st.Op.PlanId == "" { + st.Op.PlanId = task.PlanID() + } + if st.Op.InstanceId == "" { + st.Op.InstanceId = runner.InstanceID() + } + if st.Op.FlowId == 0 { + st.Op.FlowId = 1 + } + if st.Op.UnixTimeStartMs == 0 { + st.Op.UnixTimeStartMs = time.Now().UnixMilli() + } + require.NoError(t, runner.CreateOperation(st.Op)) + } + return st +} + +func hookContains(calls []hookCall, cond v1.Hook_Condition) bool { + for _, c := range calls { + for _, e := range c.Events { + if e == cond { + return true + } + } + } + return false +} + +// --- PruneTask tests --- + +func TestPruneTaskRun(t *testing.T) { + tests := []struct { + name string + fake *fakeRepoOrchestrator + wantErr bool + wantHooks []v1.Hook_Condition + wantScheduled int + scheduledType string + }{ + { + name: "success", + fake: &fakeRepoOrchestrator{}, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_PRUNE_START, v1.Hook_CONDITION_PRUNE_SUCCESS}, + wantScheduled: 1, + scheduledType: "stats", + }, + { + name: "prune error", + fake: &fakeRepoOrchestrator{pruneErr: fmt.Errorf("prune failed")}, + wantErr: true, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_PRUNE_START, 
v1.Hook_CONDITION_PRUNE_ERROR, v1.Hook_CONDITION_ANY_ERROR}, + }, + { + name: "unlock error", + fake: &fakeRepoOrchestrator{unlockErr: fmt.Errorf("unlock failed")}, + wantErr: true, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_PRUNE_ERROR, v1.Hook_CONDITION_ANY_ERROR}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := &v1.Repo{Id: "repo1", Guid: "guid1"} + cfg := newTestConfig(repo) + runner := setupTestRunner(t, cfg, tc.fake) + + task := NewPruneTask(repo, PlanForSystemTasks, true) + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + for _, cond := range tc.wantHooks { + assert.True(t, hookContains(runner.hookCalls, cond), "expected hook %v", cond) + } + + assert.Len(t, runner.scheduledTasks, tc.wantScheduled) + if tc.scheduledType != "" && len(runner.scheduledTasks) > 0 { + assert.Equal(t, tc.scheduledType, runner.scheduledTasks[0].Task.Type()) + } + }) + } +} + +// --- CheckTask tests --- + +func TestCheckTaskRun(t *testing.T) { + tests := []struct { + name string + fake *fakeRepoOrchestrator + wantErr bool + wantHooks []v1.Hook_Condition + }{ + { + name: "success", + fake: &fakeRepoOrchestrator{}, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_CHECK_START, v1.Hook_CONDITION_CHECK_SUCCESS}, + }, + { + name: "check error", + fake: &fakeRepoOrchestrator{checkErr: fmt.Errorf("check failed")}, + wantErr: true, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_CHECK_START, v1.Hook_CONDITION_CHECK_ERROR, v1.Hook_CONDITION_ANY_ERROR}, + }, + { + name: "unlock error", + fake: &fakeRepoOrchestrator{unlockErr: fmt.Errorf("unlock failed")}, + wantErr: true, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_CHECK_ERROR, v1.Hook_CONDITION_ANY_ERROR}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := &v1.Repo{Id: "repo1", Guid: "guid1"} + cfg := newTestConfig(repo) 
+ runner := setupTestRunner(t, cfg, tc.fake) + + task := NewCheckTask(repo, PlanForSystemTasks, true) + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + for _, cond := range tc.wantHooks { + assert.True(t, hookContains(runner.hookCalls, cond), "expected hook %v", cond) + } + }) + } +} + +// --- StatsTask tests --- + +func TestStatsTaskRun(t *testing.T) { + tests := []struct { + name string + fake *fakeRepoOrchestrator + wantErr bool + }{ + { + name: "success", + fake: &fakeRepoOrchestrator{ + statsResult: &v1.RepoStats{ + TotalSize: 1000, + }, + }, + }, + { + name: "stats error", + fake: &fakeRepoOrchestrator{statsErr: fmt.Errorf("stats failed")}, + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := &v1.Repo{Id: "repo1", Guid: "guid1"} + cfg := newTestConfig(repo) + runner := setupTestRunner(t, cfg, tc.fake) + + task := NewStatsTask(repo, PlanForSystemTasks, true) + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + statsOp := st.Op.GetOperationStats() + require.NotNil(t, statsOp) + assert.Equal(t, tc.fake.statsResult.TotalSize, statsOp.Stats.TotalSize) + } + }) + } +} + +// --- BackupTask tests --- + +func TestBackupTaskRun(t *testing.T) { + tests := []struct { + name string + dryRun bool + fake *fakeRepoOrchestrator + repo *v1.Repo + plan *v1.Plan + wantErr bool + wantHooks []v1.Hook_Condition + wantNotHooks []v1.Hook_Condition + wantScheduled []string // expected scheduled task types + }{ + { + name: "successful backup with retention", + fake: &fakeRepoOrchestrator{ + backupResult: &restic.BackupProgressEntry{ + MessageType: "summary", + SnapshotId: testSnapshotID, + TotalBytesProcessed: 1000, + }, + }, + plan: &v1.Plan{ + Id: "plan1", + Repo: "repo1", + Retention: 
&v1.RetentionPolicy{ + Policy: &v1.RetentionPolicy_PolicyKeepLastN{PolicyKeepLastN: 5}, + }, + }, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_SNAPSHOT_START, v1.Hook_CONDITION_SNAPSHOT_SUCCESS, v1.Hook_CONDITION_SNAPSHOT_END}, + wantScheduled: []string{"forget", "index_snapshots"}, + }, + { + name: "successful backup no retention", + fake: &fakeRepoOrchestrator{ + backupResult: &restic.BackupProgressEntry{ + MessageType: "summary", + SnapshotId: testSnapshotID, + TotalBytesProcessed: 1000, + }, + }, + plan: &v1.Plan{ + Id: "plan1", + Repo: "repo1", + }, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_SNAPSHOT_START, v1.Hook_CONDITION_SNAPSHOT_SUCCESS, v1.Hook_CONDITION_SNAPSHOT_END}, + wantScheduled: []string{"index_snapshots"}, + }, + { + name: "successful backup with repo-level scheduled forget skips per-plan forget", + repo: &v1.Repo{ + Id: "repo1", Guid: "guid1", + ForgetPolicy: &v1.ForgetPolicy{ + Schedule: &v1.Schedule{ + Schedule: &v1.Schedule_MaxFrequencyDays{MaxFrequencyDays: 1}, + }, + }, + }, + fake: &fakeRepoOrchestrator{ + backupResult: &restic.BackupProgressEntry{ + MessageType: "summary", + SnapshotId: testSnapshotID, + }, + }, + plan: &v1.Plan{ + Id: "plan1", + Repo: "repo1", + Retention: &v1.RetentionPolicy{ + Policy: &v1.RetentionPolicy_PolicyKeepLastN{PolicyKeepLastN: 5}, + }, + }, + wantScheduled: []string{"index_snapshots"}, // no forget + }, + { + name: "backup error", + fake: &fakeRepoOrchestrator{backupErr: fmt.Errorf("backup failed")}, + plan: &v1.Plan{Id: "plan1", Repo: "repo1"}, + wantErr: true, + wantHooks: []v1.Hook_Condition{ + v1.Hook_CONDITION_SNAPSHOT_START, + v1.Hook_CONDITION_SNAPSHOT_ERROR, + v1.Hook_CONDITION_ANY_ERROR, + v1.Hook_CONDITION_SNAPSHOT_END, + }, + }, + { + name: "unlock error", + fake: &fakeRepoOrchestrator{unlockErr: fmt.Errorf("unlock failed")}, + plan: &v1.Plan{Id: "plan1", Repo: "repo1"}, + wantErr: true, + wantHooks: []v1.Hook_Condition{ + v1.Hook_CONDITION_SNAPSHOT_ERROR, + 
v1.Hook_CONDITION_ANY_ERROR, + }, + }, + { + name: "dry run backup", + dryRun: true, + fake: &fakeRepoOrchestrator{ + backupResult: &restic.BackupProgressEntry{ + MessageType: "summary", + SnapshotId: testSnapshotID, + }, + }, + plan: &v1.Plan{Id: "plan1", Repo: "repo1"}, + wantScheduled: nil, + }, + { + name: "skip if unchanged", + fake: &fakeRepoOrchestrator{ + backupResult: &restic.BackupProgressEntry{ + MessageType: "summary", + SnapshotId: "", // empty = no changes + }, + }, + plan: &v1.Plan{Id: "plan1", Repo: "repo1"}, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_SNAPSHOT_START, v1.Hook_CONDITION_SNAPSHOT_SKIPPED, v1.Hook_CONDITION_SNAPSHOT_END}, + wantScheduled: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := tc.repo + if repo == nil { + repo = &v1.Repo{Id: "repo1", Guid: "guid1"} + } + cfg := newTestConfig(repo, tc.plan) + runner := setupTestRunner(t, cfg, tc.fake) + + task := NewOneoffBackupTask(repo, tc.plan, time.Now(), tc.dryRun) + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + for _, cond := range tc.wantHooks { + assert.True(t, hookContains(runner.hookCalls, cond), "expected hook %v", cond) + } + for _, cond := range tc.wantNotHooks { + assert.False(t, hookContains(runner.hookCalls, cond), "unexpected hook %v", cond) + } + + var scheduledTypes []string + for _, s := range runner.scheduledTasks { + scheduledTypes = append(scheduledTypes, s.Task.Type()) + } + if tc.wantScheduled != nil { + assert.Equal(t, tc.wantScheduled, scheduledTypes) + } + }) + } +} + +// --- ForgetSnapshot task tests --- + +func TestForgetSnapshotTaskRun(t *testing.T) { + tests := []struct { + name string + fake *fakeRepoOrchestrator + wantErr bool + }{ + { + name: "success", + fake: &fakeRepoOrchestrator{}, + }, + { + name: "forget snapshot error", + fake: &fakeRepoOrchestrator{forgetSnapshotErr: 
fmt.Errorf("forget failed")}, + wantErr: true, + }, + { + name: "unlock error", + fake: &fakeRepoOrchestrator{unlockErr: fmt.Errorf("unlock failed")}, + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := &v1.Repo{Id: "repo1", Guid: "guid1"} + cfg := newTestConfig(repo) + runner := setupTestRunner(t, cfg, tc.fake) + + task := NewOneoffForgetSnapshotTask(repo, "plan1", 1, time.Now(), testSnapshotID) + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + // On success, the task schedules an index snapshots task + require.Len(t, runner.scheduledTasks, 1) + assert.Equal(t, "index_snapshots", runner.scheduledTasks[0].Task.Type()) + } + }) + } +} + +// --- ScheduledForgetTask tests --- + +func TestScheduledForgetTaskRun(t *testing.T) { + tests := []struct { + name string + fake *fakeRepoOrchestrator + wantErr bool + wantHooks []v1.Hook_Condition + wantScheduled int + }{ + { + name: "success", + fake: &fakeRepoOrchestrator{}, + wantHooks: []v1.Hook_Condition{v1.Hook_CONDITION_FORGET_START, v1.Hook_CONDITION_FORGET_SUCCESS}, + wantScheduled: 1, // stats task + }, + { + name: "forget error", + fake: &fakeRepoOrchestrator{forgetErr: fmt.Errorf("forget failed")}, + wantErr: true, + wantHooks: []v1.Hook_Condition{ + v1.Hook_CONDITION_FORGET_START, + v1.Hook_CONDITION_FORGET_ERROR, + v1.Hook_CONDITION_ANY_ERROR, + }, + }, + { + name: "unlock error", + fake: &fakeRepoOrchestrator{unlockErr: fmt.Errorf("unlock failed")}, + wantErr: true, + wantHooks: []v1.Hook_Condition{ + v1.Hook_CONDITION_FORGET_ERROR, + v1.Hook_CONDITION_ANY_ERROR, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := &v1.Repo{ + Id: "repo1", Guid: "guid1", + ForgetPolicy: &v1.ForgetPolicy{ + Retention: &v1.RetentionPolicy{ + Policy: &v1.RetentionPolicy_PolicyKeepLastN{PolicyKeepLastN: 5}, + }, + }, + } + 
cfg := newTestConfig(repo) + runner := setupTestRunner(t, cfg, tc.fake) + + task := NewScheduledForgetTask(repo, PlanForSystemTasks, true) + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + for _, cond := range tc.wantHooks { + assert.True(t, hookContains(runner.hookCalls, cond), "expected hook %v", cond) + } + + assert.Len(t, runner.scheduledTasks, tc.wantScheduled) + }) + } +} + +// --- RestoreTask tests --- + +func TestRestoreTaskRun(t *testing.T) { + tests := []struct { + name string + fake *fakeRepoOrchestrator + wantErr bool + }{ + { + name: "success", + fake: &fakeRepoOrchestrator{ + restoreResult: &v1.RestoreProgressEntry{ + MessageType: "summary", + TotalFiles: 10, + TotalBytes: 5000, + FilesRestored: 10, + BytesRestored: 5000, + }, + }, + }, + { + name: "restore error", + fake: &fakeRepoOrchestrator{restoreErr: fmt.Errorf("restore failed")}, + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := &v1.Repo{Id: "repo1", Guid: "guid1"} + cfg := newTestConfig(repo) + runner := setupTestRunner(t, cfg, tc.fake) + + task := NewOneoffRestoreTask(repo, "plan1", 1, time.Now(), testSnapshotID, "/data", "/tmp/restore") + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + restoreOp := st.Op.GetOperationRestore() + require.NotNil(t, restoreOp) + assert.NotNil(t, restoreOp.LastStatus) + } + }) + } +} + +// --- RunCommand tests --- + +func TestRunCommandTaskRun(t *testing.T) { + tests := []struct { + name string + fake *fakeRepoOrchestrator + wantErr bool + }{ + { + name: "success", + fake: &fakeRepoOrchestrator{}, + }, + { + name: "command error", + fake: &fakeRepoOrchestrator{runCommandErr: fmt.Errorf("command failed")}, + wantErr: true, + }, + } + + for _, tc := range tests 
{ + t.Run(tc.name, func(t *testing.T) { + repo := &v1.Repo{Id: "repo1", Guid: "guid1"} + cfg := newTestConfig(repo) + runner := setupTestRunner(t, cfg, tc.fake) + + task := NewOneoffRunCommandTask(repo, "plan1", 1, time.Now(), "echo hello") + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +// --- IndexSnapshots tests --- + +func TestIndexSnapshotsTaskRun(t *testing.T) { + tests := []struct { + name string + fake *fakeRepoOrchestrator + wantErr bool + }{ + { + name: "no snapshots", + fake: &fakeRepoOrchestrator{ + snapshots: []*restic.Snapshot{}, + }, + }, + { + name: "indexes new snapshots", + fake: &fakeRepoOrchestrator{ + snapshots: []*restic.Snapshot{ + { + Id: testSnapshotID, + Time: time.Now().Format(time.RFC3339Nano), + Tags: []string{"plan:plan1", "created-by:test-instance"}, + SnapshotSummary: restic.SnapshotSummary{}, + }, + }, + }, + }, + { + name: "snapshots error", + fake: &fakeRepoOrchestrator{snapshotsErr: fmt.Errorf("snapshots failed")}, + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := &v1.Repo{Id: "repo1", Guid: "guid1"} + plan := &v1.Plan{Id: "plan1", Repo: "repo1"} + cfg := newTestConfig(repo, plan) + runner := setupTestRunner(t, cfg, tc.fake) + + task := NewOneoffIndexSnapshotsTask(repo, time.Now()) + st := nextAndCreate(t, task, runner) + + err := task.Run(context.Background(), st, runner) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/internal/orchestrator/tasks/testhelpers_test.go b/internal/orchestrator/tasks/testhelpers_test.go new file mode 100644 index 00000000..1f90dfcb --- /dev/null +++ b/internal/orchestrator/tasks/testhelpers_test.go @@ -0,0 +1,91 @@ +package tasks + +import ( + "context" + "io" + + v1 "github.com/garethgeorge/backrest/gen/go/v1" + 
"github.com/garethgeorge/backrest/pkg/restic" +) + +// fakeRepoOrchestrator is a test double for the RepoOrchestrator interface. +// Each method returns the corresponding configured result/error fields. +type fakeRepoOrchestrator struct { + unlockErr error + + backupResult *restic.BackupProgressEntry + backupErr error + + forgetResult []*v1.ResticSnapshot + forgetErr error + + forgetSnapshotErr error + + pruneErr error + checkErr error + + statsResult *v1.RepoStats + statsErr error + + restoreResult *v1.RestoreProgressEntry + restoreErr error + + snapshots []*restic.Snapshot + snapshotsErr error + + addTagsErr error + + runCommandErr error +} + +var _ RepoOrchestrator = &fakeRepoOrchestrator{} + +func (f *fakeRepoOrchestrator) UnlockIfAutoEnabled(ctx context.Context) error { + return f.unlockErr +} + +func (f *fakeRepoOrchestrator) Backup(ctx context.Context, plan *v1.Plan, dryRun bool, cb func(event *restic.BackupProgressEntry)) (*restic.BackupProgressEntry, error) { + if cb != nil && f.backupResult != nil { + cb(f.backupResult) + } + return f.backupResult, f.backupErr +} + +func (f *fakeRepoOrchestrator) Forget(ctx context.Context, policy *v1.RetentionPolicy, opts ...restic.GenericOption) ([]*v1.ResticSnapshot, error) { + return f.forgetResult, f.forgetErr +} + +func (f *fakeRepoOrchestrator) ForgetSnapshot(ctx context.Context, snapshotId string) error { + return f.forgetSnapshotErr +} + +func (f *fakeRepoOrchestrator) Prune(ctx context.Context, output io.Writer) error { + return f.pruneErr +} + +func (f *fakeRepoOrchestrator) Check(ctx context.Context, output io.Writer) error { + return f.checkErr +} + +func (f *fakeRepoOrchestrator) Stats(ctx context.Context) (*v1.RepoStats, error) { + return f.statsResult, f.statsErr +} + +func (f *fakeRepoOrchestrator) Restore(ctx context.Context, snapshotId string, snapshotPath string, target string, cb func(event *v1.RestoreProgressEntry)) (*v1.RestoreProgressEntry, error) { + if cb != nil && f.restoreResult != nil { + 
cb(f.restoreResult) + } + return f.restoreResult, f.restoreErr +} + +func (f *fakeRepoOrchestrator) Snapshots(ctx context.Context) ([]*restic.Snapshot, error) { + return f.snapshots, f.snapshotsErr +} + +func (f *fakeRepoOrchestrator) AddTags(ctx context.Context, snapshotIDs []string, tags []string) error { + return f.addTagsErr +} + +func (f *fakeRepoOrchestrator) RunCommand(ctx context.Context, command string, writer io.Writer) error { + return f.runCommandErr +} diff --git a/internal/protoutil/conversion.go b/internal/protoutil/conversion.go index 1976ac36..2ba0e8a7 100644 --- a/internal/protoutil/conversion.go +++ b/internal/protoutil/conversion.go @@ -5,6 +5,7 @@ import ( v1 "github.com/garethgeorge/backrest/gen/go/v1" "github.com/garethgeorge/backrest/pkg/restic" + "google.golang.org/protobuf/proto" ) func SnapshotToProto(s *restic.Snapshot) *v1.ResticSnapshot { @@ -109,6 +110,18 @@ func BackupProgressEntryToBackupError(b *restic.BackupProgressEntry) (*v1.Backup }, nil } +func ValidateRetentionPolicy(p *v1.RetentionPolicy) error { + if p.Policy == nil { + return errors.New("retention policy must specify a policy") + } + if policyTimeBucketed, ok := p.GetPolicy().(*v1.RetentionPolicy_PolicyTimeBucketed); ok { + if proto.Equal(policyTimeBucketed.PolicyTimeBucketed, &v1.RetentionPolicy_TimeBucketedCounts{}) { + return errors.New("time bucketed policy must specify a non-empty bucket") + } + } + return nil +} + func RetentionPolicyFromProto(p *v1.RetentionPolicy) *restic.RetentionPolicy { switch p := p.GetPolicy().(type) { case *v1.RetentionPolicy_PolicyKeepAll: diff --git a/pkg/restic/restic.go b/pkg/restic/restic.go index d039634b..3305600f 100644 --- a/pkg/restic/restic.go +++ b/pkg/restic/restic.go @@ -360,17 +360,26 @@ func (r *Repo) Forget(ctx context.Context, policy *RetentionPolicy, opts ...Gene return nil, err } - if len(results) != 1 { - return nil, fmt.Errorf("expected 1 output from forget, got %v", len(results)) + if len(results) == 0 { + return nil, 
fmt.Errorf("expected at least 1 output from forget, got 0") } - if err := results[0].Validate(); err != nil { - return nil, fmt.Errorf("invalid forget result: %w", err) + // Merge all groups into a single result. Restic returns one ForgetResult + // per group (e.g. when using --group-by tags), each with independent + // keep/remove lists. + merged := &ForgetResult{} + for _, r := range results { + if err := r.Validate(); err != nil { + return nil, fmt.Errorf("invalid forget result: %w", err) + } + merged.Keep = append(merged.Keep, r.Keep...) + merged.Remove = append(merged.Remove, r.Remove...) } - return &results[0], nil + return merged, nil } + func (r *Repo) ForgetSnapshot(ctx context.Context, snapshotId string, opts ...GenericOption) error { args := []string{"forget", "--json", snapshotId} cmd := r.commandWithContext(ctx, args, opts...) diff --git a/pkg/restic/restic_test.go b/pkg/restic/restic_test.go index ee5c39b2..ea4afc63 100644 --- a/pkg/restic/restic_test.go +++ b/pkg/restic/restic_test.go @@ -436,6 +436,65 @@ func TestResticForget(t *testing.T) { } } +func TestResticForgetMultiGroup(t *testing.T) { + t.Parallel() + + repoDir := t.TempDir() + r := NewRepo(helpers.ResticBinary(t), repoDir, WithFlags("--no-cache"), WithEnv("RESTIC_PASSWORD=test")) + if err := r.Init(context.Background()); err != nil { + t.Fatalf("failed to init repo: %v", err) + } + + testData := helpers.CreateTestData(t) + + // Create snapshots with two different tags to produce multiple groups + var groupAIDs, groupBIDs []string + for i := 0; i < 5; i++ { + output, err := r.Backup(context.Background(), []string{testData}, nil, WithTags("group-a")) + if err != nil { + t.Fatalf("failed to backup group-a snapshot %d: %v", i, err) + } + groupAIDs = append(groupAIDs, output.SnapshotId) + } + for i := 0; i < 4; i++ { + output, err := r.Backup(context.Background(), []string{testData}, nil, WithTags("group-b")) + if err != nil { + t.Fatalf("failed to backup group-b snapshot %d: %v", i, err) + } + 
groupBIDs = append(groupBIDs, output.SnapshotId) + } + + // Forget with --group-by tags, keeping 2 per group + res, err := r.Forget(context.Background(), &RetentionPolicy{KeepLastN: 2}, WithFlags("--group-by", "tags")) + if err != nil { + t.Fatalf("failed to forget snapshots: %v", err) + } + + // Should keep 2 from each group = 4 total kept, 5 total removed (3 from A + 2 from B) + if len(res.Keep) != 4 { + t.Errorf("wanted 4 kept snapshots (2 per group), got: %d", len(res.Keep)) + } + if len(res.Remove) != 5 { + t.Errorf("wanted 5 removed snapshots (3 from A + 2 from B), got: %d", len(res.Remove)) + } + + // Verify the kept snapshots are the most recent from each group + keptIDs := make(map[string]bool) + for _, s := range res.Keep { + keptIDs[s.Id] = true + } + for _, id := range groupAIDs[3:] { + if !keptIDs[id] { + t.Errorf("expected group-a snapshot %v to be kept", id) + } + } + for _, id := range groupBIDs[2:] { + if !keptIDs[id] { + t.Errorf("expected group-b snapshot %v to be kept", id) + } + } +} + func TestForgetSnapshotId(t *testing.T) { t.Parallel() diff --git a/proto/v1/config.proto b/proto/v1/config.proto index 6c8f1be6..dcb055a8 100644 --- a/proto/v1/config.proto +++ b/proto/v1/config.proto @@ -28,15 +28,27 @@ message Multihost { PrivateKey identity = 1; repeated Peer known_hosts = 2 [json_name="knownHosts"]; repeated Peer authorized_clients = 3 [json_name="authorizedClients"]; + repeated PairingToken pairing_tokens = 4 [json_name="pairingTokens"]; // active pairing tokens generated by this instance (server-side only) message Peer { string instance_id = 1 [json_name="instanceId"]; // a human readable name for the peer, typically the same as its instance ID. string keyid = 2 [json_name="keyId"]; // the key ID of the peer. This must match the sha256 of the public key the client provides in handshake. 
- bool keyid_verified = 3 [json_name="keyIdVerified"]; // marks whether the key ID was visually verified by the user, this must be done for authorized clients. Not required for known hosts but recommended. + reserved 3; // was keyid_verified, removed in favor of pairing tokens repeated Permission permissions = 5 [json_name="permissions"]; // permissions granted to this peer. // Known host only fields string instance_url = 4 [json_name="instanceUrl"]; // instance URL, required for a known host. Otherwise meaningless. + string initial_pairing_secret = 6 [json_name="initialPairingSecret"]; // one-time pairing secret sent during first handshake to auto-authorize with the server. Cleared after successful pairing. + } + + message PairingToken { + string secret = 1 [json_name="secret"]; // the one-time secret used to validate the pairing request + string label = 2 [json_name="label"]; // human-readable label for this token + int64 created_at_unix = 3 [json_name="createdAtUnix"]; // unix timestamp when the token was created + int64 expires_at_unix = 4 [json_name="expiresAtUnix"]; // unix timestamp when the token expires + int32 max_uses = 5 [json_name="maxUses"]; // maximum number of clients that can pair with this token, 0 means unlimited + int32 uses = 6 [json_name="uses"]; // number of times this token has been used + repeated Permission permissions = 7 [json_name="permissions"]; // permissions granted to clients that pair with this token } message Permission { @@ -53,6 +65,10 @@ message Multihost { // When granted to an authorizedClient, the client will be able to write the configuration to the server. // When granted to a knownHost, the known host will be able to write configuration. PERMISSION_READ_WRITE_CONFIG = 3; // read and write configuration for the resource in scope. + + // When granted to an authorizedClient, the server will push repos marked as 'shared' to the client. + // This permission does not use scopes — if present, all shared repos are pushed. 
+ PERMISSION_RECEIVE_SHARED_REPOS = 4; } // Scopes are any of '*', 'repo:' or 'plan:','-repo:','-plan:'. // '*' means all repos and plans, 'repo:' means the repo with the given ID, 'plan:' means the plan with the given ID. @@ -75,6 +91,9 @@ message Repo { bool auto_unlock = 8 [json_name="autoUnlock"]; // automatically unlock the repo when needed. bool auto_initialize = 12 [json_name="autoInitialize"]; // whether the repo should be auto-initialized if not found. CommandPrefix command_prefix = 10 [json_name="commandPrefix"]; // modifiers for the restic commands + bool shared = 13 [json_name="shared"]; // if true, this repo is pushed to all authorized clients with read-config permission + string origin_instance_id = 14 [json_name="originInstanceId"]; // set when this repo was pushed from a remote instance; marks it as non-editable + ForgetPolicy forget_policy = 15 [json_name="forgetPolicy"]; // optional repo-level forget policy. If set, overrides per-plan retention policies. } message Plan { @@ -126,6 +145,11 @@ message RetentionPolicy { } } +message ForgetPolicy { + Schedule schedule = 1 [json_name="schedule"]; + RetentionPolicy retention = 2 [json_name="retention"]; +} + message PrunePolicy { Schedule schedule = 2 [json_name="schedule"]; int64 max_unused_bytes = 3 [json_name="maxUnusedBytes"]; // max unused bytes before running prune. diff --git a/proto/v1/crypto.proto b/proto/v1/crypto.proto index 97b38709..778fd15a 100644 --- a/proto/v1/crypto.proto +++ b/proto/v1/crypto.proto @@ -13,11 +13,11 @@ message SignedMessage { message PublicKey { string keyid = 1 [json_name="keyId"]; // a unique identifier generated as the SHA256 of the public key. 
- string ed25519pub = 2 [json_name="ed25519pub"]; + string ecdsa_pub = 2 [json_name="ecdsaPub"]; } message PrivateKey { string keyid = 1 [json_name="keyId"]; // a unique identifier generated as the SHA256 of the public key - string ed25519priv = 2 [json_name="ed25519priv"]; - string ed25519pub = 3 [json_name="ed25519pub"]; + string ecdsa_priv = 2 [json_name="ecdsaPriv"]; + string ecdsa_pub = 3 [json_name="ecdsaPub"]; } diff --git a/proto/v1/service.proto b/proto/v1/service.proto index 3d779f99..00f2aef9 100644 --- a/proto/v1/service.proto +++ b/proto/v1/service.proto @@ -69,6 +69,10 @@ service Backrest { // GetSummaryDashboard returns data for the dashboard view. rpc GetSummaryDashboard(google.protobuf.Empty) returns (SummaryDashboardResponse) {} + + // GeneratePairingToken creates a new pairing token on the server that can be shared with clients to simplify peering. + // The token format is ":#" — an opaque string the client pastes when adding a known host. + rpc GeneratePairingToken(GeneratePairingTokenRequest) returns (GeneratePairingTokenResponse) {} } // OpSelector is a message that can be used to select operations e.g. by query. @@ -222,3 +226,14 @@ message SummaryDashboardResponse { repeated int64 bytes_added = 5; } } + +message GeneratePairingTokenRequest { + string label = 1; // human-readable label for the token + int64 ttl_seconds = 2; // time-to-live in seconds (e.g. 
3600 for 1 hour) + int32 max_uses = 3; // max number of clients that can pair with this token, 0 for unlimited + repeated Multihost.Permission permissions = 4; // permissions to grant to clients that pair with this token +} + +message GeneratePairingTokenResponse { + string token = 1; // the opaque pairing token string: ":#" +} diff --git a/proto/v1sync/syncservice.proto b/proto/v1sync/syncservice.proto index c9c39fa2..00c73437 100644 --- a/proto/v1sync/syncservice.proto +++ b/proto/v1sync/syncservice.proto @@ -26,6 +26,8 @@ service BackrestSyncService { // This service should be served behind authentication and authorization. service BackrestSyncStateService { rpc GetPeerSyncStatesStream(SyncStateStreamRequest) returns (stream PeerState) {} + // SetRemoteClientConfig pushes a config change to a connected authorized client peer. + rpc SetRemoteClientConfig(SetRemoteClientConfigRequest) returns (SetRemoteClientConfigResponse) {} } @@ -94,6 +96,16 @@ message SetConfigRequest { repeated string plans_to_delete = 4; // The plan IDs to delete. } +message SetRemoteClientConfigRequest { + string peer_keyid = 1; // The key ID of the connected peer to push config to. + repeated v1.Repo repos = 2; // Repos to create or update on the peer. + repeated v1.Plan plans = 3; // Plans to create or update on the peer. + repeated string repos_to_delete = 4; // Repo IDs to delete on the peer. + repeated string plans_to_delete = 5; // Plan IDs to delete on the peer. +} + +message SetRemoteClientConfigResponse {} + message RemoteConfig { int32 modno = 1; // The modno of the config. int32 version = 2; // The storage version of the config. @@ -112,9 +124,10 @@ message SyncStreamItem { SyncActionHandshake handshake = 3; // note: mostly deprecated, sent through headers rather than stream. 
SyncActionHeartbeat heartbeat = 4; - SyncActionRequestOperations request_operations = 20; + SyncActionOperationManifest operation_manifest = 20; SyncActionReceiveOperations receive_operations = 21; - SyncActionReceiveConfig receive_config = 22; + SyncActionRequestOperationData request_operation_data = 22; + SyncActionReceiveConfig receive_config = 23; SyncActionSetConfig set_config = 24; SyncActionRequestResources request_resources = 25; // request a list of available resources. Only used by the server. SyncActionReceiveResources receive_resources = 26; // receiving a list of available resources. @@ -128,6 +141,7 @@ message SyncStreamItem { int64 protocol_version = 1; v1.PublicKey public_key = 2; v1.SignedMessage instance_id = 3; + string pairing_secret = 4; // optional one-time secret from a pairing token, used to auto-authorize a new client } // SyncActionHeartbeat is sent periodically to keep the connection alive. @@ -163,9 +177,13 @@ message SyncStreamItem { CONNECTION_STATE_NOT_FOUND = 4; } - message SyncActionRequestOperations { - int64 high_opid = 1; // The highest operation ID the requester has. - int64 high_modno = 2; // The highest modno the requester has. + message SyncActionOperationManifest { + repeated int64 op_ids = 1; + repeated int64 modnos = 2; + } + + message SyncActionRequestOperationData { + repeated int64 op_ids = 1; } message SyncActionReceiveOperations { @@ -195,8 +213,8 @@ message SyncStreamItem { } message SyncEstablishSharedSecret { - // a one-time-use ed25519 public key with a matching unshared private key. Used to perform a key exchange. + // a one-time-use ECDSA public key with a matching unshared private key. Used to perform a key exchange. // See https://pkg.go.dev/crypto/ecdh#PrivateKey.ECDH . 
- string ed25519 = 2 [json_name="ed25519pub"]; // base64 encoded public key + string ecdsa_pub = 2 [json_name="ecdsaPub"]; // base64 encoded public key } } diff --git a/scripts/testing/run-named.sh b/scripts/testing/run-named.sh new file mode 100755 index 00000000..9e8b7baf --- /dev/null +++ b/scripts/testing/run-named.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# +# Run a named instance of backrest with its own data directory and ports. +# Multiple instances can run side-by-side for testing sync, multihost, etc. +# +# Usage: ./run-named.sh [backend-port] [vite-port] +# +# Examples: +# ./run-named.sh alice # backend :9901, vite :5181 +# ./run-named.sh bob # backend :9902, vite :5182 +# ./run-named.sh alice 9910 5190 # explicit ports +# +# Data is stored in /tmp/backrest-/ and persists across runs. + +set -euo pipefail + +BASEDIR="$(cd "$(dirname "$0")/../.." && pwd)" +NAME="${1:?Usage: $0 [backend-port] [vite-port]}" + +# Derive deterministic ports from name if not provided. +# Hash the name to a number in a small range to avoid collisions. +name_hash() { + printf '%s' "$1" | cksum | awk '{print $1 % 100}' +} + +OFFSET=$(name_hash "$NAME") +BACKEND_PORT="${2:-$((9900 + OFFSET))}" +VITE_PORT="${3:-$((5180 + OFFSET))}" + +DATADIR="/tmp/backrest-${NAME}" +mkdir -p "$DATADIR" + +PIDS=() + +cleanup() { + echo "" + echo "Shutting down instance '$NAME'..." + for pid in "${PIDS[@]}"; do + kill "$pid" 2>/dev/null || true + done + wait 2>/dev/null || true + echo "Done." +} + +trap cleanup EXIT INT TERM + +echo "=== backrest instance: $NAME ===" +echo " data dir: $DATADIR" +echo " backend: http://127.0.0.1:${BACKEND_PORT}" +echo " webui (vite): http://localhost:${VITE_PORT}" +echo "" + +# Start the Go backend +( + cd "$BASEDIR" + go run ./cmd/backrest \ + -bind-address "127.0.0.1:${BACKEND_PORT}" \ + -config-file "${DATADIR}/config.json" \ + -data-dir "${DATADIR}/data" +) & +PIDS+=($!) 
+ +# Start the vite dev server pointing at this backend +( + cd "$BASEDIR/webui" + UI_BACKEND_URL="http://127.0.0.1:${BACKEND_PORT}" \ + npx vite --port "$VITE_PORT" --strictPort +) & +PIDS+=($!) + +# Wait for any child to exit — if one dies, the trap cleans up the other. +wait -n 2>/dev/null || true diff --git a/shell.nix b/shell.nix new file mode 100644 index 00000000..3c4aff5b --- /dev/null +++ b/shell.nix @@ -0,0 +1,3 @@ +# Backward-compatible wrapper for users without flakes enabled. +# The canonical definition lives in flake.nix. +(builtins.getFlake (toString ./.)).devShells.${builtins.currentSystem}.default diff --git a/webui/gen/ts/google/api/annotations_pb.ts b/webui/gen/ts/google/api/annotations_pb.ts index 405b7c1a..311cd4d5 100644 --- a/webui/gen/ts/google/api/annotations_pb.ts +++ b/webui/gen/ts/google/api/annotations_pb.ts @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file google/api/annotations.proto (package google.api, syntax proto3) /* eslint-disable */ diff --git a/webui/gen/ts/google/api/http_pb.ts b/webui/gen/ts/google/api/http_pb.ts index 78cc0ed4..e04ddc69 100644 --- a/webui/gen/ts/google/api/http_pb.ts +++ b/webui/gen/ts/google/api/http_pb.ts @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file google/api/http.proto (package google.api, syntax proto3) /* eslint-disable */ diff --git a/webui/gen/ts/types/value_pb.ts b/webui/gen/ts/types/value_pb.ts index 8912a97c..146a5d8e 100644 --- a/webui/gen/ts/types/value_pb.ts +++ b/webui/gen/ts/types/value_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file types/value.proto (package types, syntax proto3) /* eslint-disable */ diff --git a/webui/gen/ts/v1/authentication_pb.ts b/webui/gen/ts/v1/authentication_pb.ts index bba495cd..ee2dc4ae 100644 --- a/webui/gen/ts/v1/authentication_pb.ts +++ b/webui/gen/ts/v1/authentication_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file v1/authentication.proto (package v1, syntax proto3) /* eslint-disable */ diff --git a/webui/gen/ts/v1/config_pb.ts b/webui/gen/ts/v1/config_pb.ts index 832b997f..d975fb32 100644 --- a/webui/gen/ts/v1/config_pb.ts +++ b/webui/gen/ts/v1/config_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file v1/config.proto (package v1, syntax proto3) /* eslint-disable */ @@ -13,7 +13,7 @@ import type { Message } from "@bufbuild/protobuf"; * Describes the file v1/config.proto. 
*/ export const file_v1_config: GenFile = /*@__PURE__*/ - fileDesc("Cg92MS9jb25maWcucHJvdG8SAnYxIqwBCgZDb25maWcSDQoFbW9kbm8YASABKAUSDwoHdmVyc2lvbhgGIAEoBRIQCghpbnN0YW5jZRgCIAEoCRIXCgVyZXBvcxgDIAMoCzIILnYxLlJlcG8SFwoFcGxhbnMYBCADKAsyCC52MS5QbGFuEhYKBGF1dGgYBSABKAsyCC52MS5BdXRoEiYKCW11bHRpaG9zdBgHIAEoCzINLnYxLk11bHRpaG9zdFIEc3luYyLwAwoJTXVsdGlob3N0EiAKCGlkZW50aXR5GAEgASgLMg4udjEuUHJpdmF0ZUtleRInCgtrbm93bl9ob3N0cxgCIAMoCzISLnYxLk11bHRpaG9zdC5QZWVyEi4KEmF1dGhvcml6ZWRfY2xpZW50cxgDIAMoCzISLnYxLk11bHRpaG9zdC5QZWVyGp0BCgRQZWVyEhMKC2luc3RhbmNlX2lkGAEgASgJEhQKBWtleWlkGAIgASgJUgVrZXlJZBIlCg5rZXlpZF92ZXJpZmllZBgDIAEoCFINa2V5SWRWZXJpZmllZBItCgtwZXJtaXNzaW9ucxgFIAMoCzIYLnYxLk11bHRpaG9zdC5QZXJtaXNzaW9uEhQKDGluc3RhbmNlX3VybBgEIAEoCRrHAQoKUGVybWlzc2lvbhIrCgR0eXBlGAEgASgOMh0udjEuTXVsdGlob3N0LlBlcm1pc3Npb24uVHlwZRIOCgZzY29wZXMYAiADKAkifAoEVHlwZRIWChJQRVJNSVNTSU9OX1VOS05PV04QABIeChpQRVJNSVNTSU9OX1JFQURfT1BFUkFUSU9OUxABEhoKFlBFUk1JU1NJT05fUkVBRF9DT05GSUcQAhIgChxQRVJNSVNTSU9OX1JFQURfV1JJVEVfQ09ORklHEAMimwIKBFJlcG8SCgoCaWQYASABKAkSCwoDdXJpGAIgASgJEgwKBGd1aWQYCyABKAkSEAoIcGFzc3dvcmQYAyABKAkSCwoDZW52GAQgAygJEg0KBWZsYWdzGAUgAygJEiUKDHBydW5lX3BvbGljeRgGIAEoCzIPLnYxLlBydW5lUG9saWN5EiUKDGNoZWNrX3BvbGljeRgJIAEoCzIPLnYxLkNoZWNrUG9saWN5EhcKBWhvb2tzGAcgAygLMggudjEuSG9vaxITCgthdXRvX3VubG9jaxgIIAEoCBIXCg9hdXRvX2luaXRpYWxpemUYDCABKAgSKQoOY29tbWFuZF9wcmVmaXgYCiABKAsyES52MS5Db21tYW5kUHJlZml4IoYCCgRQbGFuEgoKAmlkGAEgASgJEgwKBHJlcG8YAiABKAkSDQoFcGF0aHMYBCADKAkSEAoIZXhjbHVkZXMYBSADKAkSEQoJaWV4Y2x1ZGVzGAkgAygJEh4KCHNjaGVkdWxlGAwgASgLMgwudjEuU2NoZWR1bGUSJgoJcmV0ZW50aW9uGAcgASgLMhMudjEuUmV0ZW50aW9uUG9saWN5EhcKBWhvb2tzGAggAygLMggudjEuSG9vaxIiCgxiYWNrdXBfZmxhZ3MYCiADKAlSDGJhY2t1cF9mbGFncxIZChFza2lwX2lmX3VuY2hhbmdlZBgNIAEoCEoECAMQBEoECAYQB0oECAsQDCKKAgoNQ29tbWFuZFByZWZpeBIuCgdpb19uaWNlGAEgASgOMh0udjEuQ29tbWFuZFByZWZpeC5JT05pY2VMZXZlbBIwCghjcHVfbmljZRgCIAEoDjIeLnYxLkNvbW1hbmRQcmVmaXguQ1BVTmljZUxldmVsIlsKC0lPTmljZUxldmVsEg4KCklPX0RFRkFVTFQQABIWChJJT19CRVNUX0VGRk9SVF9MT1cQARIXChNJT19CRVNUX0VGRk9SVF9ISUdIEAISCwoHSU9f
SURMRRADIjoKDENQVU5pY2VMZXZlbBIPCgtDUFVfREVGQVVMVBAAEgwKCENQVV9ISUdIEAESCwoHQ1BVX0xPVxACIpcCCg9SZXRlbnRpb25Qb2xpY3kSHAoScG9saWN5X2tlZXBfbGFzdF9uGAogASgFSAASRgoUcG9saWN5X3RpbWVfYnVja2V0ZWQYCyABKAsyJi52MS5SZXRlbnRpb25Qb2xpY3kuVGltZUJ1Y2tldGVkQ291bnRzSAASGQoPcG9saWN5X2tlZXBfYWxsGAwgASgISAAaeQoSVGltZUJ1Y2tldGVkQ291bnRzEg4KBmhvdXJseRgBIAEoBRINCgVkYWlseRgCIAEoBRIOCgZ3ZWVrbHkYAyABKAUSDwoHbW9udGhseRgEIAEoBRIOCgZ5ZWFybHkYBSABKAUSEwoLa2VlcF9sYXN0X24YBiABKAVCCAoGcG9saWN5ImMKC1BydW5lUG9saWN5Eh4KCHNjaGVkdWxlGAIgASgLMgwudjEuU2NoZWR1bGUSGAoQbWF4X3VudXNlZF9ieXRlcxgDIAEoAxIaChJtYXhfdW51c2VkX3BlcmNlbnQYBCABKAEicwoLQ2hlY2tQb2xpY3kSHgoIc2NoZWR1bGUYASABKAsyDC52MS5TY2hlZHVsZRIYCg5zdHJ1Y3R1cmVfb25seRhkIAEoCEgAEiIKGHJlYWRfZGF0YV9zdWJzZXRfcGVyY2VudBhlIAEoAUgAQgYKBG1vZGUi6wEKCFNjaGVkdWxlEhIKCGRpc2FibGVkGAEgASgISAASDgoEY3JvbhgCIAEoCUgAEhoKEG1heEZyZXF1ZW5jeURheXMYAyABKAVIABIbChFtYXhGcmVxdWVuY3lIb3VycxgEIAEoBUgAEiEKBWNsb2NrGAUgASgOMhIudjEuU2NoZWR1bGUuQ2xvY2siUwoFQ2xvY2sSEQoNQ0xPQ0tfREVGQVVMVBAAEg8KC0NMT0NLX0xPQ0FMEAESDQoJQ0xPQ0tfVVRDEAISFwoTQ0xPQ0tfTEFTVF9SVU5fVElNRRADQgoKCHNjaGVkdWxlIoANCgRIb29rEiYKCmNvbmRpdGlvbnMYASADKA4yEi52MS5Ib29rLkNvbmRpdGlvbhIiCghvbl9lcnJvchgCIAEoDjIQLnYxLkhvb2suT25FcnJvchIqCg5hY3Rpb25fY29tbWFuZBhkIAEoCzIQLnYxLkhvb2suQ29tbWFuZEgAEioKDmFjdGlvbl93ZWJob29rGGUgASgLMhAudjEuSG9vay5XZWJob29rSAASKgoOYWN0aW9uX2Rpc2NvcmQYZiABKAsyEC52MS5Ib29rLkRpc2NvcmRIABIoCg1hY3Rpb25fZ290aWZ5GGcgASgLMg8udjEuSG9vay5Hb3RpZnlIABImCgxhY3Rpb25fc2xhY2sYaCABKAsyDi52MS5Ib29rLlNsYWNrSAASLAoPYWN0aW9uX3Nob3V0cnJyGGkgASgLMhEudjEuSG9vay5TaG91dHJyckgAEjQKE2FjdGlvbl9oZWFsdGhjaGVja3MYaiABKAsyFS52MS5Ib29rLkhlYWx0aGNoZWNrc0gAEiwKD2FjdGlvbl90ZWxlZ3JhbRhrIAEoCzIRLnYxLkhvb2suVGVsZWdyYW1IABoaCgdDb21tYW5kEg8KB2NvbW1hbmQYASABKAkagwEKB1dlYmhvb2sSEwoLd2ViaG9va191cmwYASABKAkSJwoGbWV0aG9kGAIgASgOMhcudjEuSG9vay5XZWJob29rLk1ldGhvZBIQCgh0ZW1wbGF0ZRhkIAEoCSIoCgZNZXRob2QSCwoHVU5LTk9XThAAEgcKA0dFVBABEggKBFBPU1QQAhowCgdEaXNjb3JkEhMKC3dlYmhvb2tfdXJsGAEgASgJEhAKCHRlbXBsYXRlGAIgASgJGmUKBkdvdGlmeRIQCghiYXNlX3VybBgBIAEoCRINCgV0b2tlbhgD
IAEoCRIQCgh0ZW1wbGF0ZRhkIAEoCRIWCg50aXRsZV90ZW1wbGF0ZRhlIAEoCRIQCghwcmlvcml0eRhmIAEoBRouCgVTbGFjaxITCgt3ZWJob29rX3VybBgBIAEoCRIQCgh0ZW1wbGF0ZRgCIAEoCRoyCghTaG91dHJychIUCgxzaG91dHJycl91cmwYASABKAkSEAoIdGVtcGxhdGUYAiABKAkaNQoMSGVhbHRoY2hlY2tzEhMKC3dlYmhvb2tfdXJsGAEgASgJEhAKCHRlbXBsYXRlGAIgASgJGkAKCFRlbGVncmFtEhEKCWJvdF90b2tlbhgBIAEoCRIPCgdjaGF0X2lkGAIgASgJEhAKCHRlbXBsYXRlGAMgASgJIvUDCglDb25kaXRpb24SFQoRQ09ORElUSU9OX1VOS05PV04QABIXChNDT05ESVRJT05fQU5ZX0VSUk9SEAESHAoYQ09ORElUSU9OX1NOQVBTSE9UX1NUQVJUEAISGgoWQ09ORElUSU9OX1NOQVBTSE9UX0VORBADEhwKGENPTkRJVElPTl9TTkFQU0hPVF9FUlJPUhAEEh4KGkNPTkRJVElPTl9TTkFQU0hPVF9XQVJOSU5HEAUSHgoaQ09ORElUSU9OX1NOQVBTSE9UX1NVQ0NFU1MQBhIeChpDT05ESVRJT05fU05BUFNIT1RfU0tJUFBFRBAHEhkKFUNPTkRJVElPTl9QUlVORV9TVEFSVBBkEhkKFUNPTkRJVElPTl9QUlVORV9FUlJPUhBlEhsKF0NPTkRJVElPTl9QUlVORV9TVUNDRVNTEGYSGgoVQ09ORElUSU9OX0NIRUNLX1NUQVJUEMgBEhoKFUNPTkRJVElPTl9DSEVDS19FUlJPUhDJARIcChdDT05ESVRJT05fQ0hFQ0tfU1VDQ0VTUxDKARIbChZDT05ESVRJT05fRk9SR0VUX1NUQVJUEKwCEhsKFkNPTkRJVElPTl9GT1JHRVRfRVJST1IQrQISHQoYQ09ORElUSU9OX0ZPUkdFVF9TVUNDRVNTEK4CIqkBCgdPbkVycm9yEhMKD09OX0VSUk9SX0lHTk9SRRAAEhMKD09OX0VSUk9SX0NBTkNFTBABEhIKDk9OX0VSUk9SX0ZBVEFMEAISGgoWT05fRVJST1JfUkVUUllfMU1JTlVURRBkEhwKGE9OX0VSUk9SX1JFVFJZXzEwTUlOVVRFUxBlEiYKIk9OX0VSUk9SX1JFVFJZX0VYUE9ORU5USUFMX0JBQ0tPRkYQZ0IICgZhY3Rpb24iMQoEQXV0aBIQCghkaXNhYmxlZBgBIAEoCBIXCgV1c2VycxgCIAMoCzIILnYxLlVzZXIiOwoEVXNlchIMCgRuYW1lGAEgASgJEhkKD3Bhc3N3b3JkX2JjcnlwdBgCIAEoCUgAQgoKCHBhc3N3b3JkQixaKmdpdGh1Yi5jb20vZ2FyZXRoZ2VvcmdlL2JhY2tyZXN0L2dlbi9nby92MWIGcHJvdG8z", [file_google_protobuf_empty, file_v1_crypto]); + 
fileDesc("Cg92MS9jb25maWcucHJvdG8SAnYxIqwBCgZDb25maWcSDQoFbW9kbm8YASABKAUSDwoHdmVyc2lvbhgGIAEoBRIQCghpbnN0YW5jZRgCIAEoCRIXCgVyZXBvcxgDIAMoCzIILnYxLlJlcG8SFwoFcGxhbnMYBCADKAsyCC52MS5QbGFuEhYKBGF1dGgYBSABKAsyCC52MS5BdXRoEiYKCW11bHRpaG9zdBgHIAEoCzINLnYxLk11bHRpaG9zdFIEc3luYyL6BQoJTXVsdGlob3N0EiAKCGlkZW50aXR5GAEgASgLMg4udjEuUHJpdmF0ZUtleRInCgtrbm93bl9ob3N0cxgCIAMoCzISLnYxLk11bHRpaG9zdC5QZWVyEi4KEmF1dGhvcml6ZWRfY2xpZW50cxgDIAMoCzISLnYxLk11bHRpaG9zdC5QZWVyEjIKDnBhaXJpbmdfdG9rZW5zGAQgAygLMhoudjEuTXVsdGlob3N0LlBhaXJpbmdUb2tlbhqcAQoEUGVlchITCgtpbnN0YW5jZV9pZBgBIAEoCRIUCgVrZXlpZBgCIAEoCVIFa2V5SWQSLQoLcGVybWlzc2lvbnMYBSADKAsyGC52MS5NdWx0aWhvc3QuUGVybWlzc2lvbhIUCgxpbnN0YW5jZV91cmwYBCABKAkSHgoWaW5pdGlhbF9wYWlyaW5nX3NlY3JldBgGIAEoCUoECAMQBBquAQoMUGFpcmluZ1Rva2VuEg4KBnNlY3JldBgBIAEoCRINCgVsYWJlbBgCIAEoCRIXCg9jcmVhdGVkX2F0X3VuaXgYAyABKAMSFwoPZXhwaXJlc19hdF91bml4GAQgASgDEhAKCG1heF91c2VzGAUgASgFEgwKBHVzZXMYBiABKAUSLQoLcGVybWlzc2lvbnMYByADKAsyGC52MS5NdWx0aWhvc3QuUGVybWlzc2lvbhrtAQoKUGVybWlzc2lvbhIrCgR0eXBlGAEgASgOMh0udjEuTXVsdGlob3N0LlBlcm1pc3Npb24uVHlwZRIOCgZzY29wZXMYAiADKAkioQEKBFR5cGUSFgoSUEVSTUlTU0lPTl9VTktOT1dOEAASHgoaUEVSTUlTU0lPTl9SRUFEX09QRVJBVElPTlMQARIaChZQRVJNSVNTSU9OX1JFQURfQ09ORklHEAISIAocUEVSTUlTU0lPTl9SRUFEX1dSSVRFX0NPTkZJRxADEiMKH1BFUk1JU1NJT05fUkVDRUlWRV9TSEFSRURfUkVQT1MQBCLwAgoEUmVwbxIKCgJpZBgBIAEoCRILCgN1cmkYAiABKAkSDAoEZ3VpZBgLIAEoCRIQCghwYXNzd29yZBgDIAEoCRILCgNlbnYYBCADKAkSDQoFZmxhZ3MYBSADKAkSJQoMcHJ1bmVfcG9saWN5GAYgASgLMg8udjEuUHJ1bmVQb2xpY3kSJQoMY2hlY2tfcG9saWN5GAkgASgLMg8udjEuQ2hlY2tQb2xpY3kSFwoFaG9va3MYByADKAsyCC52MS5Ib29rEhMKC2F1dG9fdW5sb2NrGAggASgIEhcKD2F1dG9faW5pdGlhbGl6ZRgMIAEoCBIpCg5jb21tYW5kX3ByZWZpeBgKIAEoCzIRLnYxLkNvbW1hbmRQcmVmaXgSDgoGc2hhcmVkGA0gASgIEhoKEm9yaWdpbl9pbnN0YW5jZV9pZBgOIAEoCRInCg1mb3JnZXRfcG9saWN5GA8gASgLMhAudjEuRm9yZ2V0UG9saWN5IoYCCgRQbGFuEgoKAmlkGAEgASgJEgwKBHJlcG8YAiABKAkSDQoFcGF0aHMYBCADKAkSEAoIZXhjbHVkZXMYBSADKAkSEQoJaWV4Y2x1ZGVzGAkgAygJEh4KCHNjaGVkdWxlGAwgASgLMgwudjEuU2NoZWR1bGUSJgoJcmV0ZW50aW9uGAcgASgLMhMudjEuUmV0ZW50aW9uUG9saWN5Eh
cKBWhvb2tzGAggAygLMggudjEuSG9vaxIiCgxiYWNrdXBfZmxhZ3MYCiADKAlSDGJhY2t1cF9mbGFncxIZChFza2lwX2lmX3VuY2hhbmdlZBgNIAEoCEoECAMQBEoECAYQB0oECAsQDCKKAgoNQ29tbWFuZFByZWZpeBIuCgdpb19uaWNlGAEgASgOMh0udjEuQ29tbWFuZFByZWZpeC5JT05pY2VMZXZlbBIwCghjcHVfbmljZRgCIAEoDjIeLnYxLkNvbW1hbmRQcmVmaXguQ1BVTmljZUxldmVsIlsKC0lPTmljZUxldmVsEg4KCklPX0RFRkFVTFQQABIWChJJT19CRVNUX0VGRk9SVF9MT1cQARIXChNJT19CRVNUX0VGRk9SVF9ISUdIEAISCwoHSU9fSURMRRADIjoKDENQVU5pY2VMZXZlbBIPCgtDUFVfREVGQVVMVBAAEgwKCENQVV9ISUdIEAESCwoHQ1BVX0xPVxACIpcCCg9SZXRlbnRpb25Qb2xpY3kSHAoScG9saWN5X2tlZXBfbGFzdF9uGAogASgFSAASRgoUcG9saWN5X3RpbWVfYnVja2V0ZWQYCyABKAsyJi52MS5SZXRlbnRpb25Qb2xpY3kuVGltZUJ1Y2tldGVkQ291bnRzSAASGQoPcG9saWN5X2tlZXBfYWxsGAwgASgISAAaeQoSVGltZUJ1Y2tldGVkQ291bnRzEg4KBmhvdXJseRgBIAEoBRINCgVkYWlseRgCIAEoBRIOCgZ3ZWVrbHkYAyABKAUSDwoHbW9udGhseRgEIAEoBRIOCgZ5ZWFybHkYBSABKAUSEwoLa2VlcF9sYXN0X24YBiABKAVCCAoGcG9saWN5IlYKDEZvcmdldFBvbGljeRIeCghzY2hlZHVsZRgBIAEoCzIMLnYxLlNjaGVkdWxlEiYKCXJldGVudGlvbhgCIAEoCzITLnYxLlJldGVudGlvblBvbGljeSJjCgtQcnVuZVBvbGljeRIeCghzY2hlZHVsZRgCIAEoCzIMLnYxLlNjaGVkdWxlEhgKEG1heF91bnVzZWRfYnl0ZXMYAyABKAMSGgoSbWF4X3VudXNlZF9wZXJjZW50GAQgASgBInMKC0NoZWNrUG9saWN5Eh4KCHNjaGVkdWxlGAEgASgLMgwudjEuU2NoZWR1bGUSGAoOc3RydWN0dXJlX29ubHkYZCABKAhIABIiChhyZWFkX2RhdGFfc3Vic2V0X3BlcmNlbnQYZSABKAFIAEIGCgRtb2RlIusBCghTY2hlZHVsZRISCghkaXNhYmxlZBgBIAEoCEgAEg4KBGNyb24YAiABKAlIABIaChBtYXhGcmVxdWVuY3lEYXlzGAMgASgFSAASGwoRbWF4RnJlcXVlbmN5SG91cnMYBCABKAVIABIhCgVjbG9jaxgFIAEoDjISLnYxLlNjaGVkdWxlLkNsb2NrIlMKBUNsb2NrEhEKDUNMT0NLX0RFRkFVTFQQABIPCgtDTE9DS19MT0NBTBABEg0KCUNMT0NLX1VUQxACEhcKE0NMT0NLX0xBU1RfUlVOX1RJTUUQA0IKCghzY2hlZHVsZSKADQoESG9vaxImCgpjb25kaXRpb25zGAEgAygOMhIudjEuSG9vay5Db25kaXRpb24SIgoIb25fZXJyb3IYAiABKA4yEC52MS5Ib29rLk9uRXJyb3ISKgoOYWN0aW9uX2NvbW1hbmQYZCABKAsyEC52MS5Ib29rLkNvbW1hbmRIABIqCg5hY3Rpb25fd2ViaG9vaxhlIAEoCzIQLnYxLkhvb2suV2ViaG9va0gAEioKDmFjdGlvbl9kaXNjb3JkGGYgASgLMhAudjEuSG9vay5EaXNjb3JkSAASKAoNYWN0aW9uX2dvdGlmeRhnIAEoCzIPLnYxLkhvb2suR290aWZ5SAASJgoMYWN0aW9uX3NsYWNrGGggASgLMg4udjEuSG9vay5TbGFja0gAEi
wKD2FjdGlvbl9zaG91dHJychhpIAEoCzIRLnYxLkhvb2suU2hvdXRycnJIABI0ChNhY3Rpb25faGVhbHRoY2hlY2tzGGogASgLMhUudjEuSG9vay5IZWFsdGhjaGVja3NIABIsCg9hY3Rpb25fdGVsZWdyYW0YayABKAsyES52MS5Ib29rLlRlbGVncmFtSAAaGgoHQ29tbWFuZBIPCgdjb21tYW5kGAEgASgJGoMBCgdXZWJob29rEhMKC3dlYmhvb2tfdXJsGAEgASgJEicKBm1ldGhvZBgCIAEoDjIXLnYxLkhvb2suV2ViaG9vay5NZXRob2QSEAoIdGVtcGxhdGUYZCABKAkiKAoGTWV0aG9kEgsKB1VOS05PV04QABIHCgNHRVQQARIICgRQT1NUEAIaMAoHRGlzY29yZBITCgt3ZWJob29rX3VybBgBIAEoCRIQCgh0ZW1wbGF0ZRgCIAEoCRplCgZHb3RpZnkSEAoIYmFzZV91cmwYASABKAkSDQoFdG9rZW4YAyABKAkSEAoIdGVtcGxhdGUYZCABKAkSFgoOdGl0bGVfdGVtcGxhdGUYZSABKAkSEAoIcHJpb3JpdHkYZiABKAUaLgoFU2xhY2sSEwoLd2ViaG9va191cmwYASABKAkSEAoIdGVtcGxhdGUYAiABKAkaMgoIU2hvdXRycnISFAoMc2hvdXRycnJfdXJsGAEgASgJEhAKCHRlbXBsYXRlGAIgASgJGjUKDEhlYWx0aGNoZWNrcxITCgt3ZWJob29rX3VybBgBIAEoCRIQCgh0ZW1wbGF0ZRgCIAEoCRpACghUZWxlZ3JhbRIRCglib3RfdG9rZW4YASABKAkSDwoHY2hhdF9pZBgCIAEoCRIQCgh0ZW1wbGF0ZRgDIAEoCSL1AwoJQ29uZGl0aW9uEhUKEUNPTkRJVElPTl9VTktOT1dOEAASFwoTQ09ORElUSU9OX0FOWV9FUlJPUhABEhwKGENPTkRJVElPTl9TTkFQU0hPVF9TVEFSVBACEhoKFkNPTkRJVElPTl9TTkFQU0hPVF9FTkQQAxIcChhDT05ESVRJT05fU05BUFNIT1RfRVJST1IQBBIeChpDT05ESVRJT05fU05BUFNIT1RfV0FSTklORxAFEh4KGkNPTkRJVElPTl9TTkFQU0hPVF9TVUNDRVNTEAYSHgoaQ09ORElUSU9OX1NOQVBTSE9UX1NLSVBQRUQQBxIZChVDT05ESVRJT05fUFJVTkVfU1RBUlQQZBIZChVDT05ESVRJT05fUFJVTkVfRVJST1IQZRIbChdDT05ESVRJT05fUFJVTkVfU1VDQ0VTUxBmEhoKFUNPTkRJVElPTl9DSEVDS19TVEFSVBDIARIaChVDT05ESVRJT05fQ0hFQ0tfRVJST1IQyQESHAoXQ09ORElUSU9OX0NIRUNLX1NVQ0NFU1MQygESGwoWQ09ORElUSU9OX0ZPUkdFVF9TVEFSVBCsAhIbChZDT05ESVRJT05fRk9SR0VUX0VSUk9SEK0CEh0KGENPTkRJVElPTl9GT1JHRVRfU1VDQ0VTUxCuAiKpAQoHT25FcnJvchITCg9PTl9FUlJPUl9JR05PUkUQABITCg9PTl9FUlJPUl9DQU5DRUwQARISCg5PTl9FUlJPUl9GQVRBTBACEhoKFk9OX0VSUk9SX1JFVFJZXzFNSU5VVEUQZBIcChhPTl9FUlJPUl9SRVRSWV8xME1JTlVURVMQZRImCiJPTl9FUlJPUl9SRVRSWV9FWFBPTkVOVElBTF9CQUNLT0ZGEGdCCAoGYWN0aW9uIjEKBEF1dGgSEAoIZGlzYWJsZWQYASABKAgSFwoFdXNlcnMYAiADKAsyCC52MS5Vc2VyIjsKBFVzZXISDAoEbmFtZRgBIAEoCRIZCg9wYXNzd29yZF9iY3J5cHQYAiABKAlIAEIKCghwYXNzd29yZEIsWipnaXRodWIuY29tL2dhcmV0aG
dlb3JnZS9iYWNrcmVzdC9nZW4vZ28vdjFiBnByb3RvMw", [file_google_protobuf_empty, file_v1_crypto]); /** * Config is the top level config object for restic UI. @@ -89,6 +89,13 @@ export type Multihost = Message<"v1.Multihost"> & { * @generated from field: repeated v1.Multihost.Peer authorized_clients = 3; */ authorizedClients: Multihost_Peer[]; + + /** + * active pairing tokens generated by this instance (server-side only) + * + * @generated from field: repeated v1.Multihost.PairingToken pairing_tokens = 4; + */ + pairingTokens: Multihost_PairingToken[]; }; /** @@ -116,13 +123,6 @@ export type Multihost_Peer = Message<"v1.Multihost.Peer"> & { */ keyid: string; - /** - * marks whether the key ID was visually verified by the user, this must be done for authorized clients. Not required for known hosts but recommended. - * - * @generated from field: bool keyid_verified = 3 [json_name = "keyIdVerified"]; - */ - keyidVerified: boolean; - /** * permissions granted to this peer. * @@ -138,6 +138,13 @@ export type Multihost_Peer = Message<"v1.Multihost.Peer"> & { * @generated from field: string instance_url = 4; */ instanceUrl: string; + + /** + * one-time pairing secret sent during first handshake to auto-authorize with the server. Cleared after successful pairing. 
+ * + * @generated from field: string initial_pairing_secret = 6; + */ + initialPairingSecret: string; }; /** @@ -147,6 +154,67 @@ export type Multihost_Peer = Message<"v1.Multihost.Peer"> & { export const Multihost_PeerSchema: GenMessage = /*@__PURE__*/ messageDesc(file_v1_config, 1, 0); +/** + * @generated from message v1.Multihost.PairingToken + */ +export type Multihost_PairingToken = Message<"v1.Multihost.PairingToken"> & { + /** + * the one-time secret used to validate the pairing request + * + * @generated from field: string secret = 1; + */ + secret: string; + + /** + * human-readable label for this token + * + * @generated from field: string label = 2; + */ + label: string; + + /** + * unix timestamp when the token was created + * + * @generated from field: int64 created_at_unix = 3; + */ + createdAtUnix: bigint; + + /** + * unix timestamp when the token expires + * + * @generated from field: int64 expires_at_unix = 4; + */ + expiresAtUnix: bigint; + + /** + * maximum number of clients that can pair with this token, 0 means unlimited + * + * @generated from field: int32 max_uses = 5; + */ + maxUses: number; + + /** + * number of times this token has been used + * + * @generated from field: int32 uses = 6; + */ + uses: number; + + /** + * permissions granted to clients that pair with this token + * + * @generated from field: repeated v1.Multihost.Permission permissions = 7; + */ + permissions: Multihost_Permission[]; +}; + +/** + * Describes the message v1.Multihost.PairingToken. + * Use `create(Multihost_PairingTokenSchema)` to create a new message. + */ +export const Multihost_PairingTokenSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_v1_config, 1, 1); + /** * @generated from message v1.Multihost.Permission */ @@ -171,7 +239,7 @@ export type Multihost_Permission = Message<"v1.Multihost.Permission"> & { * Use `create(Multihost_PermissionSchema)` to create a new message. 
*/ export const Multihost_PermissionSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 1, 1); + messageDesc(file_v1_config, 1, 2); /** * @generated from enum v1.Multihost.Permission.Type @@ -208,13 +276,21 @@ export enum Multihost_Permission_Type { * @generated from enum value: PERMISSION_READ_WRITE_CONFIG = 3; */ PERMISSION_READ_WRITE_CONFIG = 3, + + /** + * When granted to an authorizedClient, the server will push repos marked as 'shared' to the client. + * This permission does not use scopes — if present, all shared repos are pushed. + * + * @generated from enum value: PERMISSION_RECEIVE_SHARED_REPOS = 4; + */ + PERMISSION_RECEIVE_SHARED_REPOS = 4, } /** * Describes the enum v1.Multihost.Permission.Type. */ export const Multihost_Permission_TypeSchema: GenEnum = /*@__PURE__*/ - enumDesc(file_v1_config, 1, 1, 0); + enumDesc(file_v1_config, 1, 2, 0); /** * @generated from message v1.Repo @@ -303,6 +379,27 @@ export type Repo = Message<"v1.Repo"> & { * @generated from field: v1.CommandPrefix command_prefix = 10; */ commandPrefix?: CommandPrefix; + + /** + * if true, this repo is pushed to all authorized clients with read-config permission + * + * @generated from field: bool shared = 13; + */ + shared: boolean; + + /** + * set when this repo was pushed from a remote instance; marks it as non-editable + * + * @generated from field: string origin_instance_id = 14; + */ + originInstanceId: string; + + /** + * optional repo-level forget policy. If set, overrides per-plan retention policies. 
+ * + * @generated from field: v1.ForgetPolicy forget_policy = 15; + */ + forgetPolicy?: ForgetPolicy; }; /** @@ -566,6 +663,28 @@ export type RetentionPolicy_TimeBucketedCounts = Message<"v1.RetentionPolicy.Tim export const RetentionPolicy_TimeBucketedCountsSchema: GenMessage = /*@__PURE__*/ messageDesc(file_v1_config, 5, 0); +/** + * @generated from message v1.ForgetPolicy + */ +export type ForgetPolicy = Message<"v1.ForgetPolicy"> & { + /** + * @generated from field: v1.Schedule schedule = 1; + */ + schedule?: Schedule; + + /** + * @generated from field: v1.RetentionPolicy retention = 2; + */ + retention?: RetentionPolicy; +}; + +/** + * Describes the message v1.ForgetPolicy. + * Use `create(ForgetPolicySchema)` to create a new message. + */ +export const ForgetPolicySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_v1_config, 6); + /** * @generated from message v1.PrunePolicy */ @@ -595,7 +714,7 @@ export type PrunePolicy = Message<"v1.PrunePolicy"> & { * Use `create(PrunePolicySchema)` to create a new message. */ export const PrunePolicySchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 6); + messageDesc(file_v1_config, 7); /** * @generated from message v1.CheckPolicy @@ -633,7 +752,7 @@ export type CheckPolicy = Message<"v1.CheckPolicy"> & { * Use `create(CheckPolicySchema)` to create a new message. */ export const CheckPolicySchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 7); + messageDesc(file_v1_config, 8); /** * @generated from message v1.Schedule @@ -689,7 +808,7 @@ export type Schedule = Message<"v1.Schedule"> & { * Use `create(ScheduleSchema)` to create a new message. */ export const ScheduleSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 8); + messageDesc(file_v1_config, 9); /** * @generated from enum v1.Schedule.Clock @@ -722,7 +841,7 @@ export enum Schedule_Clock { * Describes the enum v1.Schedule.Clock. 
*/ export const Schedule_ClockSchema: GenEnum = /*@__PURE__*/ - enumDesc(file_v1_config, 8, 0); + enumDesc(file_v1_config, 9, 0); /** * @generated from message v1.Hook @@ -797,7 +916,7 @@ export type Hook = Message<"v1.Hook"> & { * Use `create(HookSchema)` to create a new message. */ export const HookSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9); + messageDesc(file_v1_config, 10); /** * @generated from message v1.Hook.Command @@ -814,7 +933,7 @@ export type Hook_Command = Message<"v1.Hook.Command"> & { * Use `create(Hook_CommandSchema)` to create a new message. */ export const Hook_CommandSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9, 0); + messageDesc(file_v1_config, 10, 0); /** * @generated from message v1.Hook.Webhook @@ -841,7 +960,7 @@ export type Hook_Webhook = Message<"v1.Hook.Webhook"> & { * Use `create(Hook_WebhookSchema)` to create a new message. */ export const Hook_WebhookSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9, 1); + messageDesc(file_v1_config, 10, 1); /** * @generated from enum v1.Hook.Webhook.Method @@ -867,7 +986,7 @@ export enum Hook_Webhook_Method { * Describes the enum v1.Hook.Webhook.Method. */ export const Hook_Webhook_MethodSchema: GenEnum = /*@__PURE__*/ - enumDesc(file_v1_config, 9, 1, 0); + enumDesc(file_v1_config, 10, 1, 0); /** * @generated from message v1.Hook.Discord @@ -891,7 +1010,7 @@ export type Hook_Discord = Message<"v1.Hook.Discord"> & { * Use `create(Hook_DiscordSchema)` to create a new message. */ export const Hook_DiscordSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9, 2); + messageDesc(file_v1_config, 10, 2); /** * @generated from message v1.Hook.Gotify @@ -934,7 +1053,7 @@ export type Hook_Gotify = Message<"v1.Hook.Gotify"> & { * Use `create(Hook_GotifySchema)` to create a new message. 
*/ export const Hook_GotifySchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9, 3); + messageDesc(file_v1_config, 10, 3); /** * @generated from message v1.Hook.Slack @@ -958,7 +1077,7 @@ export type Hook_Slack = Message<"v1.Hook.Slack"> & { * Use `create(Hook_SlackSchema)` to create a new message. */ export const Hook_SlackSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9, 4); + messageDesc(file_v1_config, 10, 4); /** * @generated from message v1.Hook.Shoutrrr @@ -980,7 +1099,7 @@ export type Hook_Shoutrrr = Message<"v1.Hook.Shoutrrr"> & { * Use `create(Hook_ShoutrrrSchema)` to create a new message. */ export const Hook_ShoutrrrSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9, 5); + messageDesc(file_v1_config, 10, 5); /** * @generated from message v1.Hook.Healthchecks @@ -1002,7 +1121,7 @@ export type Hook_Healthchecks = Message<"v1.Hook.Healthchecks"> & { * Use `create(Hook_HealthchecksSchema)` to create a new message. */ export const Hook_HealthchecksSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9, 6); + messageDesc(file_v1_config, 10, 6); /** * @generated from message v1.Hook.Telegram @@ -1031,7 +1150,7 @@ export type Hook_Telegram = Message<"v1.Hook.Telegram"> & { * Use `create(Hook_TelegramSchema)` to create a new message. */ export const Hook_TelegramSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 9, 7); + messageDesc(file_v1_config, 10, 7); /** * @generated from enum v1.Hook.Condition @@ -1165,7 +1284,7 @@ export enum Hook_Condition { * Describes the enum v1.Hook.Condition. */ export const Hook_ConditionSchema: GenEnum = /*@__PURE__*/ - enumDesc(file_v1_config, 9, 0); + enumDesc(file_v1_config, 10, 0); /** * @generated from enum v1.Hook.OnError @@ -1216,7 +1335,7 @@ export enum Hook_OnError { * Describes the enum v1.Hook.OnError. 
*/ export const Hook_OnErrorSchema: GenEnum = /*@__PURE__*/ - enumDesc(file_v1_config, 9, 1); + enumDesc(file_v1_config, 10, 1); /** * @generated from message v1.Auth @@ -1242,7 +1361,7 @@ export type Auth = Message<"v1.Auth"> & { * Use `create(AuthSchema)` to create a new message. */ export const AuthSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 10); + messageDesc(file_v1_config, 11); /** * @generated from message v1.User @@ -1270,5 +1389,5 @@ export type User = Message<"v1.User"> & { * Use `create(UserSchema)` to create a new message. */ export const UserSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1_config, 11); + messageDesc(file_v1_config, 12); diff --git a/webui/gen/ts/v1/crypto_pb.ts b/webui/gen/ts/v1/crypto_pb.ts index 087ccf40..302bdc9b 100644 --- a/webui/gen/ts/v1/crypto_pb.ts +++ b/webui/gen/ts/v1/crypto_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file v1/crypto.proto (package v1, syntax proto3) /* eslint-disable */ @@ -10,7 +10,7 @@ import type { Message } from "@bufbuild/protobuf"; * Describes the file v1/crypto.proto. 
*/ export const file_v1_crypto: GenFile = /*@__PURE__*/ - fileDesc("Cg92MS9jcnlwdG8ucHJvdG8SAnYxIlsKDVNpZ25lZE1lc3NhZ2USDQoFa2V5aWQYASABKAkSDwoHcGF5bG9hZBgCIAEoDBIRCglzaWduYXR1cmUYAyABKAwSFwoPdGltZXN0YW1wTWlsbGlzGAQgASgDIjUKCVB1YmxpY0tleRIUCgVrZXlpZBgBIAEoCVIFa2V5SWQSEgoKZWQyNTUxOXB1YhgCIAEoCSJLCgpQcml2YXRlS2V5EhQKBWtleWlkGAEgASgJUgVrZXlJZBITCgtlZDI1NTE5cHJpdhgCIAEoCRISCgplZDI1NTE5cHViGAMgASgJQixaKmdpdGh1Yi5jb20vZ2FyZXRoZ2VvcmdlL2JhY2tyZXN0L2dlbi9nby92MWIGcHJvdG8z"); + fileDesc("Cg92MS9jcnlwdG8ucHJvdG8SAnYxIlsKDVNpZ25lZE1lc3NhZ2USDQoFa2V5aWQYASABKAkSDwoHcGF5bG9hZBgCIAEoDBIRCglzaWduYXR1cmUYAyABKAwSFwoPdGltZXN0YW1wTWlsbGlzGAQgASgDIjQKCVB1YmxpY0tleRIUCgVrZXlpZBgBIAEoCVIFa2V5SWQSEQoJZWNkc2FfcHViGAIgASgJIkkKClByaXZhdGVLZXkSFAoFa2V5aWQYASABKAlSBWtleUlkEhIKCmVjZHNhX3ByaXYYAiABKAkSEQoJZWNkc2FfcHViGAMgASgJQixaKmdpdGh1Yi5jb20vZ2FyZXRoZ2VvcmdlL2JhY2tyZXN0L2dlbi9nby92MWIGcHJvdG8z"); /** * @generated from message v1.SignedMessage @@ -64,9 +64,9 @@ export type PublicKey = Message<"v1.PublicKey"> & { keyid: string; /** - * @generated from field: string ed25519pub = 2; + * @generated from field: string ecdsa_pub = 2; */ - ed25519pub: string; + ecdsaPub: string; }; /** @@ -88,14 +88,14 @@ export type PrivateKey = Message<"v1.PrivateKey"> & { keyid: string; /** - * @generated from field: string ed25519priv = 2; + * @generated from field: string ecdsa_priv = 2; */ - ed25519priv: string; + ecdsaPriv: string; /** - * @generated from field: string ed25519pub = 3; + * @generated from field: string ecdsa_pub = 3; */ - ed25519pub: string; + ecdsaPub: string; }; /** diff --git a/webui/gen/ts/v1/operations_pb.ts b/webui/gen/ts/v1/operations_pb.ts index f3b81f6a..2547f6b9 100644 --- a/webui/gen/ts/v1/operations_pb.ts +++ b/webui/gen/ts/v1/operations_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file v1/operations.proto (package v1, syntax proto3) /* eslint-disable 
*/ diff --git a/webui/gen/ts/v1/restic_pb.ts b/webui/gen/ts/v1/restic_pb.ts index 90133e65..251e560c 100644 --- a/webui/gen/ts/v1/restic_pb.ts +++ b/webui/gen/ts/v1/restic_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file v1/restic.proto (package v1, syntax proto3) /* eslint-disable */ diff --git a/webui/gen/ts/v1/service_pb.ts b/webui/gen/ts/v1/service_pb.ts index e2117f47..56c2394b 100644 --- a/webui/gen/ts/v1/service_pb.ts +++ b/webui/gen/ts/v1/service_pb.ts @@ -1,10 +1,10 @@ -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file v1/service.proto (package v1, syntax proto3) /* eslint-disable */ import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; import { enumDesc, fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; -import type { ConfigSchema, Repo } from "./config_pb"; +import type { ConfigSchema, Multihost_Permission, Repo } from "./config_pb"; import { file_v1_config } from "./config_pb"; import type { ResticSnapshotListSchema } from "./restic_pb"; import { file_v1_restic } from "./restic_pb"; @@ -21,7 +21,7 @@ import type { Message } from "@bufbuild/protobuf"; * Describes the file v1/service.proto. 
*/ export const file_v1_service: GenFile = /*@__PURE__*/ - fileDesc("ChB2MS9zZXJ2aWNlLnByb3RvEgJ2MSIvCg1CYWNrdXBSZXF1ZXN0Eg0KBXZhbHVlGAEgASgJEg8KB2RyeV9ydW4YAiABKAgivwIKCk9wU2VsZWN0b3ISCwoDaWRzGAEgAygDEhgKC2luc3RhbmNlX2lkGAYgASgJSACIAQESJAoXb3JpZ2luYWxfaW5zdGFuY2Vfa2V5aWQYCCABKAlIAYgBARIWCglyZXBvX2d1aWQYByABKAlIAogBARIUCgdwbGFuX2lkGAMgASgJSAOIAQESGAoLc25hcHNob3RfaWQYBCABKAlIBIgBARIUCgdmbG93X2lkGAUgASgDSAWIAQESFgoJbW9kbm9fZ3RlGAkgASgDSAaIAQFCDgoMX2luc3RhbmNlX2lkQhoKGF9vcmlnaW5hbF9pbnN0YW5jZV9rZXlpZEIMCgpfcmVwb19ndWlkQgoKCF9wbGFuX2lkQg4KDF9zbmFwc2hvdF9pZEIKCghfZmxvd19pZEIMCgpfbW9kbm9fZ3RlImQKEFNldHVwU2Z0cFJlcXVlc3QSDAoEaG9zdBgBIAEoCRIMCgRwb3J0GAIgASgJEhAKCHVzZXJuYW1lGAMgASgJEhUKCHBhc3N3b3JkGAQgASgJSACIAQFCCwoJX3Bhc3N3b3JkImIKEVNldHVwU2Z0cFJlc3BvbnNlEhIKCnB1YmxpY19rZXkYASABKAkSEAoIa2V5X3BhdGgYAiABKAkSGAoQa25vd25faG9zdHNfcGF0aBgDIAEoCRINCgVlcnJvchgEIAEoCSIwChZDaGVja1JlcG9FeGlzdHNSZXF1ZXN0EhYKBHJlcG8YASABKAsyCC52MS5SZXBvIlQKF0NoZWNrUmVwb0V4aXN0c1Jlc3BvbnNlEg4KBmV4aXN0cxgBIAEoCBINCgVlcnJvchgCIAEoCRIaChJob3N0X2tleV91bnRydXN0ZWQYBSABKAgiKAoOQWRkUmVwb1JlcXVlc3QSFgoEcmVwbxgBIAEoCzIILnYxLlJlcG8iwAEKEURvUmVwb1Rhc2tSZXF1ZXN0Eg8KB3JlcG9faWQYASABKAkSKAoEdGFzaxgCIAEoDjIaLnYxLkRvUmVwb1Rhc2tSZXF1ZXN0LlRhc2sicAoEVGFzaxINCglUQVNLX05PTkUQABIYChRUQVNLX0lOREVYX1NOQVBTSE9UUxABEg4KClRBU0tfUFJVTkUQAhIOCgpUQVNLX0NIRUNLEAMSDgoKVEFTS19TVEFUUxAEEg8KC1RBU0tfVU5MT0NLEAUiTAoTQ2xlYXJIaXN0b3J5UmVxdWVzdBIgCghzZWxlY3RvchgBIAEoCzIOLnYxLk9wU2VsZWN0b3ISEwoLb25seV9mYWlsZWQYAiABKAgiRgoNRm9yZ2V0UmVxdWVzdBIPCgdyZXBvX2lkGAEgASgJEg8KB3BsYW5faWQYAiABKAkSEwoLc25hcHNob3RfaWQYAyABKAkiOAoUTGlzdFNuYXBzaG90c1JlcXVlc3QSDwoHcmVwb19pZBgBIAEoCRIPCgdwbGFuX2lkGAIgASgJIkgKFEdldE9wZXJhdGlvbnNSZXF1ZXN0EiAKCHNlbGVjdG9yGAEgASgLMg4udjEuT3BTZWxlY3RvchIOCgZsYXN0X24YAiABKAMibQoWUmVzdG9yZVNuYXBzaG90UmVxdWVzdBIPCgdwbGFuX2lkGAEgASgJEg8KB3JlcG9faWQYBSABKAkSEwoLc25hcHNob3RfaWQYAiABKAkSDAoEcGF0aBgDIAEoCRIOCgZ0YXJnZXQYBCABKAkiUAoYTGlzdFNuYXBzaG90RmlsZXNSZXF1ZXN0EhEKCXJlcG9fZ3VpZBgBIAEoCRITCgtzbmFwc2hvdF9pZBgCIAEoCRIMCgRwYXRoGAMgASgJIkcKGUx
pc3RTbmFwc2hvdEZpbGVzUmVzcG9uc2USDAoEcGF0aBgBIAEoCRIcCgdlbnRyaWVzGAIgAygLMgsudjEuTHNFbnRyeSIdCg5Mb2dEYXRhUmVxdWVzdBILCgNyZWYYASABKAkiOQoVR2V0RG93bmxvYWRVUkxSZXF1ZXN0Eg0KBW9wX2lkGAEgASgDEhEKCWZpbGVfcGF0aBgCIAEoCSKWAQoHTHNFbnRyeRIMCgRuYW1lGAEgASgJEgwKBHR5cGUYAiABKAkSDAoEcGF0aBgDIAEoCRILCgN1aWQYBCABKAMSCwoDZ2lkGAUgASgDEgwKBHNpemUYBiABKAMSDAoEbW9kZRgHIAEoAxINCgVtdGltZRgIIAEoCRINCgVhdGltZRgJIAEoCRINCgVjdGltZRgKIAEoCSI1ChFSdW5Db21tYW5kUmVxdWVzdBIPCgdyZXBvX2lkGAEgASgJEg8KB2NvbW1hbmQYAiABKAkitQUKGFN1bW1hcnlEYXNoYm9hcmRSZXNwb25zZRI8Cg5yZXBvX3N1bW1hcmllcxgBIAMoCzIkLnYxLlN1bW1hcnlEYXNoYm9hcmRSZXNwb25zZS5TdW1tYXJ5EjwKDnBsYW5fc3VtbWFyaWVzGAIgAygLMiQudjEuU3VtbWFyeURhc2hib2FyZFJlc3BvbnNlLlN1bW1hcnkSEwoLY29uZmlnX3BhdGgYCiABKAkSEQoJZGF0YV9wYXRoGAsgASgJGu4CCgdTdW1tYXJ5EgoKAmlkGAEgASgJEh0KFWJhY2t1cHNfZmFpbGVkXzMwZGF5cxgCIAEoAxIjChtiYWNrdXBzX3dhcm5pbmdfbGFzdF8zMGRheXMYAyABKAMSIwobYmFja3Vwc19zdWNjZXNzX2xhc3RfMzBkYXlzGAQgASgDEiEKGWJ5dGVzX3NjYW5uZWRfbGFzdF8zMGRheXMYBSABKAMSHwoXYnl0ZXNfYWRkZWRfbGFzdF8zMGRheXMYBiABKAMSFwoPdG90YWxfc25hcHNob3RzGAcgASgDEhkKEWJ5dGVzX3NjYW5uZWRfYXZnGAggASgDEhcKD2J5dGVzX2FkZGVkX2F2ZxgJIAEoAxIbChNuZXh0X2JhY2t1cF90aW1lX21zGAogASgDEkAKDnJlY2VudF9iYWNrdXBzGAsgASgLMigudjEuU3VtbWFyeURhc2hib2FyZFJlc3BvbnNlLkJhY2t1cENoYXJ0GoMBCgtCYWNrdXBDaGFydBIPCgdmbG93X2lkGAEgAygDEhQKDHRpbWVzdGFtcF9tcxgCIAMoAxITCgtkdXJhdGlvbl9tcxgDIAMoAxIjCgZzdGF0dXMYBCADKA4yEy52MS5PcGVyYXRpb25TdGF0dXMSEwoLYnl0ZXNfYWRkZWQYBSADKAMykQoKCEJhY2tyZXN0EjEKCUdldENvbmZpZxIWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eRoKLnYxLkNvbmZpZyIAEiUKCVNldENvbmZpZxIKLnYxLkNvbmZpZxoKLnYxLkNvbmZpZyIAEjoKCVNldHVwU2Z0cBIULnYxLlNldHVwU2Z0cFJlcXVlc3QaFS52MS5TZXR1cFNmdHBSZXNwb25zZSIAEkwKD0NoZWNrUmVwb0V4aXN0cxIaLnYxLkNoZWNrUmVwb0V4aXN0c1JlcXVlc3QaGy52MS5DaGVja1JlcG9FeGlzdHNSZXNwb25zZSIAEisKB0FkZFJlcG8SEi52MS5BZGRSZXBvUmVxdWVzdBoKLnYxLkNvbmZpZyIAEi4KClJlbW92ZVJlcG8SEi50eXBlcy5TdHJpbmdWYWx1ZRoKLnYxLkNvbmZpZyIAEkQKEkdldE9wZXJhdGlvbkV2ZW50cxIWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eRoSLnYxLk9wZXJhdGlvbkV2ZW50IgAwARI+Cg1HZXRPcGVyYXRpb25zEhgudjEuR2V0T3BlcmF0aW9
uc1JlcXVlc3QaES52MS5PcGVyYXRpb25MaXN0IgASQwoNTGlzdFNuYXBzaG90cxIYLnYxLkxpc3RTbmFwc2hvdHNSZXF1ZXN0GhYudjEuUmVzdGljU25hcHNob3RMaXN0IgASUgoRTGlzdFNuYXBzaG90RmlsZXMSHC52MS5MaXN0U25hcHNob3RGaWxlc1JlcXVlc3QaHS52MS5MaXN0U25hcHNob3RGaWxlc1Jlc3BvbnNlIgASNQoGQmFja3VwEhEudjEuQmFja3VwUmVxdWVzdBoWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eSIAEj0KCkRvUmVwb1Rhc2sSFS52MS5Eb1JlcG9UYXNrUmVxdWVzdBoWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eSIAEjUKBkZvcmdldBIRLnYxLkZvcmdldFJlcXVlc3QaFi5nb29nbGUucHJvdG9idWYuRW1wdHkiABI/CgdSZXN0b3JlEhoudjEuUmVzdG9yZVNuYXBzaG90UmVxdWVzdBoWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eSIAEjUKBkNhbmNlbBIRLnR5cGVzLkludDY0VmFsdWUaFi5nb29nbGUucHJvdG9idWYuRW1wdHkiABI0CgdHZXRMb2dzEhIudjEuTG9nRGF0YVJlcXVlc3QaES50eXBlcy5CeXRlc1ZhbHVlIgAwARI4CgpSdW5Db21tYW5kEhUudjEuUnVuQ29tbWFuZFJlcXVlc3QaES50eXBlcy5JbnQ2NFZhbHVlIgASQQoOR2V0RG93bmxvYWRVUkwSGS52MS5HZXREb3dubG9hZFVSTFJlcXVlc3QaEi50eXBlcy5TdHJpbmdWYWx1ZSIAEkEKDENsZWFySGlzdG9yeRIXLnYxLkNsZWFySGlzdG9yeVJlcXVlc3QaFi5nb29nbGUucHJvdG9idWYuRW1wdHkiABI7ChBQYXRoQXV0b2NvbXBsZXRlEhIudHlwZXMuU3RyaW5nVmFsdWUaES50eXBlcy5TdHJpbmdMaXN0IgASTQoTR2V0U3VtbWFyeURhc2hib2FyZBIWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eRocLnYxLlN1bW1hcnlEYXNoYm9hcmRSZXNwb25zZSIAQixaKmdpdGh1Yi5jb20vZ2FyZXRoZ2VvcmdlL2JhY2tyZXN0L2dlbi9nby92MWIGcHJvdG8z", [file_v1_config, file_v1_restic, file_v1_operations, file_types_value, file_google_protobuf_empty, file_google_api_annotations]); + 
fileDesc("ChB2MS9zZXJ2aWNlLnByb3RvEgJ2MSIvCg1CYWNrdXBSZXF1ZXN0Eg0KBXZhbHVlGAEgASgJEg8KB2RyeV9ydW4YAiABKAgivwIKCk9wU2VsZWN0b3ISCwoDaWRzGAEgAygDEhgKC2luc3RhbmNlX2lkGAYgASgJSACIAQESJAoXb3JpZ2luYWxfaW5zdGFuY2Vfa2V5aWQYCCABKAlIAYgBARIWCglyZXBvX2d1aWQYByABKAlIAogBARIUCgdwbGFuX2lkGAMgASgJSAOIAQESGAoLc25hcHNob3RfaWQYBCABKAlIBIgBARIUCgdmbG93X2lkGAUgASgDSAWIAQESFgoJbW9kbm9fZ3RlGAkgASgDSAaIAQFCDgoMX2luc3RhbmNlX2lkQhoKGF9vcmlnaW5hbF9pbnN0YW5jZV9rZXlpZEIMCgpfcmVwb19ndWlkQgoKCF9wbGFuX2lkQg4KDF9zbmFwc2hvdF9pZEIKCghfZmxvd19pZEIMCgpfbW9kbm9fZ3RlImQKEFNldHVwU2Z0cFJlcXVlc3QSDAoEaG9zdBgBIAEoCRIMCgRwb3J0GAIgASgJEhAKCHVzZXJuYW1lGAMgASgJEhUKCHBhc3N3b3JkGAQgASgJSACIAQFCCwoJX3Bhc3N3b3JkImIKEVNldHVwU2Z0cFJlc3BvbnNlEhIKCnB1YmxpY19rZXkYASABKAkSEAoIa2V5X3BhdGgYAiABKAkSGAoQa25vd25faG9zdHNfcGF0aBgDIAEoCRINCgVlcnJvchgEIAEoCSIwChZDaGVja1JlcG9FeGlzdHNSZXF1ZXN0EhYKBHJlcG8YASABKAsyCC52MS5SZXBvIlQKF0NoZWNrUmVwb0V4aXN0c1Jlc3BvbnNlEg4KBmV4aXN0cxgBIAEoCBINCgVlcnJvchgCIAEoCRIaChJob3N0X2tleV91bnRydXN0ZWQYBSABKAgiKAoOQWRkUmVwb1JlcXVlc3QSFgoEcmVwbxgBIAEoCzIILnYxLlJlcG8iwAEKEURvUmVwb1Rhc2tSZXF1ZXN0Eg8KB3JlcG9faWQYASABKAkSKAoEdGFzaxgCIAEoDjIaLnYxLkRvUmVwb1Rhc2tSZXF1ZXN0LlRhc2sicAoEVGFzaxINCglUQVNLX05PTkUQABIYChRUQVNLX0lOREVYX1NOQVBTSE9UUxABEg4KClRBU0tfUFJVTkUQAhIOCgpUQVNLX0NIRUNLEAMSDgoKVEFTS19TVEFUUxAEEg8KC1RBU0tfVU5MT0NLEAUiTAoTQ2xlYXJIaXN0b3J5UmVxdWVzdBIgCghzZWxlY3RvchgBIAEoCzIOLnYxLk9wU2VsZWN0b3ISEwoLb25seV9mYWlsZWQYAiABKAgiRgoNRm9yZ2V0UmVxdWVzdBIPCgdyZXBvX2lkGAEgASgJEg8KB3BsYW5faWQYAiABKAkSEwoLc25hcHNob3RfaWQYAyABKAkiOAoUTGlzdFNuYXBzaG90c1JlcXVlc3QSDwoHcmVwb19pZBgBIAEoCRIPCgdwbGFuX2lkGAIgASgJIkgKFEdldE9wZXJhdGlvbnNSZXF1ZXN0EiAKCHNlbGVjdG9yGAEgASgLMg4udjEuT3BTZWxlY3RvchIOCgZsYXN0X24YAiABKAMibQoWUmVzdG9yZVNuYXBzaG90UmVxdWVzdBIPCgdwbGFuX2lkGAEgASgJEg8KB3JlcG9faWQYBSABKAkSEwoLc25hcHNob3RfaWQYAiABKAkSDAoEcGF0aBgDIAEoCRIOCgZ0YXJnZXQYBCABKAkiUAoYTGlzdFNuYXBzaG90RmlsZXNSZXF1ZXN0EhEKCXJlcG9fZ3VpZBgBIAEoCRITCgtzbmFwc2hvdF9pZBgCIAEoCRIMCgRwYXRoGAMgASgJIkcKGUxpc3RTbmFwc2hvdEZpbGVzUmVzcG9uc2USDAoEcGF0aBgBIAEoCRIcCgdlbn
RyaWVzGAIgAygLMgsudjEuTHNFbnRyeSIdCg5Mb2dEYXRhUmVxdWVzdBILCgNyZWYYASABKAkiOQoVR2V0RG93bmxvYWRVUkxSZXF1ZXN0Eg0KBW9wX2lkGAEgASgDEhEKCWZpbGVfcGF0aBgCIAEoCSKWAQoHTHNFbnRyeRIMCgRuYW1lGAEgASgJEgwKBHR5cGUYAiABKAkSDAoEcGF0aBgDIAEoCRILCgN1aWQYBCABKAMSCwoDZ2lkGAUgASgDEgwKBHNpemUYBiABKAMSDAoEbW9kZRgHIAEoAxINCgVtdGltZRgIIAEoCRINCgVhdGltZRgJIAEoCRINCgVjdGltZRgKIAEoCSI1ChFSdW5Db21tYW5kUmVxdWVzdBIPCgdyZXBvX2lkGAEgASgJEg8KB2NvbW1hbmQYAiABKAkitQUKGFN1bW1hcnlEYXNoYm9hcmRSZXNwb25zZRI8Cg5yZXBvX3N1bW1hcmllcxgBIAMoCzIkLnYxLlN1bW1hcnlEYXNoYm9hcmRSZXNwb25zZS5TdW1tYXJ5EjwKDnBsYW5fc3VtbWFyaWVzGAIgAygLMiQudjEuU3VtbWFyeURhc2hib2FyZFJlc3BvbnNlLlN1bW1hcnkSEwoLY29uZmlnX3BhdGgYCiABKAkSEQoJZGF0YV9wYXRoGAsgASgJGu4CCgdTdW1tYXJ5EgoKAmlkGAEgASgJEh0KFWJhY2t1cHNfZmFpbGVkXzMwZGF5cxgCIAEoAxIjChtiYWNrdXBzX3dhcm5pbmdfbGFzdF8zMGRheXMYAyABKAMSIwobYmFja3Vwc19zdWNjZXNzX2xhc3RfMzBkYXlzGAQgASgDEiEKGWJ5dGVzX3NjYW5uZWRfbGFzdF8zMGRheXMYBSABKAMSHwoXYnl0ZXNfYWRkZWRfbGFzdF8zMGRheXMYBiABKAMSFwoPdG90YWxfc25hcHNob3RzGAcgASgDEhkKEWJ5dGVzX3NjYW5uZWRfYXZnGAggASgDEhcKD2J5dGVzX2FkZGVkX2F2ZxgJIAEoAxIbChNuZXh0X2JhY2t1cF90aW1lX21zGAogASgDEkAKDnJlY2VudF9iYWNrdXBzGAsgASgLMigudjEuU3VtbWFyeURhc2hib2FyZFJlc3BvbnNlLkJhY2t1cENoYXJ0GoMBCgtCYWNrdXBDaGFydBIPCgdmbG93X2lkGAEgAygDEhQKDHRpbWVzdGFtcF9tcxgCIAMoAxITCgtkdXJhdGlvbl9tcxgDIAMoAxIjCgZzdGF0dXMYBCADKA4yEy52MS5PcGVyYXRpb25TdGF0dXMSEwoLYnl0ZXNfYWRkZWQYBSADKAMiggEKG0dlbmVyYXRlUGFpcmluZ1Rva2VuUmVxdWVzdBINCgVsYWJlbBgBIAEoCRITCgt0dGxfc2Vjb25kcxgCIAEoAxIQCghtYXhfdXNlcxgDIAEoBRItCgtwZXJtaXNzaW9ucxgEIAMoCzIYLnYxLk11bHRpaG9zdC5QZXJtaXNzaW9uIi0KHEdlbmVyYXRlUGFpcmluZ1Rva2VuUmVzcG9uc2USDQoFdG9rZW4YASABKAky7goKCEJhY2tyZXN0EjEKCUdldENvbmZpZxIWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eRoKLnYxLkNvbmZpZyIAEiUKCVNldENvbmZpZxIKLnYxLkNvbmZpZxoKLnYxLkNvbmZpZyIAEjoKCVNldHVwU2Z0cBIULnYxLlNldHVwU2Z0cFJlcXVlc3QaFS52MS5TZXR1cFNmdHBSZXNwb25zZSIAEkwKD0NoZWNrUmVwb0V4aXN0cxIaLnYxLkNoZWNrUmVwb0V4aXN0c1JlcXVlc3QaGy52MS5DaGVja1JlcG9FeGlzdHNSZXNwb25zZSIAEisKB0FkZFJlcG8SEi52MS5BZGRSZXBvUmVxdWVzdBoKLnYxLkNvbmZpZyIAEi4KClJlbW92ZVJlcG8SEi
50eXBlcy5TdHJpbmdWYWx1ZRoKLnYxLkNvbmZpZyIAEkQKEkdldE9wZXJhdGlvbkV2ZW50cxIWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eRoSLnYxLk9wZXJhdGlvbkV2ZW50IgAwARI+Cg1HZXRPcGVyYXRpb25zEhgudjEuR2V0T3BlcmF0aW9uc1JlcXVlc3QaES52MS5PcGVyYXRpb25MaXN0IgASQwoNTGlzdFNuYXBzaG90cxIYLnYxLkxpc3RTbmFwc2hvdHNSZXF1ZXN0GhYudjEuUmVzdGljU25hcHNob3RMaXN0IgASUgoRTGlzdFNuYXBzaG90RmlsZXMSHC52MS5MaXN0U25hcHNob3RGaWxlc1JlcXVlc3QaHS52MS5MaXN0U25hcHNob3RGaWxlc1Jlc3BvbnNlIgASNQoGQmFja3VwEhEudjEuQmFja3VwUmVxdWVzdBoWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eSIAEj0KCkRvUmVwb1Rhc2sSFS52MS5Eb1JlcG9UYXNrUmVxdWVzdBoWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eSIAEjUKBkZvcmdldBIRLnYxLkZvcmdldFJlcXVlc3QaFi5nb29nbGUucHJvdG9idWYuRW1wdHkiABI/CgdSZXN0b3JlEhoudjEuUmVzdG9yZVNuYXBzaG90UmVxdWVzdBoWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eSIAEjUKBkNhbmNlbBIRLnR5cGVzLkludDY0VmFsdWUaFi5nb29nbGUucHJvdG9idWYuRW1wdHkiABI0CgdHZXRMb2dzEhIudjEuTG9nRGF0YVJlcXVlc3QaES50eXBlcy5CeXRlc1ZhbHVlIgAwARI4CgpSdW5Db21tYW5kEhUudjEuUnVuQ29tbWFuZFJlcXVlc3QaES50eXBlcy5JbnQ2NFZhbHVlIgASQQoOR2V0RG93bmxvYWRVUkwSGS52MS5HZXREb3dubG9hZFVSTFJlcXVlc3QaEi50eXBlcy5TdHJpbmdWYWx1ZSIAEkEKDENsZWFySGlzdG9yeRIXLnYxLkNsZWFySGlzdG9yeVJlcXVlc3QaFi5nb29nbGUucHJvdG9idWYuRW1wdHkiABI7ChBQYXRoQXV0b2NvbXBsZXRlEhIudHlwZXMuU3RyaW5nVmFsdWUaES50eXBlcy5TdHJpbmdMaXN0IgASTQoTR2V0U3VtbWFyeURhc2hib2FyZBIWLmdvb2dsZS5wcm90b2J1Zi5FbXB0eRocLnYxLlN1bW1hcnlEYXNoYm9hcmRSZXNwb25zZSIAElsKFEdlbmVyYXRlUGFpcmluZ1Rva2VuEh8udjEuR2VuZXJhdGVQYWlyaW5nVG9rZW5SZXF1ZXN0GiAudjEuR2VuZXJhdGVQYWlyaW5nVG9rZW5SZXNwb25zZSIAQixaKmdpdGh1Yi5jb20vZ2FyZXRoZ2VvcmdlL2JhY2tyZXN0L2dlbi9nby92MWIGcHJvdG8z", [file_v1_config, file_v1_restic, file_v1_operations, file_types_value, file_google_protobuf_empty, file_google_api_annotations]); /** * @generated from message v1.BackupRequest @@ -737,6 +737,65 @@ export type SummaryDashboardResponse_BackupChart = Message<"v1.SummaryDashboardR export const SummaryDashboardResponse_BackupChartSchema: GenMessage = /*@__PURE__*/ messageDesc(file_v1_service, 19, 1); +/** + * @generated from message v1.GeneratePairingTokenRequest + */ 
+export type GeneratePairingTokenRequest = Message<"v1.GeneratePairingTokenRequest"> & { + /** + * human-readable label for the token + * + * @generated from field: string label = 1; + */ + label: string; + + /** + * time-to-live in seconds (e.g. 3600 for 1 hour) + * + * @generated from field: int64 ttl_seconds = 2; + */ + ttlSeconds: bigint; + + /** + * max number of clients that can pair with this token, 0 for unlimited + * + * @generated from field: int32 max_uses = 3; + */ + maxUses: number; + + /** + * permissions to grant to clients that pair with this token + * + * @generated from field: repeated v1.Multihost.Permission permissions = 4; + */ + permissions: Multihost_Permission[]; +}; + +/** + * Describes the message v1.GeneratePairingTokenRequest. + * Use `create(GeneratePairingTokenRequestSchema)` to create a new message. + */ +export const GeneratePairingTokenRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_v1_service, 20); + +/** + * @generated from message v1.GeneratePairingTokenResponse + */ +export type GeneratePairingTokenResponse = Message<"v1.GeneratePairingTokenResponse"> & { + /** + * the opaque pairing token string: ":#" + * + * @generated from field: string token = 1; + */ + token: string; +}; + +/** + * Describes the message v1.GeneratePairingTokenResponse. + * Use `create(GeneratePairingTokenResponseSchema)` to create a new message. + */ +export const GeneratePairingTokenResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_v1_service, 21); + /** * @generated from service v1.Backrest */ @@ -931,6 +990,17 @@ export const Backrest: GenService<{ input: typeof EmptySchema; output: typeof SummaryDashboardResponseSchema; }, + /** + * GeneratePairingToken creates a new pairing token on the server that can be shared with clients to simplify peering. + * The token format is ":#" — an opaque string the client pastes when adding a known host. 
+ * + * @generated from rpc v1.Backrest.GeneratePairingToken + */ + generatePairingToken: { + methodKind: "unary"; + input: typeof GeneratePairingTokenRequestSchema; + output: typeof GeneratePairingTokenResponseSchema; + }, }> = /*@__PURE__*/ serviceDesc(file_v1_service, 0); diff --git a/webui/gen/ts/v1sync/syncservice_pb.ts b/webui/gen/ts/v1sync/syncservice_pb.ts index 532d75f8..96c37b6f 100644 --- a/webui/gen/ts/v1sync/syncservice_pb.ts +++ b/webui/gen/ts/v1sync/syncservice_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.0 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" // @generated from file v1sync/syncservice.proto (package v1sync, syntax proto3) /* eslint-disable */ @@ -21,7 +21,7 @@ import type { Message } from "@bufbuild/protobuf"; * Describes the file v1sync/syncservice.proto. */ export const file_v1sync_syncservice: GenFile = /*@__PURE__*/ - fileDesc("Chh2MXN5bmMvc3luY3NlcnZpY2UucHJvdG8SBnYxc3luYyIrChZTeW5jU3RhdGVTdHJlYW1SZXF1ZXN0EhEKCXN1YnNjcmliZRgBIAEoCCKbAgoJUGVlclN0YXRlEhgKEHBlZXJfaW5zdGFuY2VfaWQYASABKAkSEgoKcGVlcl9rZXlpZBgCIAEoCRImCgVzdGF0ZRgDIAEoDjIXLnYxc3luYy5Db25uZWN0aW9uU3RhdGUSFgoOc3RhdHVzX21lc3NhZ2UYBCABKAkSKQoLa25vd25fcGxhbnMYBSADKAsyFC52MXN5bmMuUGxhbk1ldGFkYXRhEikKC2tub3duX3JlcG9zGAYgAygLMhQudjFzeW5jLlJlcG9NZXRhZGF0YRIrCg1yZW1vdGVfY29uZmlnGAcgASgLMhQudjFzeW5jLlJlbW90ZUNvbmZpZxIdChVsYXN0X2hlYXJ0YmVhdF9taWxsaXMYCCABKAMiPQoTQXV0aGVudGljYXRlUmVxdWVzdBImCgtpbnN0YW5jZV9pZBgBIAEoCzIRLnYxLlNpZ25lZE1lc3NhZ2UiPgocR2V0T3BlcmF0aW9uTWV0YWRhdGFSZXNwb25zZRIOCgZvcF9pZHMYASADKAMSDgoGbW9kbm9zGAIgAygDIl0KDExvZ0RhdGFFbnRyeRIOCgZsb2dfaWQYASABKAkSEgoKb3duZXJfb3BpZBgCIAEoAxIaChJleHBpcmF0aW9uX3RzX3VuaXgYAyABKAMSDQoFY2h1bmsYBCABKAwiaAocU2V0QXZhaWxhYmxlUmVzb3VyY2VzUmVxdWVzdBIjCgVyZXBvcxgBIAMoCzIULnYxc3luYy5QbGFuTWV0YWRhdGESIwoFcGxhbnMYAiADKAsyFC52MXN5bmMuUmVwb01ldGFkYXRhIigKDFJlcG9NZXRhZGF0YRIKCgJpZBgBIAEoCRIMCgRndWlkGAIgASgJIhoKDFBsYW5NZXRhZGF0YRIKCgJpZBgBIAEoCSJ2ChBTZXRDb25maWdSZXF1ZXN0EhcKBXBsYW5zGAEgAygLMggu
djEuUGxhbhIXCgVyZXBvcxgCIAMoCzIILnYxLlJlcG8SFwoPcmVwb3NfdG9fZGVsZXRlGAMgAygJEhcKD3BsYW5zX3RvX2RlbGV0ZRgEIAMoCSJgCgxSZW1vdGVDb25maWcSDQoFbW9kbm8YASABKAUSDwoHdmVyc2lvbhgCIAEoBRIXCgVyZXBvcxgDIAMoCzIILnYxLlJlcG8SFwoFcGxhbnMYBCADKAsyCC52MS5QbGFuIl8KEkF1dGhvcml6YXRpb25Ub2tlbhIhCgpwdWJsaWNfa2V5GAEgASgLMg0udjEuUHVibGljS2V5EiYKC2luc3RhbmNlX2lkGAIgASgLMhEudjEuU2lnbmVkTWVzc2FnZSK1DwoOU3luY1N0cmVhbUl0ZW0SKwoOc2lnbmVkX21lc3NhZ2UYASABKAsyES52MS5TaWduZWRNZXNzYWdlSAASPwoJaGFuZHNoYWtlGAMgASgLMioudjFzeW5jLlN5bmNTdHJlYW1JdGVtLlN5bmNBY3Rpb25IYW5kc2hha2VIABI/CgloZWFydGJlYXQYBCABKAsyKi52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvbkhlYXJ0YmVhdEgAElAKEnJlcXVlc3Rfb3BlcmF0aW9ucxgUIAEoCzIyLnYxc3luYy5TeW5jU3RyZWFtSXRlbS5TeW5jQWN0aW9uUmVxdWVzdE9wZXJhdGlvbnNIABJQChJyZWNlaXZlX29wZXJhdGlvbnMYFSABKAsyMi52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvblJlY2VpdmVPcGVyYXRpb25zSAASSAoOcmVjZWl2ZV9jb25maWcYFiABKAsyLi52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvblJlY2VpdmVDb25maWdIABJACgpzZXRfY29uZmlnGBggASgLMioudjFzeW5jLlN5bmNTdHJlYW1JdGVtLlN5bmNBY3Rpb25TZXRDb25maWdIABJOChFyZXF1ZXN0X3Jlc291cmNlcxgZIAEoCzIxLnYxc3luYy5TeW5jU3RyZWFtSXRlbS5TeW5jQWN0aW9uUmVxdWVzdFJlc291cmNlc0gAEk4KEXJlY2VpdmVfcmVzb3VyY2VzGBogASgLMjEudjFzeW5jLlN5bmNTdHJlYW1JdGVtLlN5bmNBY3Rpb25SZWNlaXZlUmVzb3VyY2VzSAASQgoLcmVxdWVzdF9sb2cYHiABKAsyKy52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvblJlcXVlc3RMb2dIABJLChByZWNlaXZlX2xvZ19kYXRhGB8gASgLMi8udjFzeW5jLlN5bmNTdHJlYW1JdGVtLlN5bmNBY3Rpb25SZWNlaXZlTG9nRGF0YUgAEj4KCHRocm90dGxlGOgHIAEoCzIpLnYxc3luYy5TeW5jU3RyZWFtSXRlbS5TeW5jQWN0aW9uVGhyb3R0bGVIABp6ChNTeW5jQWN0aW9uSGFuZHNoYWtlEhgKEHByb3RvY29sX3ZlcnNpb24YASABKAMSIQoKcHVibGljX2tleRgCIAEoCzINLnYxLlB1YmxpY0tleRImCgtpbnN0YW5jZV9pZBgDIAEoCzIRLnYxLlNpZ25lZE1lc3NhZ2UaFQoTU3luY0FjdGlvbkhlYXJ0YmVhdBo/ChdTeW5jQWN0aW9uUmVjZWl2ZUNvbmZpZxIkCgZjb25maWcYASABKAsyFC52MXN5bmMuUmVtb3RlQ29uZmlnGnkKE1N5bmNBY3Rpb25TZXRDb25maWcSFwoFcmVwb3MYASADKAsyCC52MS5SZXBvEhcKBXBsYW5zGAIgAygLMggudjEuUGxhbhIXCg9yZXBvc190b19kZWxldGUYAyADKAkSFwoPcGxhbnNfdG9fZGVsZXRlGAQgAygJGhwKGlN5bmNBY3Rpb25SZXF1ZXN0
UmVzb3VyY2VzGmYKGlN5bmNBY3Rpb25SZWNlaXZlUmVzb3VyY2VzEiMKBXJlcG9zGAEgAygLMhQudjFzeW5jLlJlcG9NZXRhZGF0YRIjCgVwbGFucxgCIAMoCzIULnYxc3luYy5QbGFuTWV0YWRhdGEaKAoVU3luY0FjdGlvbkNvbm5lY3RSZXBvEg8KB3JlcG9faWQYASABKAkaRAobU3luY0FjdGlvblJlcXVlc3RPcGVyYXRpb25zEhEKCWhpZ2hfb3BpZBgBIAEoAxISCgpoaWdoX21vZG5vGAIgASgDGkAKG1N5bmNBY3Rpb25SZWNlaXZlT3BlcmF0aW9ucxIhCgVldmVudBgBIAEoCzISLnYxLk9wZXJhdGlvbkV2ZW50GiYKFFN5bmNBY3Rpb25SZXF1ZXN0TG9nEg4KBmxvZ19pZBgBIAEoCRqAAQoYU3luY0FjdGlvblJlY2VpdmVMb2dEYXRhEg4KBmxvZ19pZBgBIAEoCRISCgpvd25lcl9vcGlkGAIgASgDEhoKEmV4cGlyYXRpb25fdHNfdW5peBgDIAEoAxINCgVjaHVuaxgEIAEoDBIVCg1lcnJvcl9tZXNzYWdlGAUgASgJGiYKElN5bmNBY3Rpb25UaHJvdHRsZRIQCghkZWxheV9tcxgBIAEoAxo4ChlTeW5jRXN0YWJsaXNoU2hhcmVkU2VjcmV0EhsKB2VkMjU1MTkYAiABKAlSCmVkMjU1MTlwdWIitAEKE1JlcG9Db25uZWN0aW9uU3RhdGUSHAoYQ09OTkVDVElPTl9TVEFURV9VTktOT1dOEAASHAoYQ09OTkVDVElPTl9TVEFURV9QRU5ESU5HEAESHgoaQ09OTkVDVElPTl9TVEFURV9DT05ORUNURUQQAhIhCh1DT05ORUNUSU9OX1NUQVRFX1VOQVVUSE9SSVpFRBADEh4KGkNPTk5FQ1RJT05fU1RBVEVfTk9UX0ZPVU5EEARCCAoGYWN0aW9uKpwCCg9Db25uZWN0aW9uU3RhdGUSHAoYQ09OTkVDVElPTl9TVEFURV9VTktOT1dOEAASHAoYQ09OTkVDVElPTl9TVEFURV9QRU5ESU5HEAESHgoaQ09OTkVDVElPTl9TVEFURV9DT05ORUNURUQQAhIhCh1DT05ORUNUSU9OX1NUQVRFX0RJU0NPTk5FQ1RFRBADEh8KG0NPTk5FQ1RJT05fU1RBVEVfUkVUUllfV0FJVBAEEh8KG0NPTk5FQ1RJT05fU1RBVEVfRVJST1JfQVVUSBAKEiMKH0NPTk5FQ1RJT05fU1RBVEVfRVJST1JfUFJPVE9DT0wQCxIjCh9DT05ORUNUSU9OX1NUQVRFX0VSUk9SX0lOVEVSTkFMEAwyUwoTQmFja3Jlc3RTeW5jU2VydmljZRI8CgRTeW5jEhYudjFzeW5jLlN5bmNTdHJlYW1JdGVtGhYudjFzeW5jLlN5bmNTdHJlYW1JdGVtIgAoATABMmwKGEJhY2tyZXN0U3luY1N0YXRlU2VydmljZRJQChdHZXRQZWVyU3luY1N0YXRlc1N0cmVhbRIeLnYxc3luYy5TeW5jU3RhdGVTdHJlYW1SZXF1ZXN0GhEudjFzeW5jLlBlZXJTdGF0ZSIAMAFCMFouZ2l0aHViLmNvbS9nYXJldGhnZW9yZ2UvYmFja3Jlc3QvZ2VuL2dvL3Yxc3luY2IGcHJvdG8z", [file_v1_config, file_v1_crypto, file_v1_restic, file_v1_service, file_v1_operations, file_types_value, file_google_protobuf_empty, file_google_api_annotations, file_google_protobuf_any]); + 
fileDesc("Chh2MXN5bmMvc3luY3NlcnZpY2UucHJvdG8SBnYxc3luYyIrChZTeW5jU3RhdGVTdHJlYW1SZXF1ZXN0EhEKCXN1YnNjcmliZRgBIAEoCCKbAgoJUGVlclN0YXRlEhgKEHBlZXJfaW5zdGFuY2VfaWQYASABKAkSEgoKcGVlcl9rZXlpZBgCIAEoCRImCgVzdGF0ZRgDIAEoDjIXLnYxc3luYy5Db25uZWN0aW9uU3RhdGUSFgoOc3RhdHVzX21lc3NhZ2UYBCABKAkSKQoLa25vd25fcGxhbnMYBSADKAsyFC52MXN5bmMuUGxhbk1ldGFkYXRhEikKC2tub3duX3JlcG9zGAYgAygLMhQudjFzeW5jLlJlcG9NZXRhZGF0YRIrCg1yZW1vdGVfY29uZmlnGAcgASgLMhQudjFzeW5jLlJlbW90ZUNvbmZpZxIdChVsYXN0X2hlYXJ0YmVhdF9taWxsaXMYCCABKAMiPQoTQXV0aGVudGljYXRlUmVxdWVzdBImCgtpbnN0YW5jZV9pZBgBIAEoCzIRLnYxLlNpZ25lZE1lc3NhZ2UiPgocR2V0T3BlcmF0aW9uTWV0YWRhdGFSZXNwb25zZRIOCgZvcF9pZHMYASADKAMSDgoGbW9kbm9zGAIgAygDIl0KDExvZ0RhdGFFbnRyeRIOCgZsb2dfaWQYASABKAkSEgoKb3duZXJfb3BpZBgCIAEoAxIaChJleHBpcmF0aW9uX3RzX3VuaXgYAyABKAMSDQoFY2h1bmsYBCABKAwiaAocU2V0QXZhaWxhYmxlUmVzb3VyY2VzUmVxdWVzdBIjCgVyZXBvcxgBIAMoCzIULnYxc3luYy5QbGFuTWV0YWRhdGESIwoFcGxhbnMYAiADKAsyFC52MXN5bmMuUmVwb01ldGFkYXRhIigKDFJlcG9NZXRhZGF0YRIKCgJpZBgBIAEoCRIMCgRndWlkGAIgASgJIhoKDFBsYW5NZXRhZGF0YRIKCgJpZBgBIAEoCSJ2ChBTZXRDb25maWdSZXF1ZXN0EhcKBXBsYW5zGAEgAygLMggudjEuUGxhbhIXCgVyZXBvcxgCIAMoCzIILnYxLlJlcG8SFwoPcmVwb3NfdG9fZGVsZXRlGAMgAygJEhcKD3BsYW5zX3RvX2RlbGV0ZRgEIAMoCSKWAQocU2V0UmVtb3RlQ2xpZW50Q29uZmlnUmVxdWVzdBISCgpwZWVyX2tleWlkGAEgASgJEhcKBXJlcG9zGAIgAygLMggudjEuUmVwbxIXCgVwbGFucxgDIAMoCzIILnYxLlBsYW4SFwoPcmVwb3NfdG9fZGVsZXRlGAQgAygJEhcKD3BsYW5zX3RvX2RlbGV0ZRgFIAMoCSIfCh1TZXRSZW1vdGVDbGllbnRDb25maWdSZXNwb25zZSJgCgxSZW1vdGVDb25maWcSDQoFbW9kbm8YASABKAUSDwoHdmVyc2lvbhgCIAEoBRIXCgVyZXBvcxgDIAMoCzIILnYxLlJlcG8SFwoFcGxhbnMYBCADKAsyCC52MS5QbGFuIl8KEkF1dGhvcml6YXRpb25Ub2tlbhIhCgpwdWJsaWNfa2V5GAEgASgLMg0udjEuUHVibGljS2V5EiYKC2luc3RhbmNlX2lkGAIgASgLMhEudjEuU2lnbmVkTWVzc2FnZSLIEAoOU3luY1N0cmVhbUl0ZW0SKwoOc2lnbmVkX21lc3NhZ2UYASABKAsyES52MS5TaWduZWRNZXNzYWdlSAASPwoJaGFuZHNoYWtlGAMgASgLMioudjFzeW5jLlN5bmNTdHJlYW1JdGVtLlN5bmNBY3Rpb25IYW5kc2hha2VIABI/CgloZWFydGJlYXQYBCABKAsyKi52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvbkhlYXJ0YmVhdEgAElAKEm9wZXJhdGlvbl9tYW5pZmVzdBgUIAEoCzIyLnYxc3
luYy5TeW5jU3RyZWFtSXRlbS5TeW5jQWN0aW9uT3BlcmF0aW9uTWFuaWZlc3RIABJQChJyZWNlaXZlX29wZXJhdGlvbnMYFSABKAsyMi52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvblJlY2VpdmVPcGVyYXRpb25zSAASVwoWcmVxdWVzdF9vcGVyYXRpb25fZGF0YRgWIAEoCzI1LnYxc3luYy5TeW5jU3RyZWFtSXRlbS5TeW5jQWN0aW9uUmVxdWVzdE9wZXJhdGlvbkRhdGFIABJICg5yZWNlaXZlX2NvbmZpZxgXIAEoCzIuLnYxc3luYy5TeW5jU3RyZWFtSXRlbS5TeW5jQWN0aW9uUmVjZWl2ZUNvbmZpZ0gAEkAKCnNldF9jb25maWcYGCABKAsyKi52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvblNldENvbmZpZ0gAEk4KEXJlcXVlc3RfcmVzb3VyY2VzGBkgASgLMjEudjFzeW5jLlN5bmNTdHJlYW1JdGVtLlN5bmNBY3Rpb25SZXF1ZXN0UmVzb3VyY2VzSAASTgoRcmVjZWl2ZV9yZXNvdXJjZXMYGiABKAsyMS52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvblJlY2VpdmVSZXNvdXJjZXNIABJCCgtyZXF1ZXN0X2xvZxgeIAEoCzIrLnYxc3luYy5TeW5jU3RyZWFtSXRlbS5TeW5jQWN0aW9uUmVxdWVzdExvZ0gAEksKEHJlY2VpdmVfbG9nX2RhdGEYHyABKAsyLy52MXN5bmMuU3luY1N0cmVhbUl0ZW0uU3luY0FjdGlvblJlY2VpdmVMb2dEYXRhSAASPgoIdGhyb3R0bGUY6AcgASgLMikudjFzeW5jLlN5bmNTdHJlYW1JdGVtLlN5bmNBY3Rpb25UaHJvdHRsZUgAGpIBChNTeW5jQWN0aW9uSGFuZHNoYWtlEhgKEHByb3RvY29sX3ZlcnNpb24YASABKAMSIQoKcHVibGljX2tleRgCIAEoCzINLnYxLlB1YmxpY0tleRImCgtpbnN0YW5jZV9pZBgDIAEoCzIRLnYxLlNpZ25lZE1lc3NhZ2USFgoOcGFpcmluZ19zZWNyZXQYBCABKAkaFQoTU3luY0FjdGlvbkhlYXJ0YmVhdBo/ChdTeW5jQWN0aW9uUmVjZWl2ZUNvbmZpZxIkCgZjb25maWcYASABKAsyFC52MXN5bmMuUmVtb3RlQ29uZmlnGnkKE1N5bmNBY3Rpb25TZXRDb25maWcSFwoFcmVwb3MYASADKAsyCC52MS5SZXBvEhcKBXBsYW5zGAIgAygLMggudjEuUGxhbhIXCg9yZXBvc190b19kZWxldGUYAyADKAkSFwoPcGxhbnNfdG9fZGVsZXRlGAQgAygJGhwKGlN5bmNBY3Rpb25SZXF1ZXN0UmVzb3VyY2VzGmYKGlN5bmNBY3Rpb25SZWNlaXZlUmVzb3VyY2VzEiMKBXJlcG9zGAEgAygLMhQudjFzeW5jLlJlcG9NZXRhZGF0YRIjCgVwbGFucxgCIAMoCzIULnYxc3luYy5QbGFuTWV0YWRhdGEaKAoVU3luY0FjdGlvbkNvbm5lY3RSZXBvEg8KB3JlcG9faWQYASABKAkaPQobU3luY0FjdGlvbk9wZXJhdGlvbk1hbmlmZXN0Eg4KBm9wX2lkcxgBIAMoAxIOCgZtb2Rub3MYAiADKAMaMAoeU3luY0FjdGlvblJlcXVlc3RPcGVyYXRpb25EYXRhEg4KBm9wX2lkcxgBIAMoAxpAChtTeW5jQWN0aW9uUmVjZWl2ZU9wZXJhdGlvbnMSIQoFZXZlbnQYASABKAsyEi52MS5PcGVyYXRpb25FdmVudBomChRTeW5jQWN0aW9uUmVxdWVzdExvZxIOCgZsb2dfaWQYASABKAkagAEKGFN5bmNBY3Rpb25SZW
NlaXZlTG9nRGF0YRIOCgZsb2dfaWQYASABKAkSEgoKb3duZXJfb3BpZBgCIAEoAxIaChJleHBpcmF0aW9uX3RzX3VuaXgYAyABKAMSDQoFY2h1bmsYBCABKAwSFQoNZXJyb3JfbWVzc2FnZRgFIAEoCRomChJTeW5jQWN0aW9uVGhyb3R0bGUSEAoIZGVsYXlfbXMYASABKAMaLgoZU3luY0VzdGFibGlzaFNoYXJlZFNlY3JldBIRCgllY2RzYV9wdWIYAiABKAkitAEKE1JlcG9Db25uZWN0aW9uU3RhdGUSHAoYQ09OTkVDVElPTl9TVEFURV9VTktOT1dOEAASHAoYQ09OTkVDVElPTl9TVEFURV9QRU5ESU5HEAESHgoaQ09OTkVDVElPTl9TVEFURV9DT05ORUNURUQQAhIhCh1DT05ORUNUSU9OX1NUQVRFX1VOQVVUSE9SSVpFRBADEh4KGkNPTk5FQ1RJT05fU1RBVEVfTk9UX0ZPVU5EEARCCAoGYWN0aW9uKpwCCg9Db25uZWN0aW9uU3RhdGUSHAoYQ09OTkVDVElPTl9TVEFURV9VTktOT1dOEAASHAoYQ09OTkVDVElPTl9TVEFURV9QRU5ESU5HEAESHgoaQ09OTkVDVElPTl9TVEFURV9DT05ORUNURUQQAhIhCh1DT05ORUNUSU9OX1NUQVRFX0RJU0NPTk5FQ1RFRBADEh8KG0NPTk5FQ1RJT05fU1RBVEVfUkVUUllfV0FJVBAEEh8KG0NPTk5FQ1RJT05fU1RBVEVfRVJST1JfQVVUSBAKEiMKH0NPTk5FQ1RJT05fU1RBVEVfRVJST1JfUFJPVE9DT0wQCxIjCh9DT05ORUNUSU9OX1NUQVRFX0VSUk9SX0lOVEVSTkFMEAwyUwoTQmFja3Jlc3RTeW5jU2VydmljZRI8CgRTeW5jEhYudjFzeW5jLlN5bmNTdHJlYW1JdGVtGhYudjFzeW5jLlN5bmNTdHJlYW1JdGVtIgAoATABMtQBChhCYWNrcmVzdFN5bmNTdGF0ZVNlcnZpY2USUAoXR2V0UGVlclN5bmNTdGF0ZXNTdHJlYW0SHi52MXN5bmMuU3luY1N0YXRlU3RyZWFtUmVxdWVzdBoRLnYxc3luYy5QZWVyU3RhdGUiADABEmYKFVNldFJlbW90ZUNsaWVudENvbmZpZxIkLnYxc3luYy5TZXRSZW1vdGVDbGllbnRDb25maWdSZXF1ZXN0GiUudjFzeW5jLlNldFJlbW90ZUNsaWVudENvbmZpZ1Jlc3BvbnNlIgBCMFouZ2l0aHViLmNvbS9nYXJldGhnZW9yZ2UvYmFja3Jlc3QvZ2VuL2dvL3Yxc3luY2IGcHJvdG8z", [file_v1_config, file_v1_crypto, file_v1_restic, file_v1_service, file_v1_operations, file_types_value, file_google_protobuf_empty, file_google_api_annotations, file_google_protobuf_any]); /** * @generated from message v1sync.SyncStateStreamRequest @@ -292,6 +292,66 @@ export type SetConfigRequest = Message<"v1sync.SetConfigRequest"> & { export const SetConfigRequestSchema: GenMessage = /*@__PURE__*/ messageDesc(file_v1sync_syncservice, 8); +/** + * @generated from message v1sync.SetRemoteClientConfigRequest + */ +export type SetRemoteClientConfigRequest = Message<"v1sync.SetRemoteClientConfigRequest"> 
& { + /** + * The key ID of the connected peer to push config to. + * + * @generated from field: string peer_keyid = 1; + */ + peerKeyid: string; + + /** + * Repos to create or update on the peer. + * + * @generated from field: repeated v1.Repo repos = 2; + */ + repos: Repo[]; + + /** + * Plans to create or update on the peer. + * + * @generated from field: repeated v1.Plan plans = 3; + */ + plans: Plan[]; + + /** + * Repo IDs to delete on the peer. + * + * @generated from field: repeated string repos_to_delete = 4; + */ + reposToDelete: string[]; + + /** + * Plan IDs to delete on the peer. + * + * @generated from field: repeated string plans_to_delete = 5; + */ + plansToDelete: string[]; +}; + +/** + * Describes the message v1sync.SetRemoteClientConfigRequest. + * Use `create(SetRemoteClientConfigRequestSchema)` to create a new message. + */ +export const SetRemoteClientConfigRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_v1sync_syncservice, 9); + +/** + * @generated from message v1sync.SetRemoteClientConfigResponse + */ +export type SetRemoteClientConfigResponse = Message<"v1sync.SetRemoteClientConfigResponse"> & { +}; + +/** + * Describes the message v1sync.SetRemoteClientConfigResponse. + * Use `create(SetRemoteClientConfigResponseSchema)` to create a new message. + */ +export const SetRemoteClientConfigResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_v1sync_syncservice, 10); + /** * @generated from message v1sync.RemoteConfig */ @@ -326,7 +386,7 @@ export type RemoteConfig = Message<"v1sync.RemoteConfig"> & { * Use `create(RemoteConfigSchema)` to create a new message. */ export const RemoteConfigSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 9); + messageDesc(file_v1sync_syncservice, 11); /** * @generated from message v1sync.AuthorizationToken @@ -350,7 +410,7 @@ export type AuthorizationToken = Message<"v1sync.AuthorizationToken"> & { * Use `create(AuthorizationTokenSchema)` to create a new message. 
*/ export const AuthorizationTokenSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 10); + messageDesc(file_v1sync_syncservice, 12); /** * @generated from message v1sync.SyncStreamItem @@ -381,10 +441,10 @@ export type SyncStreamItem = Message<"v1sync.SyncStreamItem"> & { case: "heartbeat"; } | { /** - * @generated from field: v1sync.SyncStreamItem.SyncActionRequestOperations request_operations = 20; + * @generated from field: v1sync.SyncStreamItem.SyncActionOperationManifest operation_manifest = 20; */ - value: SyncStreamItem_SyncActionRequestOperations; - case: "requestOperations"; + value: SyncStreamItem_SyncActionOperationManifest; + case: "operationManifest"; } | { /** * @generated from field: v1sync.SyncStreamItem.SyncActionReceiveOperations receive_operations = 21; @@ -393,7 +453,13 @@ export type SyncStreamItem = Message<"v1sync.SyncStreamItem"> & { case: "receiveOperations"; } | { /** - * @generated from field: v1sync.SyncStreamItem.SyncActionReceiveConfig receive_config = 22; + * @generated from field: v1sync.SyncStreamItem.SyncActionRequestOperationData request_operation_data = 22; + */ + value: SyncStreamItem_SyncActionRequestOperationData; + case: "requestOperationData"; + } | { + /** + * @generated from field: v1sync.SyncStreamItem.SyncActionReceiveConfig receive_config = 23; */ value: SyncStreamItem_SyncActionReceiveConfig; case: "receiveConfig"; @@ -445,7 +511,7 @@ export type SyncStreamItem = Message<"v1sync.SyncStreamItem"> & { * Use `create(SyncStreamItemSchema)` to create a new message. */ export const SyncStreamItemSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11); + messageDesc(file_v1sync_syncservice, 13); /** * @generated from message v1sync.SyncStreamItem.SyncActionHandshake @@ -465,6 +531,13 @@ export type SyncStreamItem_SyncActionHandshake = Message<"v1sync.SyncStreamItem. 
* @generated from field: v1.SignedMessage instance_id = 3; */ instanceId?: SignedMessage; + + /** + * optional one-time secret from a pairing token, used to auto-authorize a new client + * + * @generated from field: string pairing_secret = 4; + */ + pairingSecret: string; }; /** @@ -472,7 +545,7 @@ export type SyncStreamItem_SyncActionHandshake = Message<"v1sync.SyncStreamItem. * Use `create(SyncStreamItem_SyncActionHandshakeSchema)` to create a new message. */ export const SyncStreamItem_SyncActionHandshakeSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 0); + messageDesc(file_v1sync_syncservice, 13, 0); /** * SyncActionHeartbeat is sent periodically to keep the connection alive. @@ -487,7 +560,7 @@ export type SyncStreamItem_SyncActionHeartbeat = Message<"v1sync.SyncStreamItem. * Use `create(SyncStreamItem_SyncActionHeartbeatSchema)` to create a new message. */ export const SyncStreamItem_SyncActionHeartbeatSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 1); + messageDesc(file_v1sync_syncservice, 13, 1); /** * @generated from message v1sync.SyncStreamItem.SyncActionReceiveConfig @@ -504,7 +577,7 @@ export type SyncStreamItem_SyncActionReceiveConfig = Message<"v1sync.SyncStreamI * Use `create(SyncStreamItem_SyncActionReceiveConfigSchema)` to create a new message. */ export const SyncStreamItem_SyncActionReceiveConfigSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 2); + messageDesc(file_v1sync_syncservice, 13, 2); /** * @generated from message v1sync.SyncStreamItem.SyncActionSetConfig @@ -536,7 +609,7 @@ export type SyncStreamItem_SyncActionSetConfig = Message<"v1sync.SyncStreamItem. * Use `create(SyncStreamItem_SyncActionSetConfigSchema)` to create a new message. 
*/ export const SyncStreamItem_SyncActionSetConfigSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 3); + messageDesc(file_v1sync_syncservice, 13, 3); /** * @generated from message v1sync.SyncStreamItem.SyncActionRequestResources @@ -549,7 +622,7 @@ export type SyncStreamItem_SyncActionRequestResources = Message<"v1sync.SyncStre * Use `create(SyncStreamItem_SyncActionRequestResourcesSchema)` to create a new message. */ export const SyncStreamItem_SyncActionRequestResourcesSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 4); + messageDesc(file_v1sync_syncservice, 13, 4); /** * @generated from message v1sync.SyncStreamItem.SyncActionReceiveResources @@ -571,7 +644,7 @@ export type SyncStreamItem_SyncActionReceiveResources = Message<"v1sync.SyncStre * Use `create(SyncStreamItem_SyncActionReceiveResourcesSchema)` to create a new message. */ export const SyncStreamItem_SyncActionReceiveResourcesSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 5); + messageDesc(file_v1sync_syncservice, 13, 5); /** * @generated from message v1sync.SyncStreamItem.SyncActionConnectRepo @@ -588,33 +661,46 @@ export type SyncStreamItem_SyncActionConnectRepo = Message<"v1sync.SyncStreamIte * Use `create(SyncStreamItem_SyncActionConnectRepoSchema)` to create a new message. */ export const SyncStreamItem_SyncActionConnectRepoSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 6); + messageDesc(file_v1sync_syncservice, 13, 6); /** - * @generated from message v1sync.SyncStreamItem.SyncActionRequestOperations + * @generated from message v1sync.SyncStreamItem.SyncActionOperationManifest */ -export type SyncStreamItem_SyncActionRequestOperations = Message<"v1sync.SyncStreamItem.SyncActionRequestOperations"> & { +export type SyncStreamItem_SyncActionOperationManifest = Message<"v1sync.SyncStreamItem.SyncActionOperationManifest"> & { /** - * The highest operation ID the requester has. 
- * - * @generated from field: int64 high_opid = 1; + * @generated from field: repeated int64 op_ids = 1; */ - highOpid: bigint; + opIds: bigint[]; /** - * The highest modno the requester has. - * - * @generated from field: int64 high_modno = 2; + * @generated from field: repeated int64 modnos = 2; */ - highModno: bigint; + modnos: bigint[]; }; /** - * Describes the message v1sync.SyncStreamItem.SyncActionRequestOperations. - * Use `create(SyncStreamItem_SyncActionRequestOperationsSchema)` to create a new message. + * Describes the message v1sync.SyncStreamItem.SyncActionOperationManifest. + * Use `create(SyncStreamItem_SyncActionOperationManifestSchema)` to create a new message. */ -export const SyncStreamItem_SyncActionRequestOperationsSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 7); +export const SyncStreamItem_SyncActionOperationManifestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_v1sync_syncservice, 13, 7); + +/** + * @generated from message v1sync.SyncStreamItem.SyncActionRequestOperationData + */ +export type SyncStreamItem_SyncActionRequestOperationData = Message<"v1sync.SyncStreamItem.SyncActionRequestOperationData"> & { + /** + * @generated from field: repeated int64 op_ids = 1; + */ + opIds: bigint[]; +}; + +/** + * Describes the message v1sync.SyncStreamItem.SyncActionRequestOperationData. + * Use `create(SyncStreamItem_SyncActionRequestOperationDataSchema)` to create a new message. + */ +export const SyncStreamItem_SyncActionRequestOperationDataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_v1sync_syncservice, 13, 8); /** * @generated from message v1sync.SyncStreamItem.SyncActionReceiveOperations @@ -631,7 +717,7 @@ export type SyncStreamItem_SyncActionReceiveOperations = Message<"v1sync.SyncStr * Use `create(SyncStreamItem_SyncActionReceiveOperationsSchema)` to create a new message. 
*/ export const SyncStreamItem_SyncActionReceiveOperationsSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 8); + messageDesc(file_v1sync_syncservice, 13, 9); /** * @generated from message v1sync.SyncStreamItem.SyncActionRequestLog @@ -648,7 +734,7 @@ export type SyncStreamItem_SyncActionRequestLog = Message<"v1sync.SyncStreamItem * Use `create(SyncStreamItem_SyncActionRequestLogSchema)` to create a new message. */ export const SyncStreamItem_SyncActionRequestLogSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 9); + messageDesc(file_v1sync_syncservice, 13, 10); /** * @generated from message v1sync.SyncStreamItem.SyncActionReceiveLogData @@ -695,7 +781,7 @@ export type SyncStreamItem_SyncActionReceiveLogData = Message<"v1sync.SyncStream * Use `create(SyncStreamItem_SyncActionReceiveLogDataSchema)` to create a new message. */ export const SyncStreamItem_SyncActionReceiveLogDataSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 10); + messageDesc(file_v1sync_syncservice, 13, 11); /** * @generated from message v1sync.SyncStreamItem.SyncActionThrottle @@ -712,21 +798,21 @@ export type SyncStreamItem_SyncActionThrottle = Message<"v1sync.SyncStreamItem.S * Use `create(SyncStreamItem_SyncActionThrottleSchema)` to create a new message. */ export const SyncStreamItem_SyncActionThrottleSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 11); + messageDesc(file_v1sync_syncservice, 13, 12); /** * @generated from message v1sync.SyncStreamItem.SyncEstablishSharedSecret */ export type SyncStreamItem_SyncEstablishSharedSecret = Message<"v1sync.SyncStreamItem.SyncEstablishSharedSecret"> & { /** - * a one-time-use ed25519 public key with a matching unshared private key. Used to perform a key exchange. + * a one-time-use ECDSA public key with a matching unshared private key. Used to perform a key exchange. * See https://pkg.go.dev/crypto/ecdh#PrivateKey.ECDH . 
* * base64 encoded public key * - * @generated from field: string ed25519 = 2 [json_name = "ed25519pub"]; + * @generated from field: string ecdsa_pub = 2; */ - ed25519: string; + ecdsaPub: string; }; /** @@ -734,7 +820,7 @@ export type SyncStreamItem_SyncEstablishSharedSecret = Message<"v1sync.SyncStrea * Use `create(SyncStreamItem_SyncEstablishSharedSecretSchema)` to create a new message. */ export const SyncStreamItem_SyncEstablishSharedSecretSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_v1sync_syncservice, 11, 12); + messageDesc(file_v1sync_syncservice, 13, 13); /** * @generated from enum v1sync.SyncStreamItem.RepoConnectionState @@ -772,7 +858,7 @@ export enum SyncStreamItem_RepoConnectionState { * Describes the enum v1sync.SyncStreamItem.RepoConnectionState. */ export const SyncStreamItem_RepoConnectionStateSchema: GenEnum = /*@__PURE__*/ - enumDesc(file_v1sync_syncservice, 11, 0); + enumDesc(file_v1sync_syncservice, 13, 0); /** * @generated from enum v1sync.ConnectionState @@ -858,6 +944,16 @@ export const BackrestSyncStateService: GenService<{ input: typeof SyncStateStreamRequestSchema; output: typeof PeerStateSchema; }, + /** + * SetRemoteClientConfig pushes a config change to a connected authorized client peer. + * + * @generated from rpc v1sync.BackrestSyncStateService.SetRemoteClientConfig + */ + setRemoteClientConfig: { + methodKind: "unary"; + input: typeof SetRemoteClientConfigRequestSchema; + output: typeof SetRemoteClientConfigResponseSchema; + }, }> = /*@__PURE__*/ serviceDesc(file_v1sync_syncservice, 1); diff --git a/webui/messages/en.json b/webui/messages/en.json index b249e1b9..993024e8 100644 --- a/webui/messages/en.json +++ b/webui/messages/en.json @@ -83,9 +83,9 @@ "settings_multihost_identity_tooltip": "Multihost identity is used to identify this instance in a multihost setup. 
It is cryptographically derived from the public key of this instance.", "settings_multihost_identity_placeholder": "Unique multihost identity", "button_copy": "copy", - "settings_multihost_authorized_clients": "Authorized Clients", - "settings_multihost_authorized_clients_tooltip": "Authorized clients are other Backrest instances that are allowed to access repositories on this instance.", - "settings_multihost_authorized_client_item": "Authorized Client", + "settings_multihost_authorized_clients": "Trusted Peers", + "settings_multihost_authorized_clients_tooltip": "Trusted peers are other Backrest instances that are allowed to connect and access repositories on this instance. Peers are added automatically via pairing tokens.", + "settings_multihost_authorized_client_item": "Trusted Peer", "settings_multihost_known_hosts": "Known Hosts", "settings_multihost_known_hosts_tooltip": "Known hosts are other Backrest instances that this instance can connect to.", "settings_multihost_known_host_item": "Known Host", @@ -350,6 +350,9 @@ "add_repo_modal_field_check_policy_heading": "Check Policy", "add_repo_modal_field_check_policy_help": "Check operations verify the integrity of your restic repo. They should run infrequently, around once a month is a good starting point. Check will, at a minimum, verify the structure of your repo. Optionally check can also redownload and verify checksums for pack data, this is useful if your storage may be prone to flipped bits or silent data loss.", "add_repo_modal_field_check_policy_tooltip": "The schedule on which check operations are run for this repository. Restic check operations verify the integrity of your repository by scanning the on-disk structures that make up your backup data. 
Check can optionally be configured to re-read and re-hash data, this is slow and can be bandwidth expensive but will catch any bitrot or silent corruption in the storage medium.", + "add_repo_modal_field_forget_policy": "Forget Policy", + "add_repo_modal_field_forget_policy_help": "Optionally schedule a repo-wide forget that applies a single retention policy across all plans. When enabled, per-plan retention policies are disabled.", + "add_plan_modal_retention_managed_by_repo": "Retention is managed at the repository level via the repo's Forget Policy. Edit the repository to change retention settings.", "add_repo_modal_field_read_data": "Read Data %", "add_repo_modal_field_read_data_tooltip": "The percentage of pack data in this repository that will be read and verified. Higher values will use more bandwidth (e.g. 100% will re-read the entire repository on each check).", "add_repo_modal_field_command_modifiers": "Command Modifiers", diff --git a/webui/src/api/logState.ts b/webui/src/api/logState.ts index 5397af90..a5cff389 100644 --- a/webui/src/api/logState.ts +++ b/webui/src/api/logState.ts @@ -26,15 +26,18 @@ export const syncStateFromRequest = ( state: OplogState, req: GetOperationsRequest, onError?: (e: Error) => void, + onInitialLoad?: () => void, ): (() => void) => { getOperations(req) .then((res) => { state.add(...res); + onInitialLoad?.(); }) .catch((e) => { if (onError) { onError(e); } + onInitialLoad?.(); }); const cbHelper = (event?: OperationEvent, err?: Error) => { diff --git a/webui/src/app/App.tsx b/webui/src/app/App.tsx index d419adfc..28e94593 100644 --- a/webui/src/app/App.tsx +++ b/webui/src/app/App.tsx @@ -13,6 +13,7 @@ import { FiEdit2, FiMenu, FiHome, + FiChevronRight, } from "react-icons/fi"; import { @@ -55,7 +56,7 @@ import LogoSvg from "../../assets/logo.svg"; import { keyBy } from "../lib/util"; import { Code } from "@connectrpc/connect"; import { LoginModal } from "../features/auth/LoginModal"; -import { backrestService, setAuthToken } 
from "../api/client"; +import { backrestService, syncStateService, setAuthToken } from "../api/client"; import { useConfig } from "./provider"; import { shouldShowSettings } from "../state/configutil"; import { OpSelector, OpSelectorSchema } from "../../gen/ts/v1/service_pb"; @@ -70,7 +71,7 @@ import { } from "react-router-dom"; import { MainContentAreaTemplate } from "../components/layout/MainContentArea"; import { create } from "@bufbuild/protobuf"; -import { PeerState, RepoMetadata } from "../../gen/ts/v1sync/syncservice_pb"; +import { PeerState, PlanMetadata, RepoMetadata, SetRemoteClientConfigRequestSchema } from "../../gen/ts/v1sync/syncservice_pb"; import { useSyncStates } from "../state/peerStates"; import * as m from "../paraglide/messages"; import { Link } from "../components/ui/link"; @@ -133,7 +134,26 @@ const RepoViewContainer = () => { key={repoId} > {repo ? ( - + <> + {repo.originInstanceId && ( + + This is a remote repo from{" "} + {repo.originInstanceId}. Operation history only + includes backups run locally and may be incomplete. + + )} + + ) : ( )} @@ -176,6 +196,40 @@ const RemoteRepoViewContainer = () => { ); }; +const RemotePlanViewContainer = () => { + const { peerInstanceId, planId } = useParams(); + const peerStates = useSyncStates(); + + const peerState = peerStates.find( + (state) => state.peerInstanceId === peerInstanceId, + ); + const peerPlan = (peerState?.knownPlans || []).find((p) => p.id === planId); + + return ( + + {peerPlan ? 
( + + ) : ( + + )} + + ); +}; + const PlanViewContainer = () => { const { planId } = useParams(); const [config, setConfig] = useConfig(); @@ -203,6 +257,175 @@ const PlanViewContainer = () => { ); }; +const PeerNavItem = ({ + icon, + typeLabel, + name, + active, + onClick, + onEdit, +}: { + icon: React.ReactNode; + typeLabel: string; + name: string; + active: boolean; + onClick: () => void; + onEdit?: (e: React.MouseEvent) => void; +}) => ( + + + {icon} + + + {typeLabel} + + + {name} + + {onEdit && ( + + { + e.stopPropagation(); + onEdit(e); + }} + > + + + + )} + +); + +const PeerInstanceSection = ({ + peerState, + sel, + remoteConfig, + isActive, + handleNav, + handleRemoteRepoEdit, + handleRemotePlanEdit, +}: { + peerState: PeerState; + sel: OpSelector; + remoteConfig: PeerState["remoteConfig"]; + isActive: (path: string) => boolean; + handleNav: (path: string) => void; + handleRemoteRepoEdit: (repo: Repo) => void; + handleRemotePlanEdit: (plan: Plan) => void; +}) => { + const [expanded, setExpanded] = useState(false); + + return ( + + setExpanded((prev) => !prev)} + > + + + + + + + + {peerState.peerInstanceId} + + + + {expanded && ( + <> + {peerState.knownRepos.map((repo: RepoMetadata) => { + const repoPath = `/peer/${peerState.peerInstanceId}/repo/${repo.id}`; + const editableRepo = remoteConfig?.repos?.find( + (r: Repo) => r.guid === repo.guid, + ); + return ( + + } + typeLabel="repo" + name={repo.id} + active={isActive(repoPath)} + onClick={() => handleNav(repoPath)} + onEdit={ + editableRepo + ? () => handleRemoteRepoEdit(editableRepo) + : undefined + } + /> + ); + })} + + {peerState.knownPlans.map((planMeta: PlanMetadata) => { + const planPath = `/peer/${peerState.peerInstanceId}/plan/${planMeta.id}`; + const editablePlan = remoteConfig?.plans?.find( + (p: Plan) => p.id === planMeta.id, + ); + return ( + + } + typeLabel="plan" + name={planMeta.id} + active={isActive(planPath)} + onClick={() => handleNav(planPath)} + onEdit={ + editablePlan + ? 
() => handleRemotePlanEdit(editablePlan) + : undefined + } + /> + ); + })} + + )} + + ); +}; + const SidebarPlanItem = React.memo( ({ plan, @@ -329,6 +552,17 @@ const SidebarRepoItem = React.memo( > {repo.id} + {repo.originInstanceId && ( + + {repo.originInstanceId} + + )} void }) => { if (!config) return null; const configPlans = config.plans || []; - const configRepos = config.repos || []; + const localRepos = (config.repos || []).filter( + (r) => !r.originInstanceId, + ); + const remoteRepos = (config.repos || []).filter( + (r) => !!r.originInstanceId, + ); return ( void }) => { > {m.app_menu_add_repo()} - {configRepos.map((repo) => ( + {localRepos.map((repo) => ( void }) => { }} /> ))} + {remoteRepos.length > 0 && ( + <> + + Remote + + {remoteRepos.map((repo) => ( + { + const { AddRepoModal } = + await import("../features/repositories/AddRepoModal"); + showModal(); + onClose?.(); + }} + /> + ))} + + )} @@ -506,54 +774,63 @@ const SidebarContent = ({ onClose }: { onClose?: () => void }) => { {peerStates.map((peerState) => { - // Logic to get peer config if needed, filtering handled by original logic - // Assuming we list all peerStates derived from hook const sel = create(OpSelectorSchema, { originalInstanceKeyid: peerState.peerKeyid, }); - return ( - - - - - - - {peerState.peerInstanceId} - - + const remoteConfig = peerState.remoteConfig; - {/* Nested Repos for Peer */} - {peerState.knownRepos.map((repo: RepoMetadata) => { - const repoPath = `/peer/${peerState.peerInstanceId}/repo/${repo.id}`; - const active = isActive(repoPath); - return ( - handleNav(repoPath)} - > - - - - - {repo.id} - - - ); - })} - + const handleRemoteRepoEdit = async (repo: Repo) => { + const { AddRepoModal } = + await import("../features/repositories/AddRepoModal"); + showModal( + { + await syncStateService.setRemoteClientConfig( + create(SetRemoteClientConfigRequestSchema, { + peerKeyid: peerState.peerKeyid, + repos: [updatedRepo], + }), + ); + alerts.success("Remote repo updated"); + 
}} + />, + ); + onClose?.(); + }; + + const handleRemotePlanEdit = async (plan: Plan) => { + const { AddPlanModal } = + await import("../features/plans/AddPlanModal"); + showModal( + { + await syncStateService.setRemoteClientConfig( + create(SetRemoteClientConfigRequestSchema, { + peerKeyid: peerState.peerKeyid, + plans: [updatedPlan], + }), + ); + alerts.success("Remote plan updated"); + }} + />, + ); + onClose?.(); + }; + + return ( + ); })} @@ -708,6 +985,10 @@ export const App: React.FC = () => { path="/peer/:peerInstanceId/repo/:repoId" element={} /> + } + /> = ({ !e.open && onClose()} + closeOnInteractOutside={false} size={rootSize} scrollBehavior="inside" > diff --git a/webui/src/components/common/RetentionPolicyView.tsx b/webui/src/components/common/RetentionPolicyView.tsx new file mode 100644 index 00000000..d5a118fc --- /dev/null +++ b/webui/src/components/common/RetentionPolicyView.tsx @@ -0,0 +1,189 @@ +import { Flex, Stack, Card, Grid } from "@chakra-ui/react"; +import { useMemo } from "react"; +import * as m from "../../paraglide/messages"; +import { Button } from "../ui/button"; +import { Tooltip } from "../ui/tooltip"; +import { NumberInputField } from "./NumberInput"; + +export const RetentionPolicyView = ({ + schedule, + retention, + onChange, +}: { + schedule?: any; + retention: any; + onChange: (v: any) => void; +}) => { + const determineMode = () => { + if (!retention) return "policyTimeBucketed"; + if (retention.policyKeepLastN) return "policyKeepLastN"; + if (retention.policyKeepAll) return "policyKeepAll"; + if (retention.policyTimeBucketed) return "policyTimeBucketed"; + return "policyTimeBucketed"; + }; + const mode = determineMode(); + + const handleModeChange = (newMode: string) => { + if (newMode === "policyKeepLastN") { + onChange({ policyKeepLastN: 30 }); + } else if (newMode === "policyTimeBucketed") { + onChange({ + policyTimeBucketed: { + yearly: 0, + monthly: 3, + weekly: 4, + daily: 7, + hourly: 24, + keepLastN: 0, + }, + }); + } 
else { + onChange({ policyKeepAll: true }); + } + }; + + const cronIsSubHourly = useMemo( + () => + schedule?.schedule?.value && + !/^\d+ /.test(schedule.schedule.value) && + schedule.schedule.case === "cron", + [schedule], + ); + + const updateRetentionField = (path: string[], val: any) => { + const next = { ...retention }; + let curr = next; + for (let i = 0; i < path.length - 1; i++) { + curr[path[i]] = curr[path[i]] ? { ...curr[path[i]] } : {}; + curr = curr[path[i]]; + } + curr[path[path.length - 1]] = val; + onChange(next); + }; + + return ( + + + + + {[ + { + value: "policyKeepLastN", + label: m.add_plan_modal_retention_policy_mode_count_label(), + tooltip: + m.add_plan_modal_retention_policy_keep_last_n_tooltip(), + }, + { + value: "policyTimeBucketed", + label: m.add_plan_modal_retention_policy_mode_time_label(), + tooltip: + m.add_plan_modal_retention_policy_time_bucketed_tooltip(), + }, + { + value: "policyKeepAll", + label: m.add_plan_modal_retention_policy_mode_none_label(), + tooltip: m.add_plan_modal_retention_policy_keep_all_tooltip(), + }, + ].map((option) => ( + + + + ))} + + + + + {mode === "policyKeepAll" && ( +

{m.add_plan_modal_retention_policy_keep_all_warning()}

+ )} + + {mode === "policyKeepLastN" && ( + + onChange({ ...schedule, policyKeepLastN: e.valueAsNumber }) + } + /> + )} + + {mode === "policyTimeBucketed" && ( + + + updateRetentionField( + ["policyTimeBucketed", "hourly"], + e.valueAsNumber, + ) + } + /> + + updateRetentionField( + ["policyTimeBucketed", "daily"], + e.valueAsNumber, + ) + } + /> + + updateRetentionField( + ["policyTimeBucketed", "weekly"], + e.valueAsNumber, + ) + } + /> + + updateRetentionField( + ["policyTimeBucketed", "monthly"], + e.valueAsNumber, + ) + } + /> + + updateRetentionField( + ["policyTimeBucketed", "yearly"], + e.valueAsNumber, + ) + } + /> + + updateRetentionField( + ["policyTimeBucketed", "keepLastN"], + e.valueAsNumber, + ) + } + /> + + )} +
+
+
+ ); +}; diff --git a/webui/src/components/common/SectionCard.tsx b/webui/src/components/common/SectionCard.tsx new file mode 100644 index 00000000..b60b50b3 --- /dev/null +++ b/webui/src/components/common/SectionCard.tsx @@ -0,0 +1,62 @@ +import React from "react"; +import { Box, Flex, Text } from "@chakra-ui/react"; +import { IconType } from "react-icons"; + +interface SectionCardProps { + id?: string; + icon?: React.ReactElement | IconType; + title: string; + description?: string; + children: React.ReactNode; + cardRef?: React.Ref; +} + +export const SectionCard: React.FC = ({ + id, + icon, + title, + description, + children, + cardRef, +}) => { + return ( + + + {icon && ( + + {React.isValidElement(icon) + ? icon + : React.createElement(icon as IconType, { size: 16 })} + + )} + + + {title} + + {description && ( + + {description} + + )} + + + {children} + + ); +}; diff --git a/webui/src/components/common/StatusPill.tsx b/webui/src/components/common/StatusPill.tsx new file mode 100644 index 00000000..d7b09eba --- /dev/null +++ b/webui/src/components/common/StatusPill.tsx @@ -0,0 +1,47 @@ +import React from "react"; +import { Box } from "@chakra-ui/react"; + +type PillTone = "neutral" | "ok" | "warn" | "error" | "info" | "mono"; + +interface StatusPillProps { + tone?: PillTone; + children: React.ReactNode; +} + +const toneStyles: Record = { + neutral: { bg: "gray.100", color: "gray.700" }, + ok: { bg: "green.100", color: "green.700" }, + warn: { bg: "orange.100", color: "orange.700" }, + error: { bg: "red.100", color: "red.700" }, + info: { bg: "blue.100", color: "blue.700" }, + mono: { bg: "gray.100", color: "gray.900" }, +}; + +export const StatusPill: React.FC = ({ + tone = "neutral", + children, +}) => { + const styles = toneStyles[tone]; + return ( + + {children} + + ); +}; diff --git a/webui/src/components/common/SyncStateIcon.tsx b/webui/src/components/common/SyncStateIcon.tsx index 53f8bdbb..c5ca8ab7 100644 --- 
a/webui/src/components/common/SyncStateIcon.tsx +++ b/webui/src/components/common/SyncStateIcon.tsx @@ -1,4 +1,3 @@ -import React, { useState } from "react"; import { PeerState, ConnectionState, @@ -13,7 +12,7 @@ import { FiLoader, FiHelpCircle, } from "react-icons/fi"; -import { Box, Spinner, Text } from "@chakra-ui/react"; +import { Box } from "@chakra-ui/react"; import { Tooltip } from "../ui/tooltip"; export const PeerStateConnectionStatusIcon = ({ @@ -133,8 +132,24 @@ export const PeerStateConnectionStatusIcon = ({ }; return ( - - + + {getStatusIcon()} diff --git a/webui/src/components/common/ToggleField.tsx b/webui/src/components/common/ToggleField.tsx new file mode 100644 index 00000000..94969158 --- /dev/null +++ b/webui/src/components/common/ToggleField.tsx @@ -0,0 +1,76 @@ +import React from "react"; +import { Box, Flex, Text } from "@chakra-ui/react"; + +interface ToggleFieldProps { + checked: boolean; + onChange: (checked: boolean) => void; + label: React.ReactNode; + hint?: string; + disabled?: boolean; +} + +export const ToggleField: React.FC = ({ + checked, + onChange, + label, + hint, + disabled = false, +}) => { + return ( + + {/* Track */} + { + e.preventDefault(); + if (!disabled) onChange(!checked); + }} + > + {/* Thumb */} + + + + + {label} + + {hint && ( + + {hint} + + )} + + !disabled && onChange(e.target.checked)} + style={{ display: "none" }} + /> + + ); +}; diff --git a/webui/src/components/common/TwoPaneModal.tsx b/webui/src/components/common/TwoPaneModal.tsx new file mode 100644 index 00000000..b4be3048 --- /dev/null +++ b/webui/src/components/common/TwoPaneModal.tsx @@ -0,0 +1,404 @@ +import React, { useCallback, useEffect, useRef, useState } from "react"; +import { Box, Flex, Text, Portal } from "@chakra-ui/react"; +import { + DialogBackdrop, + DialogContent, + DialogPositioner, + DialogRoot, +} from "@chakra-ui/react"; +import { Button } from "../ui/button"; +import { IconType } from "react-icons"; +import { + FiAlertCircle, + 
FiCheck, + FiLoader, + FiX, +} from "react-icons/fi"; + +// --- Section definition --- +export interface SectionDef { + id: string; + label: string; + icon: React.ReactElement | IconType; +} + +// --- TwoPaneModal props --- +interface TwoPaneModalProps { + isOpen: boolean; + onClose: () => void; + + // Header + title: string; + subtitle?: string; + headerIcon?: React.ReactElement; + headerExtra?: React.ReactNode; + + // Sections & nav + sections: SectionDef[]; + children: React.ReactNode; + + // Save bar + dirty?: boolean; + dirtyCount?: number; + errorCount?: number; + onSave?: () => void; + onDiscard?: () => void; + saving?: boolean; + saveDisabled?: boolean; + + // Footer override (if you don't want the default save bar) + footer?: React.ReactNode; + + width?: string | number; +} + +export const TwoPaneModal: React.FC = ({ + isOpen, + onClose, + title, + subtitle, + headerIcon, + headerExtra, + sections, + children, + dirty = false, + dirtyCount = 0, + errorCount = 0, + onSave, + onDiscard, + saving = false, + saveDisabled = false, + footer, + width = "900px", +}) => { + const scrollRef = useRef(null); + const sectionRefs = useRef>({}); + const [activeSection, setActiveSection] = useState(sections[0]?.id || ""); + + // Scroll-spy + useEffect(() => { + const root = scrollRef.current; + if (!root) return; + const onScroll = () => { + const top = root.scrollTop; + let current = sections[0]?.id || ""; + for (const s of sections) { + const el = sectionRefs.current[s.id]; + if (!el) continue; + if (el.offsetTop - 80 <= top) current = s.id; + } + setActiveSection(current); + }; + root.addEventListener("scroll", onScroll, { passive: true }); + onScroll(); + return () => root.removeEventListener("scroll", onScroll); + }, [sections]); + + const scrollTo = useCallback( + (id: string) => { + const el = sectionRefs.current[id]; + const root = scrollRef.current; + if (!el || !root) return; + root.scrollTo({ top: el.offsetTop - 56, behavior: "smooth" }); + 
setActiveSection(id); + }, + [], + ); + + // Provide ref-registration function to children via context + const registerRef = useCallback((id: string, el: HTMLElement | null) => { + sectionRefs.current[id] = el; + }, []); + + return ( + !e.open && onClose()} + closeOnInteractOutside={false} + size="xl" + > + + + + + {/* Header */} + + {headerIcon && ( + + {headerIcon} + + )} + + + + {title} + + {headerExtra} + + {subtitle && ( + + {subtitle} + + )} + + + + + + + {/* Two-pane body */} + + {/* Nav rail */} + + {sections.map((s) => { + const isActive = activeSection === s.id; + return ( + scrollTo(s.id)} + display="flex" + alignItems="center" + gap={2} + w="full" + py={1.5} + px={2.5} + bg={isActive ? "bg.muted" : "transparent"} + border={0} + borderRadius="sm" + fontSize="sm" + fontWeight={isActive ? "medium" : "normal"} + color="fg" + cursor="pointer" + textAlign="left" + mb={0.5} + _hover={{ bg: isActive ? "bg.muted" : "bg.emphasized" }} + > + + {React.isValidElement(s.icon) + ? s.icon + : React.createElement(s.icon as IconType, { + size: 14, + })} + + {s.label} + + ); + })} + + + {/* Scrolling content */} + + + {children} + + + + + + {/* Footer / save bar */} + {footer ? ( + + {footer} + + ) : ( + + + {dirty ? ( + <> + + + {dirtyCount} unsaved{" "} + {dirtyCount === 1 ? "change" : "changes"} + + {errorCount > 0 && ( + + + {errorCount} {errorCount === 1 ? 
"error" : "errors"} to + fix + + )} + + ) : ( + <> + + + + + All changes saved + + + )} + + + + + + )} + + + + + ); +}; + +// --- Context for child sections to register refs --- +interface TwoPaneContextValue { + registerRef: (id: string, el: HTMLElement | null) => void; +} + +const TwoPaneContext = React.createContext({ + registerRef: () => {}, +}); + +export const useTwoPaneRef = () => React.useContext(TwoPaneContext); + +// --- TwoPaneSection: wraps each section's content with ref registration --- +interface TwoPaneSectionProps { + id: string; + children: React.ReactNode; +} + +export const TwoPaneSection: React.FC = ({ + id, + children, +}) => { + const { registerRef } = useTwoPaneRef(); + const ref = useCallback( + (el: HTMLDivElement | null) => { + registerRef(id, el); + }, + [id, registerRef], + ); + + return ( + + {children} + + ); +}; diff --git a/webui/src/components/ui/tooltip.tsx b/webui/src/components/ui/tooltip.tsx index 109e0c2b..f9dbc2dc 100644 --- a/webui/src/components/ui/tooltip.tsx +++ b/webui/src/components/ui/tooltip.tsx @@ -7,6 +7,7 @@ export interface TooltipProps extends ChakraTooltip.RootProps { portalRef?: React.RefObject; content: React.ReactNode; contentProps?: ChakraTooltip.ContentProps; + positionerProps?: ChakraTooltip.PositionerProps; disabled?: boolean; } @@ -19,6 +20,7 @@ export const Tooltip = React.forwardRef( portalled, content, contentProps, + positionerProps, portalRef, ...rest } = props; @@ -31,7 +33,7 @@ export const Tooltip = React.forwardRef( {children} {/* @ts-ignore */} - + {showArrow && } {content} diff --git a/webui/src/features/dashboard/SummaryDashboard.tsx b/webui/src/features/dashboard/SummaryDashboard.tsx index 5908e343..293464c5 100644 --- a/webui/src/features/dashboard/SummaryDashboard.tsx +++ b/webui/src/features/dashboard/SummaryDashboard.tsx @@ -362,6 +362,7 @@ const MultihostSummary = ({ }: { multihostConfig: Multihost | null; }) => { + const [config] = useConfig(); const allPeerStates = useSyncStates(); 
const peerStates = useMemo(() => { const map = new Map(); @@ -371,6 +372,19 @@ const MultihostSummary = ({ return map; }, [allPeerStates]); + // Build a map of host instance ID -> repo IDs shared by that host (repos in local config with originInstanceId set) + const sharedReposByHost = useMemo(() => { + const map = new Map(); + for (const repo of config?.repos || []) { + if (repo.originInstanceId) { + const repos = map.get(repo.originInstanceId) || []; + repos.push(repo.id); + map.set(repo.originInstanceId, repos); + } + } + return map; + }, [config?.repos]); + const knownHostTiles: JSX.Element[] = []; for (const cfgPeer of multihostConfig?.knownHosts || []) { const peerState = peerStates.get(cfgPeer.keyid); @@ -378,7 +392,11 @@ const MultihostSummary = ({ continue; } knownHostTiles.push( - , + , ); } @@ -411,7 +429,13 @@ const MultihostSummary = ({ ); }; -const PeerStateTile = ({ peerState }: { peerState: PeerState }) => { +const PeerStateTile = ({ + peerState, + sharedRepoIds, +}: { + peerState: PeerState; + sharedRepoIds?: string[]; +}) => { const state = useState(1); useEffect(() => { // Force rerender every second to update the last heartbeat time @@ -449,6 +473,48 @@ const PeerStateTile = ({ peerState }: { peerState: PeerState }) => { /> } /> + {peerState.knownRepos.length > 0 && ( + + {peerState.knownRepos.map((repo) => ( + + {repo.id} + + ))} + + } + /> + )} + {sharedRepoIds && sharedRepoIds.length > 0 && ( + + {sharedRepoIds.map((repoId) => ( + + {repoId} + + ))} + + } + /> + )} diff --git a/webui/src/features/operations/OperationListView.tsx b/webui/src/features/operations/OperationListView.tsx index e1d76dc1..95312416 100644 --- a/webui/src/features/operations/OperationListView.tsx +++ b/webui/src/features/operations/OperationListView.tsx @@ -57,6 +57,7 @@ export const OperationListView = ({ return syncStateFromRequest(logState, req, (e) => { alerts.error("Failed to fetch operations: " + e.message); + }, () => { setLoading(false); }); }, [req ? 
toJsonString(GetOperationsRequestSchema, req) : ""]); diff --git a/webui/src/features/operations/OperationTreeView.tsx b/webui/src/features/operations/OperationTreeView.tsx index 259468cc..634b227d 100644 --- a/webui/src/features/operations/OperationTreeView.tsx +++ b/webui/src/features/operations/OperationTreeView.tsx @@ -146,6 +146,7 @@ export const OperationTreeView = ({ return syncStateFromRequest(logState, req, (err) => { alerts.error("API error: " + err.message); + }, () => { setLoading(false); }); }, [toJsonString(GetOperationsRequestSchema, req)]); diff --git a/webui/src/features/plans/AddPlanModal.tsx b/webui/src/features/plans/AddPlanModal.tsx index ec87491e..0a71ee03 100644 --- a/webui/src/features/plans/AddPlanModal.tsx +++ b/webui/src/features/plans/AddPlanModal.tsx @@ -2,30 +2,22 @@ import { Flex, Stack, Input, - Textarea, createListCollection, SelectContent, SelectItem, - SelectLabel, SelectRoot, SelectTrigger, SelectValueText, - IconButton, - Card, - Box, - HStack, Text as CText, - Grid, Code, } from "@chakra-ui/react"; -import { Checkbox } from "../../components/ui/checkbox"; import { AccordionItem, AccordionItemContent, AccordionItemTrigger, AccordionRoot, } from "../../components/ui/accordion"; -import React, { useEffect, useState, useMemo } from "react"; +import { useEffect, useState } from "react"; import { useShowModal } from "../../components/common/ModalManager"; import { ConfigSchema, @@ -33,65 +25,46 @@ import { RetentionPolicySchema, Schedule_Clock, type Plan, - type RetentionPolicy, - type Schedule, } from "../../../gen/ts/v1/config_pb"; -import { FiPlus as Plus, FiMinus as Minus, FiMenu } from "react-icons/fi"; -import { BsCalculator as Calculator } from "react-icons/bs"; +import { FiFileText, FiFolder, FiClock, FiArchive, FiSliders } from "react-icons/fi"; import { alerts, formatErrorAlert } from "../../components/common/Alerts"; import { namePattern } from "../../lib/util"; import { ConfirmButton } from 
"../../components/common/SpinButton"; import { useConfig } from "../../app/provider"; import { backrestService } from "../../api/client"; -import { - DndContext, - closestCenter, - KeyboardSensor, - PointerSensor, - useSensor, - useSensors, - DragEndEvent, -} from "@dnd-kit/core"; -import { - arrayMove, - SortableContext, - sortableKeyboardCoordinates, - verticalListSortingStrategy, - useSortable, -} from "@dnd-kit/sortable"; -import { CSS } from "@dnd-kit/utilities"; import { clone, create, equals, fromJson, toJson, - JsonValue, } from "@bufbuild/protobuf"; import * as m from "../../paraglide/messages"; -import { FormModal } from "../../components/common/FormModal"; import { Button } from "../../components/ui/button"; import { Field } from "../../components/ui/field"; -import { Tooltip } from "../../components/ui/tooltip"; -import { NumberInputField } from "../../components/common/NumberInput"; // Assuming I migrated this or will check +import { RetentionPolicyView } from "../../components/common/RetentionPolicyView"; import { ScheduleFormItem, ScheduleDefaultsDaily, } from "../../components/common/ScheduleFormItem"; -// Use the real implementation -import { URIAutocomplete } from "../../components/common/URIAutocomplete"; import { HooksFormList, hooksListTooltipText, } from "../../components/common/HooksFormList"; import { DynamicList } from "../../components/common/DynamicList"; +import { + TwoPaneModal, + TwoPaneSection, + type SectionDef, +} from "../../components/common/TwoPaneModal"; +import { SectionCard } from "../../components/common/SectionCard"; // Default Plan const planDefaults = create(PlanSchema, { schedule: { schedule: { case: "cron", - value: "0 * * * *", // every hour + value: "0 * * * *", }, clock: Schedule_Clock.LOCAL, }, @@ -107,19 +80,20 @@ const planDefaults = create(PlanSchema, { }, }); -export const AddPlanModal = ({ template }: { template: Plan | null }) => { +export const AddPlanModal = ({ template, onSaveOverride }: { template: Plan | 
null, onSaveOverride?: (plan: Plan) => Promise }) => { const [confirmLoading, setConfirmLoading] = useState(false); const showModal = useShowModal(); const [config, setConfig] = useConfig(); - // Local State + const selectedRepo = config?.repos.find((r) => r.id === (template?.repo || formData?.repo)); + const repoHasScheduledForget = !!selectedRepo?.forgetPolicy?.schedule; + const [formData, setFormData] = useState( template ? toJson(PlanSchema, template, { alwaysEmitImplicit: true }) : toJson(PlanSchema, planDefaults, { alwaysEmitImplicit: true }), ); - // Sync state with template prop useEffect(() => { setFormData( template @@ -128,13 +102,11 @@ export const AddPlanModal = ({ template }: { template: Plan | null }) => { ); }, [template]); - // Helper to update fields const updateField = (path: string[], value: any) => { setFormData((prev: any) => { const next = { ...prev }; let curr = next; for (let i = 0; i < path.length - 1; i++) { - // Create shallow copy of the next level if it exists, or new object if not curr[path[i]] = curr[path[i]] ? 
{ ...curr[path[i]] } : {}; curr = curr[path[i]]; } @@ -181,7 +153,6 @@ export const AddPlanModal = ({ template }: { template: Plan | null }) => { const handleOk = async () => { setConfirmLoading(true); try { - // Validation if (!formData.id?.trim()) { throw new Error(m.add_plan_modal_validation_plan_name_required()); } @@ -201,7 +172,6 @@ export const AddPlanModal = ({ template }: { template: Plan | null }) => { throw new Error(m.add_plan_modal_validation_flag_pattern()); } - // Check retention for sub-hourly schedules const scheduleValue = formData.schedule?.schedule?.value; const isCron = formData.schedule?.schedule?.case === "cron"; const isSubHourly = @@ -221,7 +191,6 @@ export const AddPlanModal = ({ template }: { template: Plan | null }) => { ignoreUnknownFields: true, }); - // Clean up retention if empty (logic from original) if ( plan.retention && equals( @@ -233,6 +202,12 @@ export const AddPlanModal = ({ template }: { template: Plan | null }) => { delete plan.retention; } + if (onSaveOverride) { + await onSaveOverride(plan); + showModal(null); + return; + } + const configCopy = clone(ConfigSchema, config); if (template) { @@ -254,13 +229,53 @@ export const AddPlanModal = ({ template }: { template: Plan | null }) => { } }; - const repos = config?.repos || []; + const allRepos = config?.repos || []; + const localRepos = allRepos.filter((r) => !r.originInstanceId); + const remoteRepos = allRepos.filter((r) => !!r.originInstanceId); const repoOptions = createListCollection({ - items: repos.map((r) => ({ label: r.id, value: r.id })), + items: [ + ...localRepos.map((r) => ({ label: r.id, value: r.id })), + ...remoteRepos.map((r) => ({ + label: `${r.id} (from ${r.originInstanceId})`, + value: r.id, + })), + ], }); + const sections: SectionDef[] = [ + { id: "details", label: "Details", icon: }, + { id: "scope", label: "Scope", icon: }, + { id: "schedule", label: "Schedule", icon: }, + { id: "retention", label: "Retention", icon: }, + { id: "advanced", label: 
"Advanced", icon: }, + ]; + + const footer = ( + + + {template && ( + + {m.add_plan_modal_button_delete()} + + )} + + + ); + return ( - showModal(null)} title={ @@ -268,442 +283,248 @@ export const AddPlanModal = ({ template }: { template: Plan | null }) => { ? m.add_plan_modal_title_update() : m.add_plan_modal_title_add() } - size="large" - footer={ - - - {template && ( - - {m.add_plan_modal_button_delete()} - - )} - - - } + headerIcon={} + sections={sections} + footer={footer} > - - {/* Info Link */} -

- {m.add_plan_modal_see_guide_prefix()}{" "} - - {m.add_plan_modal_see_guide_link()} - {" "} - {m.add_plan_modal_see_guide_suffix()} -

+ {/* Details Section */} + + } + title={m.op_row_backup_details()} + description="Plan name and target repository." + > + + p.id === formData.id))) + } + errorText={ + !!formData.id && !namePattern.test(formData.id) + ? m.add_plan_modal_validation_plan_name_pattern() + : m.add_plan_modal_validation_plan_exists() + } + > + updateField(["id"], e.target.value)} + disabled={!!template} + placeholder={"plan" + ((config?.plans?.length || 0) + 1)} + /> + - {/* Plan Details */} -
- - - - p.id === formData.id))) - } - errorText={ - !!formData.id && !namePattern.test(formData.id) - ? m.add_plan_modal_validation_plan_name_pattern() - : m.add_plan_modal_validation_plan_exists() - } - > - updateField(["id"], e.target.value)} - disabled={!!template} - placeholder={"plan" + ((config?.plans?.length || 0) + 1)} - /> - + + + updateField(["repo"], e.value[0]) + } + disabled={!!template} + width="full" + > + {/* @ts-ignore */} + + {/* @ts-ignore */} + + + {/* @ts-ignore */} + + {localRepos.length > 0 && remoteRepos.length > 0 && ( + + Local + + )} + {repoOptions.items.slice(0, localRepos.length).map((item: any) => ( + + {item.label} + + ))} + {remoteRepos.length > 0 && ( + + Remote + + )} + {repoOptions.items.slice(localRepos.length).map((item: any) => ( + + {item.label} + + ))} + + + + + + - - - updateField(["repo"], e.value[0]) - } - disabled={!!template} - width="full" + {/* Scope Section */} + + } + title={m.settings_peer_permission_scopes()} + description="Directories and exclusion patterns." + > + + updateField(["paths"], items)} + required + autocompleteType="uri" + placeholder={m.add_plan_modal_field_paths()} + /> + + + updateField(["excludes"], items) + } + tooltip={ + <> + {m.add_plan_modal_field_excludes_tooltip_prefix()}{" "} + - {/* @ts-ignore */} - - {/* @ts-ignore */} - - - {/* @ts-ignore */} - - {repoOptions.items.map((item: any) => ( - // @ts-ignore - - {item.label} - - ))} - - - - - - -
+ {m.add_plan_modal_field_excludes_tooltip_link()} + {" "} + {m.add_plan_modal_field_excludes_tooltip_suffix()} + + } + placeholder={m.add_plan_modal_field_excludes()} + /> - {/* Scope */} -
- - - - updateField(["paths"], items)} - required - autocompleteType="uri" - placeholder={m.add_plan_modal_field_paths()} - /> + + updateField(["iexcludes"], items) + } + tooltip={ + <> + {m.add_plan_modal_field_iexcludes_tooltip_prefix()}{" "} + + {m.add_plan_modal_field_excludes_tooltip_link()} + {" "} + {m.add_plan_modal_field_excludes_tooltip_suffix()} + + } + placeholder={m.add_plan_modal_field_iexcludes()} + /> + + + - - updateField(["excludes"], items) - } - tooltip={ - <> - {m.add_plan_modal_field_excludes_tooltip_prefix()}{" "} - - {m.add_plan_modal_field_excludes_tooltip_link()} - {" "} - {m.add_plan_modal_field_excludes_tooltip_suffix()} - - } - placeholder={m.add_plan_modal_field_excludes()} - /> - - - updateField(["iexcludes"], items) - } - tooltip={ - <> - {m.add_plan_modal_field_iexcludes_tooltip_prefix()}{" "} - - {m.add_plan_modal_field_excludes_tooltip_link()} - {" "} - {m.add_plan_modal_field_excludes_tooltip_suffix()} - - } - placeholder={m.add_plan_modal_field_iexcludes()} - /> - - - -
- - {/* Schedule */} -
+ {/* Schedule Section */} + + } + title={m.add_plan_modal_field_schedule()} + description="When backups run automatically." + > updateField(["schedule"], v)} defaults={ScheduleDefaultsDaily} /> -
+
+
- {/* Retention Policy */} -
- updateField(["retention"], v)} - /> -
- - {/* Advanced */} -
- - - - - updateField(["backup_flags"], items) - } - tooltip={m.add_plan_modal_field_backup_flags_tooltip()} - placeholder="--flag" - autocompleteType="flag" - /> - - - updateField(["hooks"], v)} - /> - - - - -
- - {/* JSON Preview */} - - - - - {m.add_repo_modal_preview_json()} - - - - - {JSON.stringify(formData, null, 2)} - - - - -
-
- ); -}; - -const Section = ({ - title, - children, -}: { - title: string; - children: React.ReactNode; -}) => ( - - - {title} - - {children} - -); - -// Retention View -const RetentionPolicyView = ({ schedule, retention, onChange }: any) => { - // Mode determination - const determineMode = () => { - if (!retention) return "policyTimeBucketed"; - if (retention.policyKeepLastN) return "policyKeepLastN"; - if (retention.policyKeepAll) return "policyKeepAll"; - if (retention.policyTimeBucketed) return "policyTimeBucketed"; - return "policyTimeBucketed"; - }; - const mode = determineMode(); - - const handleModeChange = (newMode: string) => { - if (newMode === "policyKeepLastN") { - onChange({ policyKeepLastN: 30 }); - } else if (newMode === "policyTimeBucketed") { - onChange({ - policyTimeBucketed: { - yearly: 0, - monthly: 3, - weekly: 4, - daily: 7, - hourly: 24, - keepLastN: 0, - }, - }); - } else { - onChange({ policyKeepAll: true }); - } - }; - - // Derived values - const cronIsSubHourly = useMemo( - () => - schedule?.schedule?.value && - !/^\d+ /.test(schedule.schedule.value) && - schedule.schedule.case === "cron", - [schedule], - ); - - // Helpers to update nested retention fields - const updateRetentionField = (path: string[], val: any) => { - const next = { ...retention }; - let curr = next; - for (let i = 0; i < path.length - 1; i++) { - // Create shallow copy of the next level to ensure immutability - curr[path[i]] = curr[path[i]] ? 
{ ...curr[path[i]] } : {}; - curr = curr[path[i]]; - } - curr[path[path.length - 1]] = val; - onChange(next); - }; - - return ( - - - - {/* Mode Selector */} - - {[ - { - value: "policyKeepLastN", - label: m.add_plan_modal_retention_policy_mode_count_label(), - tooltip: m.add_plan_modal_retention_policy_keep_last_n_tooltip() - }, - { - value: "policyTimeBucketed", - label: m.add_plan_modal_retention_policy_mode_time_label(), - tooltip: m.add_plan_modal_retention_policy_time_bucketed_tooltip() - }, - { - value: "policyKeepAll", - label: m.add_plan_modal_retention_policy_mode_none_label(), - tooltip: m.add_plan_modal_retention_policy_keep_all_tooltip() - }, - ].map((option) => ( - - - - ))} - - - - - {/* Mode Content */} - {mode === "policyKeepAll" && ( -

- {m.add_plan_modal_retention_policy_keep_all_warning()} -

- )} - - {mode === "policyKeepLastN" && ( - - onChange({ ...schedule, policyKeepLastN: e.valueAsNumber }) - } + {/* Retention Section */} + + } + title={m.add_plan_modal_retention_policy_label()} + description="How long to keep snapshots before forgetting them." + > + {repoHasScheduledForget ? ( + + {m.add_plan_modal_retention_managed_by_repo()} + + ) : ( + updateField(["retention"], v)} /> )} + + - {mode === "policyTimeBucketed" && ( - - - updateRetentionField( - ["policyTimeBucketed", "hourly"], - e.valueAsNumber, - ) - } + {/* Advanced Section */} + + } + title={m.add_plan_modal_advanced_label()} + description="Extra flags and notification hooks." + > + + + updateField(["backup_flags"], items) + } + tooltip={m.add_plan_modal_field_backup_flags_tooltip()} + placeholder="--flag" + autocompleteType="flag" + /> + + + updateField(["hooks"], v)} /> - - updateRetentionField( - ["policyTimeBucketed", "daily"], - e.valueAsNumber, - ) - } - /> - - updateRetentionField( - ["policyTimeBucketed", "weekly"], - e.valueAsNumber, - ) - } - /> - - updateRetentionField( - ["policyTimeBucketed", "monthly"], - e.valueAsNumber, - ) - } - /> - - updateRetentionField( - ["policyTimeBucketed", "yearly"], - e.valueAsNumber, - ) - } - /> - - updateRetentionField( - ["policyTimeBucketed", "keepLastN"], - e.valueAsNumber, - ) - } - /> - - )} -
-
-
+ + + + + + {/* JSON Preview */} + + + + + {m.add_repo_modal_preview_json()} + + + + + {JSON.stringify(formData, null, 2)} + + + + + ); }; + diff --git a/webui/src/features/repositories/AddRepoModal.tsx b/webui/src/features/repositories/AddRepoModal.tsx index 39c3da98..6af18b5b 100644 --- a/webui/src/features/repositories/AddRepoModal.tsx +++ b/webui/src/features/repositories/AddRepoModal.tsx @@ -2,28 +2,18 @@ import { Stack, Flex, Input, - Card, Text as CText, Grid, Code, Box, } from "@chakra-ui/react"; import { EnumSelector, EnumOption } from "../../components/common/EnumSelector"; -import { Checkbox } from "../../components/ui/checkbox"; -import { - AccordionItem, - AccordionItemContent, - AccordionItemTrigger, - AccordionRoot, -} from "../../components/ui/accordion"; import React, { useEffect, useRef, useState } from "react"; import { useShowModal } from "../../components/common/ModalManager"; import { CommandPrefix_CPUNiceLevel, - CommandPrefix_CPUNiceLevelSchema, CommandPrefix_IONiceLevel, - CommandPrefix_IONiceLevelSchema, Repo, RepoSchema, Schedule_Clock, @@ -31,27 +21,25 @@ import { import { AddRepoRequestSchema, CheckRepoExistsRequestSchema, - SetupSftpRequestSchema, } from "../../../gen/ts/v1/service_pb"; import { StringValueSchema } from "../../../gen/ts/types/value_pb"; import { URIAutocomplete } from "../../components/common/URIAutocomplete"; import { alerts, formatErrorAlert } from "../../components/common/Alerts"; import { namePattern } from "../../lib/util"; import { backrestService } from "../../api/client"; -import { ConfirmButton, SpinButton } from "../../components/common/SpinButton"; +import { ConfirmButton } from "../../components/common/SpinButton"; import { useConfig } from "../../app/provider"; import { ScheduleFormItem, ScheduleDefaultsInfrequent, + ScheduleDefaultsDaily, } from "../../components/common/ScheduleFormItem"; import { isWindows } from "../../state/buildcfg"; -import { create, fromJson, toJson, JsonValue } from 
"@bufbuild/protobuf"; +import { create, fromJson, toJson } from "@bufbuild/protobuf"; import * as m from "../../paraglide/messages"; -import { FormModal } from "../../components/common/FormModal"; import { Button } from "../../components/ui/button"; import { Field } from "../../components/ui/field"; import { PasswordInput } from "../../components/ui/password-input"; -import { Tooltip } from "../../components/ui/tooltip"; import { NumberInputField } from "../../components/common/NumberInput"; import { HooksFormList, @@ -68,6 +56,27 @@ import { DialogRoot, DialogTitle, } from "../../components/ui/dialog"; +import { + FiTag, + FiLink, + FiClock, + FiZap, + FiSliders, +} from "react-icons/fi"; +import { + TwoPaneModal, + TwoPaneSection, + type SectionDef, +} from "../../components/common/TwoPaneModal"; +import { SectionCard } from "../../components/common/SectionCard"; +import { ToggleField } from "../../components/common/ToggleField"; +import { RetentionPolicyView } from "../../components/common/RetentionPolicyView"; +import { + AccordionRoot, + AccordionItem, + AccordionItemTrigger, + AccordionItemContent, +} from "../../components/ui/accordion"; const repoDefaults = create(RepoSchema, { prunePolicy: { @@ -75,7 +84,7 @@ const repoDefaults = create(RepoSchema, { schedule: { schedule: { case: "cron", - value: "0 0 1 * *", // 1st of the month + value: "0 0 1 * *", }, clock: Schedule_Clock.LAST_RUN_TIME, }, @@ -84,7 +93,7 @@ const repoDefaults = create(RepoSchema, { schedule: { schedule: { case: "cron", - value: "0 0 1 * *", // 1st of the month + value: "0 0 1 * *", }, clock: Schedule_Clock.LAST_RUN_TIME, }, @@ -137,7 +146,6 @@ const SftpConfigSection = ({ try { if (!uri) return; - // Parse host and port from the SFTP URI const authority = uri.replace("sftp:", "").split("/")[0]; const hostPart = authority.includes("@") ? 
authority.split("@")[1] : authority; let host = hostPart; @@ -278,24 +286,22 @@ const SftpConfigSection = ({ ); }; -export const AddRepoModal = ({ template }: { template: Repo | null }) => { +export const AddRepoModal = ({ template, onSaveOverride }: { template: Repo | null, onSaveOverride?: (repo: Repo) => Promise }) => { const [confirmLoading, setConfirmLoading] = useState(false); const showModal = useShowModal(); const [config, setConfig] = useConfig(); + const isRemoteOrigin = !!template?.originInstanceId; - // Local state for form fields const [formData, setFormData] = useState( template ? toJson(RepoSchema, template, { alwaysEmitImplicit: true }) : toJson(RepoSchema, repoDefaults, { alwaysEmitImplicit: true }), ); - // SFTP specific state const [sftpIdentityFile, setSftpIdentityFile] = useState(""); const [sftpPort, setSftpPort] = useState(null); const [sftpKnownHostsPath, setSftpKnownHostsPath] = useState(""); - // Ref to read current flags without making them a useEffect dependency const flagsRef = useRef([]); const [confirmation, setConfirmation] = useState({ @@ -317,7 +323,6 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { setSftpKnownHostsPath(""); if (template?.uri?.startsWith("sftp:")) { - // Populate SFTP fields by parsing the existing sftp.args flag const sftpArgsFlag = (template.flags || []).find( (f) => f.includes("sftp.args") || f.includes("sftp.command"), ); @@ -360,26 +365,20 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { return curr; }; - // Keep flagsRef in sync with latest formData.flags so the SFTP effect can - // read the current value without flags being a reactive dependency. flagsRef.current = (formData.flags as string[]) || []; - // Keep sftp.args flag in sync with the SFTP config fields. 
useEffect(() => { const uri = getField(["uri"]); if (!uri?.startsWith("sftp:")) { return; } - // Read flags via ref so this effect does not re-run whenever the user - // edits the flags list (which would immediately erase empty rows). const currentFlags = flagsRef.current; const newFlags = currentFlags.filter( (f: string) => f && !f.includes("sftp.args") && !f.includes("sftp.command"), ); - // Always include -oBatchMode=yes; quote paths to handle spaces. let sftpArgs = "-oBatchMode=yes"; if (sftpIdentityFile) { @@ -411,8 +410,6 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { sftpIdentityFile, sftpPort, sftpKnownHostsPath, - // flags intentionally omitted: flagsRef avoids a circular dep where any - // user edit to flags would re-trigger the effect and erase empty rows. ]); if (!config) return null; @@ -434,10 +431,8 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { throw new Error(m.add_repo_modal_error_uri_required()); } - // Env and Password validation await envVarSetValidator(formData); - // Flags validation const flags = getField(["flags"]); if (flags && flags.some((f: string) => !/^\-\-?.*$/.test(f))) { throw new Error(m.add_repo_modal_error_flag_format()); @@ -478,6 +473,13 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { ignoreUnknownFields: true, }); + if (onSaveOverride) { + await onSaveOverride(repo); + showModal(null); + alerts.success(m.add_repo_modal_success_updated({ uri: repo.uri })); + return; + } + const req = create(AddRepoRequestSchema, { repo: repo, }); @@ -524,7 +526,7 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { }, }); } else { - throw e; // rethrow to be caught by the outer catch + throw e; } } } catch (e: any) { @@ -635,6 +637,49 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { }, ]; + const sections: SectionDef[] = [ + { id: "identity", label: "Identity", icon: }, + { id: "connection", label: 
"Connection", icon: }, + { id: "scheduling", label: "Scheduling", icon: }, + { id: "hooks", label: "Hooks", icon: }, + { id: "advanced", label: "Advanced", icon: }, + ]; + + const footer = ( + + + {template && ( + + {m.add_plan_modal_button_delete()} + + )} + {!isRemoteOrigin && ( + <> + + + + )} + + ); + return ( <> { - showModal(null)} title={ @@ -666,240 +711,198 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { ? m.add_repo_modal_title_edit() : m.add_repo_modal_title_add() } - size="large" - footer={ - - - {template && ( - - {m.add_plan_modal_button_delete()} - - )} - - - - } + headerIcon={} + sections={sections} + footer={footer} > - -

- {m.add_repo_modal_guide_text_p1()}{" "} - - {m.add_repo_modal_guide_link_text()} - {" "} - {m.add_repo_modal_guide_text_p2()}{" "} - - {m.add_repo_modal_guide_restic_link_text()} - {" "} - {m.add_repo_modal_guide_text_p3()} -

+ + {isRemoteOrigin && ( + + + This repository is managed by remote instance {template?.originInstanceId} and cannot be edited. You may delete it to remove the local copy. + + + )} -
- - - - + } + title="Identity" + description="Display name, identifiers, and unlock behaviour." + > + + r.id === getField(["id"]), + ))) + } + errorText={ + !!getField(["id"]) && !namePattern.test(getField(["id"])) + ? m.add_plan_modal_validation_plan_name_pattern() + : m.add_repo_modal_error_repo_exists() + } + > + ) => + updateField(["id"], e.target.value) } - required - invalid={ - !!getField(["id"]) && - (!namePattern.test(getField(["id"])) || - (!template && - !!config.repos.find( - (r) => r.id === getField(["id"]), - ))) - } - errorText={ - !!getField(["id"]) && !namePattern.test(getField(["id"])) - ? m.add_plan_modal_validation_plan_name_pattern() - : m.add_repo_modal_error_repo_exists() - } - > - ) => - updateField(["id"], e.target.value) - } - disabled={!!template} - placeholder={"repo" + ((config?.repos?.length || 0) + 1)} - /> - + disabled={!!template} + placeholder={"repo" + ((config?.repos?.length || 0) + 1)} + /> + - updateField(["autoUnlock"], v)} + label={m.add_repo_modal_field_auto_unlock()} + hint={m.add_repo_modal_field_auto_unlock_tooltip()} + /> + + updateField(["shared"], v)} + label="Shared" + hint="Automatically push this repo's configuration to all authorized clients with read permission." + /> + + + + + {/* Connection Section */} + + } + title="Connection" + description="Where the repo lives and how Backrest authenticates." + > + + + {m.add_repo_modal_field_uri_tooltip_title()} + +
  • {m.add_repo_modal_field_uri_tooltip_local()}
  • +
  • {m.add_repo_modal_field_uri_tooltip_s3()}
  • +
  • {m.add_repo_modal_field_uri_tooltip_sftp()}
  • +
  • + {m.add_repo_modal_field_uri_tooltip_see()}{" "} + + {m.add_repo_modal_field_uri_tooltip_restic_docs()} + {" "} + {m.add_repo_modal_field_uri_tooltip_info()} +
  • +
    + + } + required + > + updateField(["uri"], val)} + /> +
    + + {getField(["uri"])?.startsWith("sftp:") && ( + + )} + + - {m.add_repo_modal_field_uri_tooltip_title()} + {m.add_repo_modal_field_password_tooltip_intro()} -
  • {m.add_repo_modal_field_uri_tooltip_local()}
  • -
  • {m.add_repo_modal_field_uri_tooltip_s3()}
  • -
  • {m.add_repo_modal_field_uri_tooltip_sftp()}
  • - {m.add_repo_modal_field_uri_tooltip_see()}{" "} - - {m.add_repo_modal_field_uri_tooltip_restic_docs()} - {" "} - {m.add_repo_modal_field_uri_tooltip_info()} + {m.add_repo_modal_field_password_tooltip_entropy()} +
  • +
  • + {m.add_repo_modal_field_password_tooltip_env()} +
  • +
  • + {m.add_repo_modal_field_password_tooltip_generate()}
  • - } - required - > - updateField(["uri"], val)} - /> -
    + ) : undefined + } + > + + + ) => + updateField(["password"], e.target.value) + } + disabled={!!template} + /> + + {!template && ( + + )} + + - {/* SFTP Specific Fields */} - {getField(["uri"])?.startsWith("sftp:") && ( - - )} + updateField(["env"], items)} + tooltip={ + + + {m.add_repo_modal_field_env_vars_tooltip()} + + + + } + placeholder="KEY=VALUE" + /> +
    +
    +
    - - {m.add_repo_modal_field_password_tooltip_intro()} - -
  • - {m.add_repo_modal_field_password_tooltip_entropy()} -
  • -
  • - {m.add_repo_modal_field_password_tooltip_env()} -
  • -
  • - {m.add_repo_modal_field_password_tooltip_generate()} -
  • -
    - - ) : undefined - } - > - - - ) => - updateField(["password"], e.target.value) - } - disabled={!!template} - /> - - {!template && ( - - )} - -
    - - - updateField(["autoUnlock"], !!e.checked)} - > - {m.add_repo_modal_field_auto_unlock()} - - - {m.add_repo_modal_field_auto_unlock_tooltip()} - - - -
    -
    -
    - -
    - - - - updateField(["env"], items)} - tooltip={ - - - {m.add_repo_modal_field_env_vars_tooltip()} - - - - } - placeholder="KEY=VALUE" - /> - - - updateField(["flags"], items) - } - placeholder="--flag" - /> - - - -
    -
    - - + {/* Scheduling Section */} + + } + title="Prune Policy" + description={m.add_repo_modal_field_prune_policy_help()} + > { defaults={ScheduleDefaultsInfrequent} /> - - -
    + -
    - - + } + title="Check Policy" + description={m.add_repo_modal_field_check_policy_help()} + > { defaults={ScheduleDefaultsInfrequent} /> - - -
    + -
    - - + } + title={m.add_repo_modal_field_forget_policy()} + description={m.add_repo_modal_field_forget_policy_help()} + > + + + updateField(["forgetPolicy", "schedule"], val) + } + defaults={ScheduleDefaultsDaily} + /> + {(() => { + const sched = getField(["forgetPolicy", "schedule"]); + return sched && !sched.disabled ? ( + + updateField(["forgetPolicy", "retention"], v) + } + /> + ) : null; + })()} + + + + + {/* Hooks Section */} + + } + title="Hooks" + description="Run commands or send notifications on operation events." + > + + updateField(["hooks"], v)} + /> + + + + + {/* Advanced Section */} + + } + title="Advanced" + description="Command priority, extra flags, and raw restic options." + > {!isWindows && ( @@ -1005,19 +1054,17 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { )} - - updateField(["hooks"], v)} - /> - - - - -
    + + updateField(["flags"], items) + } + placeholder="--flag" + /> +
    + + {/* JSON Preview */} @@ -1041,36 +1088,12 @@ export const AddRepoModal = ({ template }: { template: Repo | null }) => { - -
    +
    + ); }; -const Section = ({ - title, - help, - children, -}: { - title: React.ReactNode; - help?: React.ReactNode; - children: React.ReactNode; -}) => ( - - - - {title} - - {help && ( - - {help} - - )} - - {children} - -); - // Utils const cryptoRandomPassword = (): string => { let vals = crypto.getRandomValues(new Uint8Array(64)); @@ -1175,6 +1198,7 @@ const formatMissingEnvVars = (partialMatches: string[][]): string => { }) .join(" or "); }; + const EnvVarTooltip = ({ uri }: { uri: string }) => { if (!uri) return null; const scheme = uri.split(":")[0]; diff --git a/webui/src/features/settings/SettingsModal.tsx b/webui/src/features/settings/SettingsModal.tsx index 90b3e3aa..ee9cc367 100644 --- a/webui/src/features/settings/SettingsModal.tsx +++ b/webui/src/features/settings/SettingsModal.tsx @@ -2,82 +2,57 @@ import { Flex, Stack, Input, - Textarea, createListCollection, IconButton, - Card, - Heading, Text, Box, } from "@chakra-ui/react"; -import { Checkbox } from "../../components/ui/checkbox"; -import React, { useEffect, useState, useMemo } from "react"; +import { useState } from "react"; import { useShowModal } from "../../components/common/ModalManager"; import { FiPlus as Plus, FiMinus as Minus, FiCopy as Copy, + FiEye, + FiEyeOff, + FiSettings, + FiLock, + FiGlobe, } from "react-icons/fi"; import { formatErrorAlert, alerts } from "../../components/common/Alerts"; -import { namePattern } from "../../lib/util"; import { backrestService, authenticationService } from "../../api/client"; -import { clone, fromJson, toJson } from "@bufbuild/protobuf"; +import { clone, create, fromJson, toJson } from "@bufbuild/protobuf"; import { AuthSchema, - Config, ConfigSchema, UserSchema, MultihostSchema, Multihost_PeerSchema, - Multihost_Permission_Type, } from "../../../gen/ts/v1/config_pb"; -import { PeerState } from "../../../gen/ts/v1sync/syncservice_pb"; +import { GeneratePairingTokenRequestSchema } from "../../../gen/ts/v1/service_pb"; import { useSyncStates } from 
"../../state/peerStates"; import { PeerStateConnectionStatusIcon } from "../../components/common/SyncStateIcon"; import { isMultihostSyncEnabled } from "../../state/buildcfg"; import * as m from "../../paraglide/messages"; -import { FormModal } from "../../components/common/FormModal"; import { Button } from "../../components/ui/button"; import { Field } from "../../components/ui/field"; -import { Tooltip } from "../../components/ui/tooltip"; import { PasswordInput } from "../../components/ui/password-input"; -import { - AccordionRoot, - AccordionItem, - AccordionItemTrigger, - AccordionItemContent, -} from "../../components/ui/accordion"; import { SelectRoot, SelectTrigger, SelectContent, SelectItem, SelectValueText, - SelectLabel, } from "../../components/ui/select"; import { useConfig } from "../../app/provider"; - import { useUserPreferences } from "../../lib/userPreferences"; - -interface FormData { - auth: { - disabled?: boolean; - users: { - name: string; - passwordBcrypt: string; - needsBcrypt?: boolean; - isExisting?: boolean; - }[]; - }; - instance: string; - multihost: { - identity: { - keyid: string; - }; - knownHosts: any[]; - authorizedClients: any[]; - }; -} +import { + TwoPaneModal, + TwoPaneSection, + type SectionDef, +} from "../../components/common/TwoPaneModal"; +import { SectionCard } from "../../components/common/SectionCard"; +import { ToggleField } from "../../components/common/ToggleField"; export const SettingsModal = () => { const [config, setConfig] = useConfig(); @@ -86,6 +61,18 @@ export const SettingsModal = () => { const [confirmLoading, setConfirmLoading] = useState(false); const [reloadOnCancel, setReloadOnCancel] = useState(false); + // Pairing token generation state + const [showGenerateForm, setShowGenerateForm] = useState(false); + const [tokenLabel, setTokenLabel] = useState(""); + const [tokenTtl, setTokenTtl] = useState("3600"); + const [tokenMaxUses, setTokenMaxUses] = useState(1); + const [generatedToken, 
setGeneratedToken] = useState(""); + const [generateLoading, setGenerateLoading] = useState(false); + const [initialTokenCount] = useState( + () => config?.multihost?.pairingTokens?.length || 0, + ); + + // Local state initialized from config const [formData, setFormData] = useState(() => { if (!config) return null; @@ -113,6 +100,60 @@ export const SettingsModal = () => { }; }); + const [initialFormData, setInitialFormData] = useState(() => + JSON.stringify(formData), + ); + const dirty = JSON.stringify(formData) !== initialFormData; + + const ttlOptions = createListCollection({ + items: [ + { label: "15 minutes", value: "900" }, + { label: "1 hour", value: "3600" }, + { label: "24 hours", value: "86400" }, + { label: "7 days", value: "604800" }, + { label: "Forever", value: "0" }, + ], + }); + + const refreshConfig = async () => { + const freshConfig = await backrestService.getConfig({}); + setConfig(freshConfig); + }; + + const handleGenerateToken = async () => { + setGenerateLoading(true); + try { + const resp = await backrestService.generatePairingToken( + create(GeneratePairingTokenRequestSchema, { + label: tokenLabel, + ttlSeconds: BigInt(parseInt(tokenTtl)), + maxUses: tokenMaxUses, + }), + ); + setGeneratedToken(resp.token); + await refreshConfig(); + } catch (e: any) { + alerts.error(formatErrorAlert(e, "Failed to generate pairing token")); + } finally { + setGenerateLoading(false); + } + }; + + const handleRemovePairingToken = async (index: number) => { + if (!config) return; + try { + const newConfig = clone(ConfigSchema, config); + if (newConfig.multihost) { + newConfig.multihost.pairingTokens.splice(index, 1); + } + setConfig(await backrestService.setConfig(newConfig)); + alerts.success("Pairing token removed."); + } catch (e: any) { + alerts.error(formatErrorAlert(e, "Failed to remove pairing token")); + } + }; + + if (!config || !formData) return null; const updateField = (path: string[], value: any) => { @@ -142,7 +183,6 @@ export const 
SettingsModal = () => { try { const workingData = JSON.parse(JSON.stringify(formData)); - // Hash passwords if needed if (workingData.auth?.users) { for (const user of workingData.auth.users) { if (user.needsBcrypt) { @@ -156,7 +196,6 @@ export const SettingsModal = () => { } } - // Update configuration let newConfig = clone(ConfigSchema, config); newConfig.auth = fromJson(AuthSchema, workingData.auth, { ignoreUnknownFields: false, @@ -173,6 +212,7 @@ export const SettingsModal = () => { } setConfig(await backrestService.setConfig(newConfig)); + setInitialFormData(JSON.stringify(formData)); setReloadOnCancel(true); alerts.success(m.settings_success_updated()); } catch (e: any) { @@ -191,274 +231,436 @@ export const SettingsModal = () => { const users = getField(["auth", "users"]) || []; + const sections: SectionDef[] = [ + { id: "general", label: "General", icon: }, + { id: "auth", label: "Authentication", icon: }, + ...(isMultihostSyncEnabled + ? [ + { + id: "multihost", + label: "Multihost", + icon: , + } as SectionDef, + ] + : []), + ]; + return ( - - - - - } + headerIcon={} + sections={sections} + dirty={dirty} + dirtyCount={1} + onSave={handleOk} + onDiscard={() => { + setFormData(JSON.parse(initialFormData)); + }} + saving={confirmLoading} > - - {users.length === 0 && !getField(["auth", "disabled"]) && ( - - - {m.settings_initial_setup_title()} - {m.settings_initial_setup_message()} - - {m.settings_initial_setup_hint()} - - - - )} - - + } + title="General" + description="Instance identity and display preferences." 
> - updateField(["instance"], e.target.value)} - disabled={!!config.instance} - placeholder={m.settings_field_instance_id_placeholder()} - /> - + + {users.length === 0 && !getField(["auth", "disabled"]) && ( + + + {m.settings_initial_setup_title()} + {m.settings_initial_setup_message()} + + {m.settings_initial_setup_hint()} + + + + )} - {/* @ts-ignore */} - - {/* User Settings Section */} - {/* @ts-ignore */} - - - { - // @ts-ignore - m.settings_section_user_settings - ? m.settings_section_user_settings() - : "User Settings" - } - - - - - - - + + updateField(["instance"], e.target.value)} + disabled={!!config.instance} + placeholder={m.settings_field_instance_id_placeholder()} + /> + - {/* Authentication Section */} - {/* @ts-ignore */} - - - {m.settings_section_authentication()} - - - - - - updateField(["auth", "disabled"], !!e.checked) - } - > - {m.settings_auth_disable()} - - + + + + - - - {users.map((user: any, index: number) => ( - - { - const newUsers = [...users]; - newUsers[index].name = e.target.value; - updateField(["auth", "users"], newUsers); - }} - disabled={user.isExisting} - flex={1} - /> - { - const newUsers = [...users]; - newUsers[index].passwordBcrypt = e.target.value; - newUsers[index].needsBcrypt = true; - updateField(["auth", "users"], newUsers); - }} - rootProps={{ flex: 1 }} - /> - { - const newUsers = [...users]; - newUsers.splice(index, 1); - updateField(["auth", "users"], newUsers); - }} - > - - - - ))} - - - + + + + ))} + - - + + + + - {/* Multihost Section */} - {isMultihostSyncEnabled && ( - // @ts-ignore - - - {m.settings_section_multihost()} - - - - - {m.settings_multihost_intro()} - - - {m.settings_multihost_warning()} - + {/* Multihost Section */} + {isMultihostSyncEnabled && ( + + } + title={m.settings_section_multihost()} + description="Peer-to-peer synchronisation between Backrest instances." 
+ > + + + {m.settings_multihost_intro()} + + + {m.settings_multihost_warning()} + - + + + + navigator.clipboard.writeText( + getField(["multihost", "identity", "keyid"]) || "", + ) + } + aria-label="Copy" > - + + + + + + + + } + title="Pairing Tokens" + description="Tokens that can be shared with other Backrest instances to simplify peering." + > + + {(config.multihost?.pairingTokens || []).map( + (token, index) => ( + = initialTokenCount} + generatedTokenString={ + index >= initialTokenCount ? generatedToken : undefined + } + config={config} + onRemove={() => handleRemovePairingToken(index)} + /> + ), + )} + + {showGenerateForm && ( + + + setTokenLabel(e.target.value)} + placeholder="e.g. laptop-2" + width="full" /> - + + + setTokenTtl(e.value[0]) + } + > + {/* @ts-ignore */} + + {/* @ts-ignore */} + + + {/* @ts-ignore */} + + {ttlOptions.items.map((o: any) => ( + + {o.label} + + ))} + + + + + + setTokenMaxUses(parseInt(e.target.value) || 0) + } + min={0} + width="full" + /> + + + + - + + + )} - - - updateField(["multihost", "authorizedClients"], items) - } - itemTypeName={m.settings_multihost_authorized_client_item()} - peerStates={peerStates} - config={config} - showInstanceUrl={false} - /> - + {!showGenerateForm && ( + + )} + + - - - updateField(["multihost", "knownHosts"], items) - } - itemTypeName={m.settings_multihost_known_host_item()} - peerStates={peerStates} - config={config} - showInstanceUrl={true} - /> - - - - - )} + } + title={m.settings_multihost_authorized_clients()} + description={m.settings_multihost_authorized_clients_tooltip()} + > + + updateField(["multihost", "authorizedClients"], items) + } + peerStates={peerStates} + config={config} + showInstanceUrl={false} + /> + - {/* Preview Section */} - {/* @ts-ignore */} - - - {m.settings_section_preview()} - - - - {JSON.stringify(formData, null, 2)} - - - - - - + } + title={m.settings_multihost_known_hosts()} + description={m.settings_multihost_known_hosts_tooltip()} + > + + 
updateField(["multihost", "knownHosts"], items) + } + peerStates={peerStates} + config={config} + /> + + + )} + ); }; -// --- Peer Sub-components --- +// --- Pairing Token Item --- -const PeerFormList = ({ +const PairingTokenItem = ({ + token, + isNew, + generatedTokenString, + config, + onRemove, +}: { + token: any; + isNew: boolean; + generatedTokenString?: string; + config: any; + onRemove: () => void; +}) => { + const [showToken, setShowToken] = useState(isNew); + + // Build the full token string: :# + const fullTokenString = + generatedTokenString || + `${config.multihost?.identity?.keyid || ""}:${token.secret || ""}#${config.instance || ""}`; + + const isExpired = + token.expiresAtUnix > 0n && + token.expiresAtUnix < BigInt(Math.floor(Date.now() / 1000)); + const usesText = + token.maxUses === 0 + ? `${token.uses} uses (unlimited)` + : `${token.uses}/${token.maxUses} uses`; + const expiryText = + token.expiresAtUnix === 0n + ? "Never expires" + : isExpired + ? `Expired ${new Date(Number(token.expiresAtUnix) * 1000).toLocaleString()}` + : `Expires ${new Date(Number(token.expiresAtUnix) * 1000).toLocaleString()}`; + + return ( + + + + + {token.label || "(no label)"} + + + {expiryText} -- {usesText} + + + + setShowToken(!showToken)} + aria-label={showToken ? "Hide token" : "Show token"} + > + {showToken ? 
: } + + + + + + + {showToken && ( + + + navigator.clipboard.writeText(fullTokenString)} + aria-label="Copy token" + > + + + + )} + + ); +}; + +// --- Known Hosts List (with integrated pairing) --- + +const KnownHostsList = ({ items, onUpdate, - itemTypeName, peerStates, config, - showInstanceUrl, }: any) => { - const handleAdd = () => { - onUpdate([ - ...items, - { instanceId: "", keyId: "", instanceUrl: "", permissions: [] }, - ]); - }; + const [showAddForm, setShowAddForm] = useState(false); + const [pairToken, setPairToken] = useState(""); + const [pairInstanceUrl, setPairInstanceUrl] = useState(""); const handleRemove = (index: number) => { const next = [...items]; @@ -472,8 +674,179 @@ const PeerFormList = ({ onUpdate(next); }; + const handleAdd = () => { + try { + if (!pairToken.trim()) { + onUpdate([ + ...items, + { + instanceId: "", + keyId: "", + instanceUrl: pairInstanceUrl, + permissions: [ + { + type: "PERMISSION_READ_OPERATIONS", + scopes: ["*"], + }, + { + type: "PERMISSION_RECEIVE_SHARED_REPOS", + }, + ], + }, + ]); + setShowAddForm(false); + setPairToken(""); + setPairInstanceUrl(""); + return; + } + + const hashIdx = pairToken.indexOf("#"); + const colonIdx = pairToken.indexOf(":"); + if (hashIdx === -1 || colonIdx === -1 || colonIdx > hashIdx) { + throw new Error( + 'Invalid token format. 
Expected ":#"', + ); + } + const keyId = pairToken.substring(0, colonIdx); + const secret = pairToken.substring(colonIdx + 1, hashIdx); + const instanceId = pairToken.substring(hashIdx + 1); + + if (!keyId || !secret || !instanceId) { + throw new Error("Token is missing required fields"); + } + if (!pairInstanceUrl) { + throw new Error("Instance URL is required"); + } + + onUpdate([ + ...items, + { + instanceId, + keyId, + instanceUrl: pairInstanceUrl, + initialPairingSecret: secret, + permissions: [ + { + type: "PERMISSION_READ_OPERATIONS", + scopes: ["*"], + }, + { + type: "PERMISSION_RECEIVE_SHARED_REPOS", + }, + ], + }, + ]); + + setPairToken(""); + setPairInstanceUrl(""); + setShowAddForm(false); + alerts.success("Server added to known hosts. Save settings to apply."); + } catch (e: any) { + alerts.error(formatErrorAlert(e, "Failed to add known host")); + } + }; + return ( - + + {items.map((item: any, index: number) => ( + handleItemUpdate(index, val)} + onRemove={() => handleRemove(index)} + peerStates={peerStates} + showInstanceUrl={true} + config={config} + /> + ))} + + {showAddForm ? ( + + + + Paste a pairing token from another Backrest server, or leave blank + to configure manually. + + + setPairToken(e.target.value)} + placeholder=':#' + width="full" + /> + + + setPairInstanceUrl(e.target.value)} + placeholder="e.g. http://server:9898" + width="full" + /> + + + + + + + + ) : ( + + )} + + ); +}; + +// --- Peer Sub-components --- + +const PeerFormList = ({ + items, + onUpdate, + peerStates, + config, + showInstanceUrl, +}: any) => { + const handleRemove = (index: number) => { + const next = [...items]; + next.splice(index, 1); + onUpdate(next); + }; + + const handleItemUpdate = (index: number, val: any) => { + const next = [...items]; + next[index] = val; + onUpdate(next); + }; + + return ( + + {items.length === 0 && ( + + No trusted peers yet. Generate a pairing token above and share it with + another instance to get started. 
+ + )} {items.map((item: any, index: number) => ( ))} - ); }; @@ -512,21 +879,9 @@ const PeerFormListItem = ({ }; return ( - - - {peerState && } - - - - - + - + + + {peerState && ( + + )} + + + + {showInstanceUrl && ( @@ -553,8 +921,6 @@ const PeerFormListItem = ({ )} - {/* Permissions (Only for known hosts logic in original? No, original had isKnownHost? logic) */} - {/* PeerPermissionsTile logic */} updateItem("permissions", perms)} @@ -576,16 +942,20 @@ const PeerPermissionsTile = ({ permissions, onUpdate, config }: any) => { ], }); + // Permission type values must match what toJson produces for enum fields (string names, not numbers). const permissionTypeOptions = createListCollection({ items: [ { label: m.settings_permission_edit_repo(), - value: - Multihost_Permission_Type.PERMISSION_READ_WRITE_CONFIG.toString(), + value: "PERMISSION_READ_WRITE_CONFIG", }, { label: m.settings_permission_read_ops(), - value: Multihost_Permission_Type.PERMISSION_READ_OPERATIONS.toString(), + value: "PERMISSION_READ_OPERATIONS", + }, + { + label: "Receive shared repos", + value: "PERMISSION_RECEIVE_SHARED_REPOS", }, ], }); @@ -594,7 +964,7 @@ const PeerPermissionsTile = ({ permissions, onUpdate, config }: any) => { onUpdate([ ...permissions, { - type: Multihost_Permission_Type.PERMISSION_READ_OPERATIONS, + type: "PERMISSION_READ_OPERATIONS", scopes: ["*"], }, ]); @@ -632,7 +1002,7 @@ const PeerPermissionsTile = ({ permissions, onUpdate, config }: any) => { collection={permissionTypeOptions} value={[perm.type.toString()]} onValueChange={(e: any) => - handleUpdate(index, "type", parseInt(e.value[0])) + handleUpdate(index, "type", e.value[0]) } > {/* @ts-ignore */} @@ -643,7 +1013,7 @@ const PeerPermissionsTile = ({ permissions, onUpdate, config }: any) => { /> {/* @ts-ignore */} - + {permissionTypeOptions.items.map((o: any) => ( {o.label} @@ -653,32 +1023,34 @@ const PeerPermissionsTile = ({ permissions, onUpdate, config }: any) => { - - - handleUpdate(index, "scopes", e.value) - 
} - > - {/* @ts-ignore */} - + {perm.type !== "PERMISSION_RECEIVE_SHARED_REPOS" && ( + + + handleUpdate(index, "scopes", e.value) + } + > {/* @ts-ignore */} - - - {/* @ts-ignore */} - - {repoOptions.items.map((o: any) => ( - - {o.label} - - ))} - - - + + {/* @ts-ignore */} + + + {/* @ts-ignore */} + + {repoOptions.items.map((o: any) => ( + + {o.label} + + ))} + + + + )} { ); }; -// Mock Alert component if needed or use toast const Alert = ({ status, children }: any) => ( ( ); // Feature flags -export const isMultihostSyncEnabled = features.has("multihost-sync"); +export const isMultihostSyncEnabled = true;