mirror of
https://github.com/garethgeorge/backrest.git
synced 2025-12-13 09:15:39 +00:00
feat: implement forget operation
This commit is contained in:
6
.github/workflows/build-and-test.yml
vendored
6
.github/workflows/build-and-test.yml
vendored
@@ -26,11 +26,11 @@ jobs:
|
||||
with:
|
||||
node-version: "20"
|
||||
|
||||
- name: Build WebUI
|
||||
run: cd webui && npm install && npm run build
|
||||
- name: Install Deps
|
||||
run: ./hack/install-deps.sh
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
run: ./hack/build.sh
|
||||
|
||||
- name: Test
|
||||
run: PATH=$(pwd):$PATH go test ./...
|
||||
|
||||
8
.github/workflows/release.yml
vendored
8
.github/workflows/release.yml
vendored
@@ -24,11 +24,11 @@ jobs:
|
||||
with:
|
||||
node-version: "20"
|
||||
|
||||
- name: Build WebUI
|
||||
run: cd webui && npm install && npm run build
|
||||
- name: Install Deps
|
||||
run: ./hack/install-deps.sh
|
||||
|
||||
- name: Build Binary
|
||||
run: go build .
|
||||
- name: Build
|
||||
run: ./hack/build.sh
|
||||
|
||||
- name: Rename Files
|
||||
run: |
|
||||
|
||||
@@ -274,6 +274,7 @@ type RetentionPolicy struct {
|
||||
KeepMonthly int32 `protobuf:"varint,6,opt,name=keep_monthly,json=keepMonthly,proto3" json:"keep_monthly,omitempty"` // keep the last n monthly snapshots.
|
||||
KeepYearly int32 `protobuf:"varint,7,opt,name=keep_yearly,json=keepYearly,proto3" json:"keep_yearly,omitempty"` // keep the last n yearly snapshots.
|
||||
KeepWithinDuration string `protobuf:"bytes,8,opt,name=keep_within_duration,json=keepWithinDuration,proto3" json:"keep_within_duration,omitempty"` // keep snapshots within a duration e.g. 1y2m3d4h5m6s
|
||||
Prune bool `protobuf:"varint,9,opt,name=prune,proto3" json:"prune,omitempty"` // prune snapshots after forget.
|
||||
}
|
||||
|
||||
func (x *RetentionPolicy) Reset() {
|
||||
@@ -364,6 +365,13 @@ func (x *RetentionPolicy) GetKeepWithinDuration() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *RetentionPolicy) GetPrune() bool {
|
||||
if x != nil {
|
||||
return x.Prune
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var File_v1_config_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_v1_config_proto_rawDesc = []byte{
|
||||
@@ -392,7 +400,7 @@ var file_v1_config_proto_rawDesc = []byte{
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x72, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x09, 0x72, 0x65,
|
||||
0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69,
|
||||
0x63, 0x79, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb2, 0x02,
|
||||
0x63, 0x79, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, 0x02,
|
||||
0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63,
|
||||
0x79, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x75, 0x73, 0x65, 0x64, 0x5f,
|
||||
0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x78,
|
||||
@@ -412,10 +420,11 @@ var file_v1_config_proto_rawDesc = []byte{
|
||||
0x12, 0x30, 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f,
|
||||
0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12,
|
||||
0x6b, 0x65, 0x65, 0x70, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x67, 0x61, 0x72, 0x65, 0x74, 0x68, 0x67, 0x65, 0x6f, 0x72, 0x67, 0x65, 0x2f, 0x72, 0x65,
|
||||
0x73, 0x74, 0x69, 0x63, 0x75, 0x69, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
|
||||
0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
|
||||
0x08, 0x52, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, 0x72, 0x65, 0x74, 0x68, 0x67, 0x65, 0x6f,
|
||||
0x72, 0x67, 0x65, 0x2f, 0x72, 0x65, 0x73, 0x74, 0x69, 0x63, 0x75, 0x69, 0x2f, 0x67, 0x6f, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -202,6 +202,8 @@ type Operation struct {
|
||||
//
|
||||
// *Operation_OperationBackup
|
||||
// *Operation_OperationIndexSnapshot
|
||||
// *Operation_OperationForget
|
||||
// *Operation_OperationPrune
|
||||
Op isOperation_Op `protobuf_oneof:"op"`
|
||||
}
|
||||
|
||||
@@ -314,6 +316,20 @@ func (x *Operation) GetOperationIndexSnapshot() *OperationIndexSnapshot {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Operation) GetOperationForget() *OperationForget {
|
||||
if x, ok := x.GetOp().(*Operation_OperationForget); ok {
|
||||
return x.OperationForget
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Operation) GetOperationPrune() *OperationPrune {
|
||||
if x, ok := x.GetOp().(*Operation_OperationPrune); ok {
|
||||
return x.OperationPrune
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isOperation_Op interface {
|
||||
isOperation_Op()
|
||||
}
|
||||
@@ -326,10 +342,22 @@ type Operation_OperationIndexSnapshot struct {
|
||||
OperationIndexSnapshot *OperationIndexSnapshot `protobuf:"bytes,101,opt,name=operation_index_snapshot,json=operationIndexSnapshot,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Operation_OperationForget struct {
|
||||
OperationForget *OperationForget `protobuf:"bytes,102,opt,name=operation_forget,json=operationForget,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Operation_OperationPrune struct {
|
||||
OperationPrune *OperationPrune `protobuf:"bytes,103,opt,name=operation_prune,json=operationPrune,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*Operation_OperationBackup) isOperation_Op() {}
|
||||
|
||||
func (*Operation_OperationIndexSnapshot) isOperation_Op() {}
|
||||
|
||||
func (*Operation_OperationForget) isOperation_Op() {}
|
||||
|
||||
func (*Operation_OperationPrune) isOperation_Op() {}
|
||||
|
||||
// OperationEvent is used in the wireformat to stream operation changes to clients
|
||||
type OperationEvent struct {
|
||||
state protoimpl.MessageState
|
||||
@@ -481,15 +509,13 @@ func (x *OperationIndexSnapshot) GetSnapshot() *ResticSnapshot {
|
||||
return nil
|
||||
}
|
||||
|
||||
// OperationForget tracks a forget operation and may additionally track prune output if a prune was run.
|
||||
// OperationForget tracks a forget operation.
|
||||
type OperationForget struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Forget []*ResticSnapshot `protobuf:"bytes,1,rep,name=forget,proto3" json:"forget,omitempty"`
|
||||
Pruned bool `protobuf:"varint,2,opt,name=pruned,proto3" json:"pruned,omitempty"`
|
||||
PruneOutput string `protobuf:"bytes,3,opt,name=prune_output,json=pruneOutput,proto3" json:"prune_output,omitempty"`
|
||||
}
|
||||
|
||||
func (x *OperationForget) Reset() {
|
||||
@@ -531,16 +557,50 @@ func (x *OperationForget) GetForget() []*ResticSnapshot {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *OperationForget) GetPruned() bool {
|
||||
if x != nil {
|
||||
return x.Pruned
|
||||
}
|
||||
return false
|
||||
// OperationPrune tracks a prune operation.
|
||||
type OperationPrune struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"`
|
||||
}
|
||||
|
||||
func (x *OperationForget) GetPruneOutput() string {
|
||||
func (x *OperationPrune) Reset() {
|
||||
*x = OperationPrune{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_v1_operations_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *OperationPrune) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*OperationPrune) ProtoMessage() {}
|
||||
|
||||
func (x *OperationPrune) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_v1_operations_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use OperationPrune.ProtoReflect.Descriptor instead.
|
||||
func (*OperationPrune) Descriptor() ([]byte, []int) {
|
||||
return file_v1_operations_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *OperationPrune) GetOutput() string {
|
||||
if x != nil {
|
||||
return x.PruneOutput
|
||||
return x.Output
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -554,7 +614,7 @@ var file_v1_operations_proto_rawDesc = []byte{
|
||||
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0a, 0x6f,
|
||||
0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
|
||||
0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a,
|
||||
0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xba, 0x03, 0x0a, 0x09, 0x4f,
|
||||
0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xbb, 0x04, 0x0a, 0x09, 0x4f,
|
||||
0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6f,
|
||||
0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x70, 0x6f, 0x49,
|
||||
@@ -582,30 +642,37 @@ var file_v1_operations_proto_rawDesc = []byte{
|
||||
0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x53,
|
||||
0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x70, 0x65, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f,
|
||||
0x74, 0x42, 0x04, 0x0a, 0x02, 0x6f, 0x70, 0x22, 0x69, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65,
|
||||
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52,
|
||||
0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70,
|
||||
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x0f, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
|
||||
0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x74,
|
||||
0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x45, 0x6e,
|
||||
0x74, 0x72, 0x79, 0x52, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22,
|
||||
0x48, 0x0a, 0x16, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65,
|
||||
0x78, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2e, 0x0a, 0x08, 0x73, 0x6e, 0x61,
|
||||
0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x52, 0x65, 0x73, 0x74, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52,
|
||||
0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x78, 0x0a, 0x0f, 0x4f, 0x70, 0x65,
|
||||
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x67, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06,
|
||||
0x66, 0x6f, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76,
|
||||
0x74, 0x12, 0x40, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66,
|
||||
0x6f, 0x72, 0x67, 0x65, 0x74, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x67, 0x65, 0x74,
|
||||
0x48, 0x00, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72,
|
||||
0x67, 0x65, 0x74, 0x12, 0x3d, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x5f, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x75, 0x6e, 0x65,
|
||||
0x48, 0x00, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x75,
|
||||
0x6e, 0x65, 0x42, 0x04, 0x0a, 0x02, 0x6f, 0x70, 0x22, 0x69, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79,
|
||||
0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70,
|
||||
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65,
|
||||
0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x4f,
|
||||
0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x0f, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73,
|
||||
0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x45,
|
||||
0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x22, 0x48, 0x0a, 0x16, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64,
|
||||
0x65, 0x78, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2e, 0x0a, 0x08, 0x73, 0x6e,
|
||||
0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
|
||||
0x52, 0x06, 0x66, 0x6f, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x75, 0x6e,
|
||||
0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x64,
|
||||
0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x4f, 0x75, 0x74,
|
||||
0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x3d, 0x0a, 0x0f, 0x4f, 0x70,
|
||||
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x67, 0x65, 0x74, 0x12, 0x2a, 0x0a,
|
||||
0x06, 0x66, 0x6f, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f,
|
||||
0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x67, 0x65, 0x74, 0x22, 0x28, 0x0a, 0x0e, 0x4f, 0x70, 0x65,
|
||||
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f,
|
||||
0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74,
|
||||
0x70, 0x75, 0x74, 0x2a, 0x4d, 0x0a, 0x12, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x56, 0x45,
|
||||
0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d,
|
||||
@@ -641,7 +708,7 @@ func file_v1_operations_proto_rawDescGZIP() []byte {
|
||||
}
|
||||
|
||||
var file_v1_operations_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
|
||||
var file_v1_operations_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_v1_operations_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
|
||||
var file_v1_operations_proto_goTypes = []interface{}{
|
||||
(OperationEventType)(0), // 0: v1.OperationEventType
|
||||
(OperationStatus)(0), // 1: v1.OperationStatus
|
||||
@@ -651,24 +718,27 @@ var file_v1_operations_proto_goTypes = []interface{}{
|
||||
(*OperationBackup)(nil), // 5: v1.OperationBackup
|
||||
(*OperationIndexSnapshot)(nil), // 6: v1.OperationIndexSnapshot
|
||||
(*OperationForget)(nil), // 7: v1.OperationForget
|
||||
(*BackupProgressEntry)(nil), // 8: v1.BackupProgressEntry
|
||||
(*ResticSnapshot)(nil), // 9: v1.ResticSnapshot
|
||||
(*OperationPrune)(nil), // 8: v1.OperationPrune
|
||||
(*BackupProgressEntry)(nil), // 9: v1.BackupProgressEntry
|
||||
(*ResticSnapshot)(nil), // 10: v1.ResticSnapshot
|
||||
}
|
||||
var file_v1_operations_proto_depIdxs = []int32{
|
||||
3, // 0: v1.OperationList.operations:type_name -> v1.Operation
|
||||
1, // 1: v1.Operation.status:type_name -> v1.OperationStatus
|
||||
5, // 2: v1.Operation.operation_backup:type_name -> v1.OperationBackup
|
||||
6, // 3: v1.Operation.operation_index_snapshot:type_name -> v1.OperationIndexSnapshot
|
||||
0, // 4: v1.OperationEvent.type:type_name -> v1.OperationEventType
|
||||
3, // 5: v1.OperationEvent.operation:type_name -> v1.Operation
|
||||
8, // 6: v1.OperationBackup.last_status:type_name -> v1.BackupProgressEntry
|
||||
9, // 7: v1.OperationIndexSnapshot.snapshot:type_name -> v1.ResticSnapshot
|
||||
9, // 8: v1.OperationForget.forget:type_name -> v1.ResticSnapshot
|
||||
9, // [9:9] is the sub-list for method output_type
|
||||
9, // [9:9] is the sub-list for method input_type
|
||||
9, // [9:9] is the sub-list for extension type_name
|
||||
9, // [9:9] is the sub-list for extension extendee
|
||||
0, // [0:9] is the sub-list for field type_name
|
||||
7, // 4: v1.Operation.operation_forget:type_name -> v1.OperationForget
|
||||
8, // 5: v1.Operation.operation_prune:type_name -> v1.OperationPrune
|
||||
0, // 6: v1.OperationEvent.type:type_name -> v1.OperationEventType
|
||||
3, // 7: v1.OperationEvent.operation:type_name -> v1.Operation
|
||||
9, // 8: v1.OperationBackup.last_status:type_name -> v1.BackupProgressEntry
|
||||
10, // 9: v1.OperationIndexSnapshot.snapshot:type_name -> v1.ResticSnapshot
|
||||
10, // 10: v1.OperationForget.forget:type_name -> v1.ResticSnapshot
|
||||
11, // [11:11] is the sub-list for method output_type
|
||||
11, // [11:11] is the sub-list for method input_type
|
||||
11, // [11:11] is the sub-list for extension type_name
|
||||
11, // [11:11] is the sub-list for extension extendee
|
||||
0, // [0:11] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_v1_operations_proto_init() }
|
||||
@@ -750,10 +820,24 @@ func file_v1_operations_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_v1_operations_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*OperationPrune); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_v1_operations_proto_msgTypes[1].OneofWrappers = []interface{}{
|
||||
(*Operation_OperationBackup)(nil),
|
||||
(*Operation_OperationIndexSnapshot)(nil),
|
||||
(*Operation_OperationForget)(nil),
|
||||
(*Operation_OperationPrune)(nil),
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
@@ -761,7 +845,7 @@ func file_v1_operations_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_v1_operations_proto_rawDesc,
|
||||
NumEnums: 2,
|
||||
NumMessages: 6,
|
||||
NumMessages: 7,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
#! /bin/sh
|
||||
set -x
|
||||
|
||||
(cd proto && ./update.sh)
|
||||
(cd webui && npm run build)
|
||||
(cd webui && npm i && npm run build)
|
||||
rm -f resticui
|
||||
go build ./cmd/resticui
|
||||
go build .
|
||||
rice append --exec resticui
|
||||
|
||||
@@ -2,5 +2,3 @@
|
||||
set -x
|
||||
|
||||
go install github.com/GeertJohan/go.rice/rice@latest
|
||||
go install github.com/GeertJohan/go.rice@latest
|
||||
python -m pip install lastversion
|
||||
@@ -1,4 +1,4 @@
|
||||
#! /bin/sh
|
||||
set -x
|
||||
|
||||
DEBUG=1 go run ./cmd/resticui
|
||||
DEBUG=1 go run .
|
||||
|
||||
232
internal/orchestrator/backup.go
Normal file
232
internal/orchestrator/backup.go
Normal file
@@ -0,0 +1,232 @@
|
||||
package orchestrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
v1 "github.com/garethgeorge/resticui/gen/go/v1"
|
||||
"github.com/garethgeorge/resticui/internal/oplog/indexutil"
|
||||
"github.com/garethgeorge/resticui/internal/protoutil"
|
||||
"github.com/garethgeorge/resticui/pkg/restic"
|
||||
"github.com/gitploy-io/cronexpr"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// BackupTask is a scheduled backup operation.
//
// A BackupTask is created either from a plan's cron schedule
// (NewScheduledBackupTask) or for a single immediate run
// (NewOneofBackupTask); the scheduler closure encodes the difference.
type BackupTask struct {
	name         string                             // human-readable task name, set by the constructors.
	orchestrator *Orchestrator                      // owning orchestrator
	plan         *v1.Plan                           // plan this backup executes.
	op           *v1.Operation                      // pending operation; set by Next, cleared by Run.
	scheduler    func(curTime time.Time) *time.Time // next run time after curTime, or nil when exhausted.
	cancel       atomic.Pointer[context.CancelFunc] // nil unless operation is running.
}
|
||||
|
||||
var _ Task = &BackupTask{}
|
||||
|
||||
func NewScheduledBackupTask(orchestrator *Orchestrator, plan *v1.Plan) (*BackupTask, error) {
|
||||
sched, err := cronexpr.ParseInLocation(plan.Cron, time.Now().Location().String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse schedule %q: %w", plan.Cron, err)
|
||||
}
|
||||
|
||||
return &BackupTask{
|
||||
name: fmt.Sprintf("backup for plan %q", plan.Id),
|
||||
orchestrator: orchestrator,
|
||||
plan: plan,
|
||||
scheduler: func(curTime time.Time) *time.Time {
|
||||
next := sched.Next(curTime)
|
||||
return &next
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewOneofBackupTask(orchestrator *Orchestrator, plan *v1.Plan, at time.Time) *BackupTask {
|
||||
didOnce := false
|
||||
return &BackupTask{
|
||||
name: fmt.Sprintf("onetime backup for plan %q", plan.Id),
|
||||
orchestrator: orchestrator,
|
||||
plan: plan,
|
||||
scheduler: func(curTime time.Time) *time.Time {
|
||||
if didOnce {
|
||||
return nil
|
||||
}
|
||||
didOnce = true
|
||||
return &at
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the human-readable name of the task.
func (t *BackupTask) Name() string {
	return t.name
}
|
||||
|
||||
func (t *BackupTask) Next(now time.Time) *time.Time {
|
||||
next := t.scheduler(now)
|
||||
if next == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
t.op = &v1.Operation{
|
||||
PlanId: t.plan.Id,
|
||||
RepoId: t.plan.Repo,
|
||||
UnixTimeStartMs: timeToUnixMillis(*next),
|
||||
Status: v1.OperationStatus_STATUS_PENDING,
|
||||
Op: &v1.Operation_OperationBackup{},
|
||||
}
|
||||
|
||||
if err := t.orchestrator.OpLog.Add(t.op); err != nil {
|
||||
zap.S().Errorf("task %v failed to add operation to oplog: %v", t.Name(), err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return next
|
||||
}
|
||||
|
||||
func (t *BackupTask) Run(ctx context.Context) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.cancel.Store(&cancel)
|
||||
err := backupHelper(ctx, t.orchestrator, t.plan, t.op)
|
||||
t.op = nil
|
||||
t.cancel.Store(nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Cancel marks the task's current operation as terminated with the given
// status and persists it to the oplog. When status is
// STATUS_USER_CANCELLED and an operation is running, the stored context
// cancel function is also invoked to interrupt it. It is a no-op when no
// operation is pending.
func (t *BackupTask) Cancel(status v1.OperationStatus) error {
	if t.op == nil {
		return nil
	}

	cancel := t.cancel.Load()
	if cancel != nil && status == v1.OperationStatus_STATUS_USER_CANCELLED {
		(*cancel)() // try to interrupt the running operation.
	}

	t.op.Status = status
	t.op.UnixTimeEndMs = curTimeMillis()
	return t.orchestrator.OpLog.Update(t.op)
}
|
||||
|
||||
// backupHelper runs the backup described by plan, recording progress and
// the final summary on op. After a successful backup it (1) reindexes the
// repo's snapshots into the oplog and (2), when the plan has a retention
// policy, schedules a one-shot forget task.
func backupHelper(ctx context.Context, orchestrator *Orchestrator, plan *v1.Plan, op *v1.Operation) error {
	backupOp := &v1.Operation_OperationBackup{
		OperationBackup: &v1.OperationBackup{},
	}

	startTime := time.Now()
	// Stage the backup payload and reset the start time to the actual
	// execution time (Next stamped the scheduled time earlier).
	op.Op = backupOp
	op.UnixTimeStartMs = curTimeMillis()

	err := WithOperation(orchestrator.OpLog, op, func() error {
		zap.L().Info("Starting backup", zap.String("plan", plan.Id), zap.Int64("opId", op.Id))
		repo, err := orchestrator.GetRepo(plan.Repo)
		if err != nil {
			return fmt.Errorf("couldn't get repo %q: %w", plan.Repo, err)
		}

		lastSent := time.Now() // debounce progress updates, these can end up being very frequent.
		summary, err := repo.Backup(ctx, plan, func(entry *restic.BackupProgressEntry) {
			// Drop updates arriving faster than one per 250ms.
			if time.Since(lastSent) < 250*time.Millisecond {
				return
			}
			lastSent = time.Now()

			backupOp.OperationBackup.LastStatus = protoutil.BackupProgressEntryToProto(entry)
			// Progress-write failures are logged but do not abort the backup.
			if err := orchestrator.OpLog.Update(op); err != nil {
				zap.S().Errorf("failed to update oplog with progress for backup: %v", err)
			}
		})
		if err != nil {
			return fmt.Errorf("repo.Backup for repo %q: %w", plan.Repo, err)
		}

		// Record the final summary; the last debounced entry may be stale.
		op.SnapshotId = summary.SnapshotId
		backupOp.OperationBackup.LastStatus = protoutil.BackupProgressEntryToProto(summary)
		if backupOp.OperationBackup.LastStatus == nil {
			return fmt.Errorf("expected a final backup progress entry, got nil")
		}

		zap.L().Info("Backup complete", zap.String("plan", plan.Id), zap.Duration("duration", time.Since(startTime)), zap.Any("summary", summary))
		return nil
	})
	if err != nil {
		return fmt.Errorf("backup operation: %w", err)
	}

	// this could alternatively be scheduled as a separate task, but it probably makes sense to index snapshots immediately after a backup.
	if err := indexSnapshotsHelper(ctx, orchestrator, plan); err != nil {
		return fmt.Errorf("reindexing snapshots after backup operation: %w", err)
	}

	// Retention is enforced by a follow-up forget task, run immediately.
	if plan.Retention != nil {
		orchestrator.ScheduleTask(NewOneofForgetTask(orchestrator, plan, time.Now()))
	}

	return nil
}
|
||||
|
||||
func indexSnapshotsHelper(ctx context.Context, orchestrator *Orchestrator, plan *v1.Plan) error {
|
||||
repo, err := orchestrator.GetRepo(plan.Repo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get repo %q: %w", plan.Repo, err)
|
||||
}
|
||||
|
||||
snapshots, err := repo.SnapshotsForPlan(ctx, plan)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get snapshots for plan %q: %w", plan.Id, err)
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
alreadyIndexed := 0
|
||||
var indexOps []*v1.Operation
|
||||
for _, snapshot := range snapshots {
|
||||
ops, err := orchestrator.OpLog.GetBySnapshotId(snapshot.Id, indexutil.CollectAll())
|
||||
if err != nil {
|
||||
return fmt.Errorf("HasIndexSnapshot for snapshot %q: %w", snapshot.Id, err)
|
||||
}
|
||||
|
||||
if containsSnapshotOperation(ops) {
|
||||
alreadyIndexed += 1
|
||||
continue
|
||||
}
|
||||
|
||||
snapshotProto := protoutil.SnapshotToProto(snapshot)
|
||||
indexOps = append(indexOps, &v1.Operation{
|
||||
RepoId: plan.Repo,
|
||||
PlanId: plan.Id,
|
||||
UnixTimeStartMs: snapshotProto.UnixTimeMs,
|
||||
UnixTimeEndMs: snapshotProto.UnixTimeMs,
|
||||
Status: v1.OperationStatus_STATUS_SUCCESS,
|
||||
SnapshotId: snapshotProto.Id,
|
||||
Op: &v1.Operation_OperationIndexSnapshot{
|
||||
OperationIndexSnapshot: &v1.OperationIndexSnapshot{
|
||||
Snapshot: snapshotProto,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if err := orchestrator.OpLog.BulkAdd(indexOps); err != nil {
|
||||
return fmt.Errorf("BulkAdd snapshot operations: %w", err)
|
||||
}
|
||||
|
||||
zap.L().Debug("Indexed snapshots",
|
||||
zap.String("plan", plan.Id),
|
||||
zap.Duration("duration", time.Since(startTime)),
|
||||
zap.Int("alreadyIndexed", alreadyIndexed),
|
||||
zap.Int("newlyAdded", len(snapshots)-alreadyIndexed),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func containsSnapshotOperation(ops []*v1.Operation) bool {
|
||||
for _, op := range ops {
|
||||
if _, ok := op.Op.(*v1.Operation_OperationIndexSnapshot); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
106
internal/orchestrator/forget.go
Normal file
106
internal/orchestrator/forget.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package orchestrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
v1 "github.com/garethgeorge/resticui/gen/go/v1"
|
||||
)
|
||||
|
||||
// ForgetTask tracks a forget operation.
//
// It is a one-shot task: `at` holds the scheduled run time and is
// consumed by the first call to Next.
type ForgetTask struct {
	name         string // NOTE(review): never assigned; Name() formats its own string — confirm whether this field is still needed.
	orchestrator *Orchestrator // owning orchestrator
	plan         *v1.Plan      // plan whose retention policy is applied.
	op           *v1.Operation // operation staged by Next; mutated by Run/Cancel.
	at           *time.Time    // one-shot run time; set to nil once handed out by Next.
	cancel       atomic.Pointer[context.CancelFunc] // nil unless operation is running.
}
|
||||
|
||||
var _ Task = &ForgetTask{}
|
||||
|
||||
func NewOneofForgetTask(orchestrator *Orchestrator, plan *v1.Plan, at time.Time) *ForgetTask {
|
||||
return &ForgetTask{
|
||||
orchestrator: orchestrator,
|
||||
plan: plan,
|
||||
at: &at,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the human-readable name of the task.
func (t *ForgetTask) Name() string {
	return fmt.Sprintf("forget for plan %q", t.plan.Id)
}
|
||||
|
||||
func (t *ForgetTask) Next(now time.Time) *time.Time {
|
||||
ret := t.at
|
||||
if ret != nil {
|
||||
t.at = nil
|
||||
t.op = &v1.Operation{
|
||||
PlanId: t.plan.Id,
|
||||
RepoId: t.plan.Repo,
|
||||
UnixTimeStartMs: timeToUnixMillis(*ret),
|
||||
Status: v1.OperationStatus_STATUS_PENDING,
|
||||
Op: &v1.Operation_OperationForget{},
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (t *ForgetTask) Run(ctx context.Context) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.cancel.Store(&cancel)
|
||||
defer t.cancel.Store(nil)
|
||||
|
||||
if t.plan.Retention == nil {
|
||||
return errors.New("plan does not have a retention policy")
|
||||
}
|
||||
|
||||
forgetOp := &v1.Operation_OperationForget{
|
||||
OperationForget: &v1.OperationForget{},
|
||||
}
|
||||
|
||||
t.op.Op = forgetOp
|
||||
t.op.UnixTimeStartMs = curTimeMillis()
|
||||
|
||||
if err := WithOperation(t.orchestrator.OpLog, t.op, func() error {
|
||||
repo, err := t.orchestrator.GetRepo(t.plan.Repo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get repo %q: %w", t.plan.Repo, err)
|
||||
}
|
||||
|
||||
forgot, err := repo.Forget(ctx, t.plan)
|
||||
if err != nil {
|
||||
return fmt.Errorf("forget: %w", err)
|
||||
}
|
||||
|
||||
forgetOp.OperationForget.Forget = append(forgetOp.OperationForget.Forget, forgot...)
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.plan.Retention.Prune {
|
||||
// TODO: schedule a prune task.
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cancel marks the task's current operation as terminated with the given
// status and persists it to the oplog. When status is
// STATUS_USER_CANCELLED and an operation is running, the stored context
// cancel function is also invoked to interrupt it. It is a no-op when no
// operation is pending.
func (t *ForgetTask) Cancel(status v1.OperationStatus) error {
	if t.op == nil {
		return nil
	}

	cancel := t.cancel.Load()
	if cancel != nil && status == v1.OperationStatus_STATUS_USER_CANCELLED {
		(*cancel)() // try to interrupt the running operation.
	}

	t.op.Status = status
	t.op.UnixTimeEndMs = curTimeMillis()
	return t.orchestrator.OpLog.Update(t.op)
}
|
||||
@@ -57,7 +57,7 @@ func (o *Orchestrator) ApplyConfig(cfg *v1.Config) error {
|
||||
defer o.mu.Unlock()
|
||||
o.config = cfg
|
||||
|
||||
zap.L().Info("Applying config to orchestrator", zap.Any("config", cfg))
|
||||
zap.L().Info("Applying config to orchestrator")
|
||||
|
||||
// Update the config provided to the repo pool.
|
||||
if err := o.repoPool.configProvider.Update(cfg); err != nil {
|
||||
@@ -133,7 +133,7 @@ func (o *Orchestrator) Run(mainCtx context.Context) {
|
||||
if err := t.task.Run(mainCtx); err != nil {
|
||||
zap.L().Error("task failed", zap.String("task", t.task.Name()), zap.Error(err))
|
||||
} else {
|
||||
zap.L().Debug("task finished", zap.String("task", t.task.Name()))
|
||||
zap.L().Info("task finished", zap.String("task", t.task.Name()))
|
||||
}
|
||||
|
||||
curTime := time.Now()
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/garethgeorge/resticui/gen/go/v1"
|
||||
"github.com/garethgeorge/resticui/internal/config"
|
||||
)
|
||||
|
||||
@@ -26,6 +27,10 @@ func (t *testTask) Run(ctx context.Context) error {
|
||||
return t.onRun()
|
||||
}
|
||||
|
||||
func (t *testTask) Cancel(withStatus v1.OperationStatus) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestTaskScheduling(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -33,7 +38,10 @@ func TestTaskScheduling(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
orch := NewOrchestrator("", config.NewDefaultConfig(), nil)
|
||||
orch, err := NewOrchestrator("", config.NewDefaultConfig(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create orchestrator: %v", err)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
@@ -69,7 +77,10 @@ func TestTaskRescheduling(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
orch := NewOrchestrator("", config.NewDefaultConfig(), nil)
|
||||
orch, err := NewOrchestrator("", config.NewDefaultConfig(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create orchestrator: %v", err)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
@@ -115,7 +126,10 @@ func TestGracefulShutdown(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Arrange
|
||||
orch := NewOrchestrator("", config.NewDefaultConfig(), nil)
|
||||
orch, err := NewOrchestrator("", config.NewDefaultConfig(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create orchestrator: %v", err)
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
go func() {
|
||||
@@ -132,7 +146,10 @@ func TestSchedulerWait(t *testing.T) {
|
||||
|
||||
// Arrange
|
||||
curTime := time.Now()
|
||||
orch := NewOrchestrator("", config.NewDefaultConfig(), nil)
|
||||
orch, err := NewOrchestrator("", config.NewDefaultConfig(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create orchestrator: %v", err)
|
||||
}
|
||||
orch.now = func() time.Time {
|
||||
return curTime
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
v1 "github.com/garethgeorge/resticui/gen/go/v1"
|
||||
"github.com/garethgeorge/resticui/internal/protoutil"
|
||||
"github.com/garethgeorge/resticui/pkg/restic"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
@@ -101,6 +102,32 @@ func (r *RepoOrchestrator) ListSnapshotFiles(ctx context.Context, snapshotId str
|
||||
return lsEnts, nil
|
||||
}
|
||||
|
||||
func (r *RepoOrchestrator) Forget(ctx context.Context, plan *v1.Plan) ([]*v1.ResticSnapshot, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
policy := plan.Retention
|
||||
if policy == nil {
|
||||
return nil, fmt.Errorf("plan %q has no retention policy", plan.Id)
|
||||
}
|
||||
|
||||
l := zap.L().With(zap.String("repo", r.repoConfig.Id), zap.String("plan", plan.Id))
|
||||
|
||||
l.Debug("Forget snapshots", zap.Any("policy", policy))
|
||||
result, err := r.repo.Forget(ctx, protoutil.RetentionPolicyFromProto(plan.Retention))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get snapshots for repo %v: %w", r.repoConfig.Id, err)
|
||||
}
|
||||
l.Debug("Forget result", zap.Any("result", result))
|
||||
|
||||
var forgotten []*v1.ResticSnapshot
|
||||
for _, snapshot := range result.Remove {
|
||||
forgotten = append(forgotten, protoutil.SnapshotToProto(&snapshot))
|
||||
}
|
||||
|
||||
return forgotten, nil
|
||||
}
|
||||
|
||||
func tagForPlan(plan *v1.Plan) string {
|
||||
return fmt.Sprintf("plan:%s", plan.Id)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/garethgeorge/resticui/gen/go/v1"
|
||||
)
|
||||
|
||||
type heapTestTask struct {
|
||||
@@ -25,6 +27,10 @@ func (t *heapTestTask) Run(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *heapTestTask) Cancel(withStatus v1.OperationStatus) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestTaskQueueOrdering(t *testing.T) {
|
||||
h := taskQueue{}
|
||||
|
||||
|
||||
@@ -7,12 +7,7 @@ import (
|
||||
|
||||
v1 "github.com/garethgeorge/resticui/gen/go/v1"
|
||||
"github.com/garethgeorge/resticui/internal/oplog"
|
||||
"github.com/garethgeorge/resticui/internal/oplog/indexutil"
|
||||
"github.com/garethgeorge/resticui/internal/protoutil"
|
||||
"github.com/garethgeorge/resticui/pkg/restic"
|
||||
"github.com/gitploy-io/cronexpr"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Task interface {
|
||||
@@ -22,209 +17,6 @@ type Task interface {
|
||||
Cancel(withStatus v1.OperationStatus) error // cancel the task's execution with the given status (either STATUS_USER_CANCELLED or STATUS_SYSTEM_CANCELLED).
|
||||
}
|
||||
|
||||
// BackupTask is a scheduled backup operation.
|
||||
type BackupTask struct {
|
||||
name string
|
||||
orchestrator *Orchestrator // owning orchestrator
|
||||
plan *v1.Plan
|
||||
op *v1.Operation
|
||||
scheduler func(curTime time.Time) *time.Time
|
||||
cancel context.CancelFunc // nil unless operation is running.
|
||||
}
|
||||
|
||||
var _ Task = &BackupTask{}
|
||||
|
||||
func NewScheduledBackupTask(orchestrator *Orchestrator, plan *v1.Plan) (*BackupTask, error) {
|
||||
sched, err := cronexpr.ParseInLocation(plan.Cron, time.Now().Location().String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse schedule %q: %w", plan.Cron, err)
|
||||
}
|
||||
|
||||
return &BackupTask{
|
||||
name: fmt.Sprintf("backup for plan %q", plan.Id),
|
||||
orchestrator: orchestrator,
|
||||
plan: plan,
|
||||
scheduler: func(curTime time.Time) *time.Time {
|
||||
next := sched.Next(curTime)
|
||||
return &next
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewOneofBackupTask(orchestrator *Orchestrator, plan *v1.Plan, at time.Time) *BackupTask {
|
||||
didOnce := false
|
||||
return &BackupTask{
|
||||
name: fmt.Sprintf("onetime backup for plan %q", plan.Id),
|
||||
orchestrator: orchestrator,
|
||||
plan: plan,
|
||||
scheduler: func(curTime time.Time) *time.Time {
|
||||
if didOnce {
|
||||
return nil
|
||||
}
|
||||
didOnce = true
|
||||
return &at
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (t *BackupTask) Name() string {
|
||||
return t.name
|
||||
}
|
||||
|
||||
func (t *BackupTask) Next(now time.Time) *time.Time {
|
||||
next := t.scheduler(now)
|
||||
if next == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
t.op = &v1.Operation{
|
||||
PlanId: t.plan.Id,
|
||||
RepoId: t.plan.Repo,
|
||||
UnixTimeStartMs: timeToUnixMillis(*next),
|
||||
Status: v1.OperationStatus_STATUS_PENDING,
|
||||
Op: &v1.Operation_OperationBackup{},
|
||||
}
|
||||
|
||||
if err := t.orchestrator.OpLog.Add(t.op); err != nil {
|
||||
zap.S().Errorf("task %v failed to add operation to oplog: %v", t.Name(), err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return next
|
||||
}
|
||||
|
||||
func (t *BackupTask) Run(ctx context.Context) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.cancel = cancel
|
||||
err := backupHelper(ctx, t.orchestrator, t.plan, t.op)
|
||||
t.op = nil
|
||||
t.cancel = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *BackupTask) Cancel(status v1.OperationStatus) error {
|
||||
if t.op == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if t.cancel != nil && status == v1.OperationStatus_STATUS_USER_CANCELLED {
|
||||
t.cancel() // try to interrupt the running operation.
|
||||
}
|
||||
|
||||
t.op.Status = status
|
||||
t.op.UnixTimeEndMs = curTimeMillis()
|
||||
return t.orchestrator.OpLog.Update(t.op)
|
||||
}
|
||||
|
||||
// backupHelper does a backup.
|
||||
func backupHelper(ctx context.Context, orchestrator *Orchestrator, plan *v1.Plan, op *v1.Operation) error {
|
||||
backupOp := &v1.Operation_OperationBackup{
|
||||
OperationBackup: &v1.OperationBackup{},
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
op.Op = backupOp
|
||||
op.UnixTimeStartMs = curTimeMillis()
|
||||
|
||||
err := WithOperation(orchestrator.OpLog, op, func() error {
|
||||
zap.L().Info("Starting backup", zap.String("plan", plan.Id), zap.Int64("opId", op.Id))
|
||||
repo, err := orchestrator.GetRepo(plan.Repo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get repo %q: %w", plan.Repo, err)
|
||||
}
|
||||
|
||||
lastSent := time.Now() // debounce progress updates, these can endup being very frequent.
|
||||
summary, err := repo.Backup(ctx, plan, func(entry *restic.BackupProgressEntry) {
|
||||
if time.Since(lastSent) < 250*time.Millisecond {
|
||||
return
|
||||
}
|
||||
lastSent = time.Now()
|
||||
|
||||
backupOp.OperationBackup.LastStatus = protoutil.BackupProgressEntryToProto(entry)
|
||||
if err := orchestrator.OpLog.Update(op); err != nil {
|
||||
zap.S().Errorf("failed to update oplog with progress for backup: %v", err)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("repo.Backup for repo %q: %w", plan.Repo, err)
|
||||
}
|
||||
|
||||
op.SnapshotId = summary.SnapshotId
|
||||
backupOp.OperationBackup.LastStatus = protoutil.BackupProgressEntryToProto(summary)
|
||||
if backupOp.OperationBackup.LastStatus == nil {
|
||||
return fmt.Errorf("expected a final backup progress entry, got nil")
|
||||
}
|
||||
|
||||
zap.L().Info("Backup complete", zap.String("plan", plan.Id), zap.Duration("duration", time.Since(startTime)), zap.Any("summary", summary))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("backup operation: %w", err)
|
||||
}
|
||||
|
||||
// this could alternatively be scheduled as a separate task, but it probably makes sense to index snapshots immediately after a backup.
|
||||
if err := indexSnapshotsHelper(ctx, orchestrator, plan); err != nil {
|
||||
return fmt.Errorf("reindexing snapshots after backup operation: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func indexSnapshotsHelper(ctx context.Context, orchestrator *Orchestrator, plan *v1.Plan) error {
|
||||
repo, err := orchestrator.GetRepo(plan.Repo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get repo %q: %w", plan.Repo, err)
|
||||
}
|
||||
|
||||
snapshots, err := repo.SnapshotsForPlan(ctx, plan)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get snapshots for plan %q: %w", plan.Id, err)
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
alreadyIndexed := 0
|
||||
var indexOps []*v1.Operation
|
||||
for _, snapshot := range snapshots {
|
||||
ops, err := orchestrator.OpLog.GetBySnapshotId(snapshot.Id, indexutil.CollectAll())
|
||||
if err != nil {
|
||||
return fmt.Errorf("HasIndexSnapshot for snapshot %q: %w", snapshot.Id, err)
|
||||
}
|
||||
|
||||
if containsSnapshotOperation(ops) {
|
||||
alreadyIndexed += 1
|
||||
continue
|
||||
}
|
||||
|
||||
snapshotProto := protoutil.SnapshotToProto(snapshot)
|
||||
indexOps = append(indexOps, &v1.Operation{
|
||||
RepoId: plan.Repo,
|
||||
PlanId: plan.Id,
|
||||
UnixTimeStartMs: snapshotProto.UnixTimeMs,
|
||||
UnixTimeEndMs: snapshotProto.UnixTimeMs,
|
||||
Status: v1.OperationStatus_STATUS_SUCCESS,
|
||||
SnapshotId: snapshotProto.Id,
|
||||
Op: &v1.Operation_OperationIndexSnapshot{
|
||||
OperationIndexSnapshot: &v1.OperationIndexSnapshot{
|
||||
Snapshot: snapshotProto,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if err := orchestrator.OpLog.BulkAdd(indexOps); err != nil {
|
||||
return fmt.Errorf("BulkAdd snapshot operations: %w", err)
|
||||
}
|
||||
|
||||
zap.L().Debug("Indexed snapshots",
|
||||
zap.String("plan", plan.Id),
|
||||
zap.Duration("duration", time.Since(startTime)),
|
||||
zap.Int("alreadyIndexed", alreadyIndexed),
|
||||
zap.Int("newlyAdded", len(snapshots)-alreadyIndexed),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// WithOperation is a utility that creates an operation to track the function's execution.
|
||||
// timestamps are automatically added and the status is automatically updated if an error occurs.
|
||||
func WithOperation(oplog *oplog.OpLog, op *v1.Operation, do func() error) error {
|
||||
@@ -263,12 +55,3 @@ func timeToUnixMillis(t time.Time) int64 {
|
||||
func curTimeMillis() int64 {
|
||||
return timeToUnixMillis(time.Now())
|
||||
}
|
||||
|
||||
func containsSnapshotOperation(ops []*v1.Operation) bool {
|
||||
for _, op := range ops {
|
||||
if _, ok := op.Op.(*v1.Operation_OperationIndexSnapshot); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -71,3 +71,27 @@ func BackupProgressEntryToProto(b *restic.BackupProgressEntry) *v1.BackupProgres
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func RetentionPolicyFromProto(p *v1.RetentionPolicy) *restic.RetentionPolicy {
|
||||
return &restic.RetentionPolicy{
|
||||
KeepLastN: int(p.KeepLastN),
|
||||
KeepHourly: int(p.KeepHourly),
|
||||
KeepDaily: int(p.KeepDaily),
|
||||
KeepWeekly: int(p.KeepWeekly),
|
||||
KeepMonthly: int(p.KeepMonthly),
|
||||
KeepYearly: int(p.KeepYearly),
|
||||
KeepWithinDuration: p.KeepWithinDuration,
|
||||
}
|
||||
}
|
||||
|
||||
func RetentionPolicyToProto(p *restic.RetentionPolicy) *v1.RetentionPolicy {
|
||||
return &v1.RetentionPolicy{
|
||||
KeepLastN: int32(p.KeepLastN),
|
||||
KeepHourly: int32(p.KeepHourly),
|
||||
KeepDaily: int32(p.KeepDaily),
|
||||
KeepWeekly: int32(p.KeepWeekly),
|
||||
KeepMonthly: int32(p.KeepMonthly),
|
||||
KeepYearly: int32(p.KeepYearly),
|
||||
KeepWithinDuration: p.KeepWithinDuration,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,7 +176,7 @@ func (r *Repo) Snapshots(ctx context.Context, opts ...GenericOption) ([]*Snapsho
|
||||
return snapshots, nil
|
||||
}
|
||||
|
||||
func (r *Repo) Forget(ctx context.Context, policy RetentionPolicy, pruneOutput io.Writer, opts ...GenericOption) (*ForgetResult, error) {
|
||||
func (r *Repo) Forget(ctx context.Context, policy *RetentionPolicy, opts ...GenericOption) (*ForgetResult, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
@@ -218,8 +218,25 @@ func (r *Repo) Forget(ctx context.Context, policy RetentionPolicy, pruneOutput i
|
||||
cmd.Env = append(cmd.Env, r.buildEnv()...)
|
||||
cmd.Env = append(cmd.Env, opt.extraEnv...)
|
||||
|
||||
return &result[0], nil
|
||||
}
|
||||
|
||||
func (r *Repo) Prune(ctx context.Context, pruneOutput io.Writer, opts ...GenericOption) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
opt := resolveOpts(opts)
|
||||
|
||||
args := []string{"prune"}
|
||||
args = append(args, r.extraArgs...)
|
||||
args = append(args, opt.extraArgs...)
|
||||
|
||||
cmd := exec.CommandContext(ctx, r.cmd, args...)
|
||||
cmd.Env = append(cmd.Env, r.buildEnv()...)
|
||||
cmd.Env = append(cmd.Env, opt.extraEnv...)
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
var writer io.Writer = buf
|
||||
var writer io.Writer = newLimitWriter(buf, 1000)
|
||||
if pruneOutput != nil {
|
||||
writer = io.MultiWriter(pruneOutput, buf)
|
||||
}
|
||||
@@ -227,10 +244,10 @@ func (r *Repo) Forget(ctx context.Context, policy RetentionPolicy, pruneOutput i
|
||||
cmd.Stderr = writer
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, NewCmdError(cmd, buf.Bytes(), err)
|
||||
return NewCmdError(cmd, buf.Bytes(), err)
|
||||
}
|
||||
|
||||
return &result[0], nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repo) ListDirectory(ctx context.Context, snapshot string, path string, opts ...GenericOption) (*Snapshot, []*LsEntry, error) {
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
v1 "github.com/garethgeorge/resticui/gen/go/v1"
|
||||
@@ -248,8 +247,7 @@ func TestResticForget(t *testing.T) {
|
||||
}
|
||||
|
||||
// prune all snapshots
|
||||
output := bytes.NewBuffer(nil)
|
||||
res, err := r.Forget(context.Background(), RetentionPolicy{KeepLastN: 3}, output)
|
||||
res, err := r.Forget(context.Background(), &RetentionPolicy{KeepLastN: 3})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to prune snapshots: %v", err)
|
||||
}
|
||||
@@ -280,8 +278,45 @@ func TestResticForget(t *testing.T) {
|
||||
if !reflect.DeepEqual(keptIds, ids[7:]) {
|
||||
t.Errorf("wanted kept ids to be %v, got: %v", ids[7:], keptIds)
|
||||
}
|
||||
}
|
||||
|
||||
if !strings.Contains(output.String(), "total prune") {
|
||||
t.Errorf("wanted prune output, got: %s", output.String())
|
||||
func TestResticPrune(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
repo := t.TempDir()
|
||||
r := NewRepo(helpers.ResticBinary(t), &v1.Repo{
|
||||
Id: "test",
|
||||
Uri: repo,
|
||||
Password: "test",
|
||||
}, WithFlags("--no-cache"))
|
||||
if err := r.Init(context.Background()); err != nil {
|
||||
t.Fatalf("failed to init repo: %v", err)
|
||||
}
|
||||
|
||||
testData := helpers.CreateTestData(t)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := r.Backup(context.Background(), nil, WithBackupPaths(testData))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to backup: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// forget recent snapshots
|
||||
_, err := r.Forget(context.Background(), &RetentionPolicy{KeepLastN: 1})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to forget snapshots: %v", err)
|
||||
}
|
||||
|
||||
// prune all snapshots
|
||||
output := bytes.NewBuffer(nil)
|
||||
if err := r.Prune(context.Background(), output); err != nil {
|
||||
t.Fatalf("failed to prune snapshots: %v", err)
|
||||
}
|
||||
|
||||
wantStr := "collecting packs for deletion and repacking"
|
||||
|
||||
if !bytes.Contains(output.Bytes(), []byte(wantStr)) {
|
||||
t.Errorf("wanted output to contain 'keep 1 snapshots', got: %s", output.String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,4 +44,6 @@ message RetentionPolicy {
|
||||
int32 keep_monthly = 6; // keep the last n monthly snapshots.
|
||||
int32 keep_yearly = 7; // keep the last n yearly snapshots.
|
||||
string keep_within_duration = 8; // keep snapshots within a duration e.g. 1y2m3d4h5m6s
|
||||
|
||||
bool prune = 9; // prune snapshots after forget.
|
||||
}
|
||||
|
||||
@@ -30,6 +30,8 @@ message Operation {
|
||||
oneof op {
|
||||
OperationBackup operation_backup = 100;
|
||||
OperationIndexSnapshot operation_index_snapshot = 101;
|
||||
OperationForget operation_forget = 102;
|
||||
OperationPrune operation_prune = 103;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,9 +67,12 @@ message OperationIndexSnapshot {
|
||||
ResticSnapshot snapshot = 2;
|
||||
}
|
||||
|
||||
// OperationForget tracks a forget operation and may additionally track prune output if a prune was run.
|
||||
// OperationForget tracks a forget operation.
|
||||
message OperationForget {
|
||||
repeated ResticSnapshot forget = 1;
|
||||
bool pruned = 2;
|
||||
string prune_output = 3;
|
||||
}
|
||||
|
||||
// OperationPrune tracks a prune operation.
|
||||
message OperationPrune {
|
||||
string output = 1;
|
||||
}
|
||||
|
||||
@@ -36,4 +36,5 @@ export type RetentionPolicy = {
|
||||
keepMonthly?: number
|
||||
keepYearly?: number
|
||||
keepWithinDuration?: string
|
||||
prune?: boolean
|
||||
}
|
||||
@@ -48,7 +48,7 @@ type BaseOperation = {
|
||||
}
|
||||
|
||||
export type Operation = BaseOperation
|
||||
& OneOf<{ operationBackup: OperationBackup; operationIndexSnapshot: OperationIndexSnapshot }>
|
||||
& OneOf<{ operationBackup: OperationBackup; operationIndexSnapshot: OperationIndexSnapshot; operationForget: OperationForget; operationPrune: OperationPrune }>
|
||||
|
||||
export type OperationEvent = {
|
||||
type?: OperationEventType
|
||||
@@ -65,6 +65,8 @@ export type OperationIndexSnapshot = {
|
||||
|
||||
export type OperationForget = {
|
||||
forget?: V1Restic.ResticSnapshot[]
|
||||
pruned?: boolean
|
||||
pruneOutput?: string
|
||||
}
|
||||
|
||||
export type OperationPrune = {
|
||||
output?: string
|
||||
}
|
||||
@@ -68,7 +68,6 @@ export const App: React.FC = () => {
|
||||
|
||||
return (
|
||||
<Layout style={{ height: "auto" }}>
|
||||
<OperationNotificationGenerator />
|
||||
<Header style={{ display: "flex", alignItems: "center" }}>
|
||||
<h1>
|
||||
<a
|
||||
@@ -167,49 +166,3 @@ const getSidenavItems = (config: Config | null): MenuProps["items"] => {
|
||||
},
|
||||
];
|
||||
};
|
||||
|
||||
const OperationNotificationGenerator = () => {
|
||||
const alertApi = useAlertApi()!;
|
||||
const setContent = useSetContent();
|
||||
const config = useRecoilValue(configState);
|
||||
|
||||
useEffect(() => {
|
||||
// TODO: factor notification generator into a separate file.
|
||||
const listener = (event: OperationEvent) => {
|
||||
if (event.type != OperationEventType.EVENT_CREATED) return;
|
||||
const planId = event.operation!.planId!;
|
||||
const repoId = event.operation!.repoId!;
|
||||
|
||||
const onClick = () => {
|
||||
const plan = config.plans!.find((p) => p.id == planId);
|
||||
if (!plan) return;
|
||||
setContent(<PlanView plan={plan} />, [
|
||||
{ title: "Plans" },
|
||||
{ title: planId || "" },
|
||||
]);
|
||||
};
|
||||
|
||||
if (event.operation?.operationBackup) {
|
||||
alertApi.info({
|
||||
content: `Backup started for plan ${planId}.`,
|
||||
onClick: onClick,
|
||||
});
|
||||
} else if (event.operation?.operationIndexSnapshot) {
|
||||
const indexOp = event.operation.operationIndexSnapshot;
|
||||
alertApi.info({
|
||||
content: `Indexed snapshot ${normalizeSnapshotId(
|
||||
indexOp.snapshot!.id!
|
||||
)} for plan ${planId}.`,
|
||||
onClick: onClick,
|
||||
});
|
||||
}
|
||||
};
|
||||
subscribeToOperations(listener);
|
||||
|
||||
return () => {
|
||||
unsubscribeFromOperations(listener);
|
||||
};
|
||||
}, [config]);
|
||||
|
||||
return <></>;
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user