fix: use new orchestrator queue
.github/workflows/build.yml (new file, vendored): 43 lines added
@@ -0,0 +1,43 @@
+# This workflow will build a golang project
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
+
+name: Build Snapshot Release
+
+on:
+  push:
+    branches: ["main"]
+  workflow_dispatch:
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: "1.21"
+
+      - name: Setup NodeJS
+        uses: actions/setup-node@v4
+        with:
+          node-version: "20"
+
+      - name: Build
+        uses: goreleaser/goreleaser-action@v5
+        with:
+          distribution: goreleaser
+          version: latest
+          args: release --snapshot --clean
+
+      - name: Upload Artifacts
+        uses: actions/upload-artifact@v3
+        with:
+          name: backrest-snapshot-builds
+          path: |
+            dist/*.tar.gz
+            dist/*.zip
@@ -1,7 +1,7 @@
 # This workflow will build a golang project
 # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
 
-name: Build and Test
+name: Test
 
 on:
   push:
@@ -11,14 +11,11 @@ on:
   workflow_dispatch:
 
 jobs:
-  build-nix:
+  test-nix:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
 
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-
       - name: Set up Go
        uses: actions/setup-go@v4
        with:
@@ -29,17 +26,16 @@ jobs:
        with:
          node-version: "20"
 
+      - name: Generate
+        run: go generate ./...
+
      - name: Build
-        uses: goreleaser/goreleaser-action@v5
-        with:
-          distribution: goreleaser
-          version: latest
-          args: release --snapshot --clean
+        run: go build ./...
 
      - name: Test
        run: PATH=$(pwd):$PATH go test ./... --race
 
-  build-win:
+  test-win:
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v3
@@ -13,6 +13,7 @@ import (
     "github.com/garethgeorge/backrest/internal/config"
     "github.com/garethgeorge/backrest/internal/hook"
     "github.com/garethgeorge/backrest/internal/oplog"
+    "github.com/garethgeorge/backrest/internal/queue"
     "github.com/garethgeorge/backrest/internal/rotatinglog"
     "go.uber.org/zap"
     "google.golang.org/protobuf/proto"
@@ -40,7 +41,7 @@ type Orchestrator struct {
     config       *v1.Config
     OpLog        *oplog.OpLog
     repoPool     *resticRepoPool
-    taskQueue    taskQueue
+    taskQueue    *queue.TimePriorityQueue[scheduledTask]
     hookExecutor *hook.HookExecutor
     logStore     *rotatinglog.RotatingLog
 
@@ -60,9 +61,7 @@ func NewOrchestrator(resticBin string, cfg *v1.Config, oplog *oplog.OpLog, logSt
         config: cfg,
         // repoPool created with a memory store to ensure the config is updated in an atomic operation with the repo pool's config value.
         repoPool: newResticRepoPool(resticBin, &config.MemoryStore{Config: cfg}),
-        taskQueue: newTaskQueue(func() time.Time {
-            return o.curTime()
-        }),
+        taskQueue:    queue.NewTimePriorityQueue[scheduledTask](),
         hookExecutor: hook.NewHookExecutor(oplog, logStore),
         logStore:     logStore,
     }
@@ -194,29 +193,23 @@ func (o *Orchestrator) CancelOperation(operationId int64, status v1.OperationSta
         running.cancel()
     }
 
-    tasks := o.taskQueue.Reset()
-    remaining := make([]scheduledTask, 0, len(tasks))
+    allTasks := o.taskQueue.GetAll()
+    idx := slices.IndexFunc(allTasks, func(t scheduledTask) bool {
+        return t.task.OperationId() == operationId
+    })
+    if idx == -1 {
+        return nil
+    }
 
-    for _, t := range tasks {
-        if t.task.OperationId() == operationId {
-            if err := t.task.Cancel(status); err != nil {
-                return fmt.Errorf("cancel task %q: %w", t.task.Name(), err)
-            }
-            // check if the task has a next after it's current 'runAt' time, if it does then we will schedule the next run.
-            if nextTime := t.task.Next(t.runAt); nextTime != nil {
-                remaining = append(remaining, scheduledTask{
-                    task:  t.task,
-                    runAt: *nextTime,
-                })
-            }
-        } else {
-            remaining = append(remaining, *t)
-        }
-    }
-
-    o.taskQueue.Push(remaining...)
+    t := allTasks[idx]
+    o.taskQueue.Remove(t)
+    if err := t.task.Cancel(status); err != nil {
+        return fmt.Errorf("cancel task %q: %w", t.task.Name(), err)
+    }
+    if nextTime := t.task.Next(t.runAt.Add(1 * time.Second)); nextTime != nil {
+        t.runAt = *nextTime
+        o.taskQueue.Enqueue(*nextTime, t.priority, t)
+    }
 
     return nil
 }
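For readers following the orchestrator change: CancelOperation drops the old drain-and-rebuild pattern (Reset, filter, Push) in favor of the new queue's targeted API: GetAll to snapshot pending tasks, slices.IndexFunc to find the matching operation, Remove to drop it, and a fresh Enqueue when the task has a next run. Below is a minimal sketch of that pattern against the TimePriorityQueue API as it appears in this diff; the job type and cancelJob helper are illustrative stand-ins rather than repo code, and the sketch assumes it is built inside the backrest module since internal/queue cannot be imported from outside it.

package main

import (
    "fmt"
    "slices"
    "time"

    "github.com/garethgeorge/backrest/internal/queue"
)

// job is an illustrative element type; like scheduledTask in this commit, it
// satisfies the queue's constraint by providing an Eq method.
type job struct {
    id    int64
    runAt time.Time
}

func (j job) Eq(other job) bool { return j.id == other.id }

// cancelJob mirrors the new CancelOperation flow: snapshot, find, remove, and
// optionally reschedule, instead of draining and rebuilding the whole queue.
func cancelJob(q *queue.TimePriorityQueue[job], id int64) {
    all := q.GetAll()
    idx := slices.IndexFunc(all, func(j job) bool { return j.id == id })
    if idx == -1 {
        return // nothing queued under this id
    }
    j := all[idx]
    q.Remove(j) // only this entry is touched; the rest of the queue stays put
    fmt.Printf("cancelled job %d scheduled for %s\n", j.id, j.runAt.Format(time.RFC3339))
}

func main() {
    q := queue.NewTimePriorityQueue[job]()
    runAt := time.Now().Add(time.Hour)
    q.Enqueue(runAt, 1, job{id: 42, runAt: runAt})
    cancelJob(q, 42)
    fmt.Println("remaining:", q.Len()) // 0
}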
@@ -231,7 +224,7 @@ func (o *Orchestrator) Run(mainCtx context.Context) {
         }
 
         t := o.taskQueue.Dequeue(mainCtx)
-        if t == nil {
+        if t.task == nil {
             continue
         }
 
@@ -272,7 +265,7 @@ func (o *Orchestrator) ScheduleTask(t Task, priority int, callbacks ...func(erro
         return
     }
     zap.L().Info("scheduling task", zap.String("task", t.Name()), zap.String("runAt", nextRun.Format(time.RFC3339)))
-    o.taskQueue.Push(scheduledTask{
+    o.taskQueue.Enqueue(*nextRun, priority, scheduledTask{
         task:     t,
         runAt:    *nextRun,
         priority: priority,
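The changed nil check in Run (t == nil becoming t.task == nil) follows from the new queue handing back a scheduledTask value rather than a pointer: when the context ends with nothing ready, Dequeue appears to return the zero value, which is also how the updated stress test detects an empty dequeue. A small sketch of that convention, with a stand-in item type (not repo code) and the same module-internal caveat as the sketch above:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/garethgeorge/backrest/internal/queue"
)

// item is a stand-in element type with the Eq method the queue now requires.
type item struct{ name string }

func (i item) Eq(other item) bool { return i.name == other.name }

func main() {
    q := queue.NewTimePriorityQueue[item]()

    // Nothing is enqueued, so Dequeue can only return once the context ends.
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()

    got := q.Dequeue(ctx)
    // The zero value stands for "nothing dequeued"; the orchestrator's Run
    // loop makes the equivalent check with t.task == nil.
    fmt.Printf("zero value returned: %+v (empty name: %t)\n", got, got.name == "")
}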
@@ -164,6 +164,17 @@ type scheduledTask struct {
     config *v1.Config
 }
 
+func (s *scheduledTask) Less(other *scheduledTask) bool {
+    if s.priority != other.priority {
+        return s.priority > other.priority
+    }
+    return s.runAt.Before(other.runAt)
+}
+
+func (s scheduledTask) Eq(other scheduledTask) bool {
+    return s.task == other.task && s.runAt.Equal(other.runAt) && s.priority == other.priority && s.config == other.config
+}
+
 type scheduledTaskHeap struct {
     tasks      []*scheduledTask
     comparator func(i, j *scheduledTask) bool
|||||||
@@ -13,7 +13,12 @@ func (v val) Less(other val) bool {
|
|||||||
return v.v < other.v
|
return v.v < other.v
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (v val) Eq(other val) bool {
|
||||||
|
return v.v == other.v
|
||||||
|
}
|
||||||
|
|
||||||
func TestGenericHeapInit(t *testing.T) {
|
func TestGenericHeapInit(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
genHeap := genericHeap[val]{{v: 3}, {v: 2}, {v: 1}}
|
genHeap := genericHeap[val]{{v: 3}, {v: 2}, {v: 1}}
|
||||||
heap.Init(&genHeap)
|
heap.Init(&genHeap)
|
||||||
|
|
||||||
@@ -30,6 +35,7 @@ func TestGenericHeapInit(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGenericHeapPushPop(t *testing.T) {
|
func TestGenericHeapPushPop(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
genHeap := genericHeap[val]{} // empty heap
|
genHeap := genericHeap[val]{} // empty heap
|
||||||
heap.Push(&genHeap, val{v: 3})
|
heap.Push(&genHeap, val{v: 3})
|
||||||
heap.Push(&genHeap, val{v: 2})
|
heap.Push(&genHeap, val{v: 2})
|
||||||
|
|||||||
@@ -8,13 +8,13 @@ import (
 )
 
 // TimePriorityQueue is a priority queue that dequeues elements at (or after) a specified time, and prioritizes elements based on a priority value. It is safe for concurrent use.
-type TimePriorityQueue[T any] struct {
+type TimePriorityQueue[T equals[T]] struct {
     mu     sync.Mutex
     tqueue TimeQueue[priorityEntry[T]]
     ready  genericHeap[priorityEntry[T]]
 }
 
-func NewTimePriorityQueue[T any]() *TimePriorityQueue[T] {
+func NewTimePriorityQueue[T equals[T]]() *TimePriorityQueue[T] {
     return &TimePriorityQueue[T]{
         tqueue: TimeQueue[priorityEntry[T]]{},
         ready:  genericHeap[priorityEntry[T]]{},
@@ -23,33 +23,80 @@ func NewTimePriorityQueue[T any]() *TimePriorityQueue[T] {
 
 func (t *TimePriorityQueue[T]) Len() int {
     t.mu.Lock()
+    t.tqueue.mu.Lock()
     defer t.mu.Unlock()
-    return t.tqueue.Len() + t.ready.Len()
+    defer t.tqueue.mu.Unlock()
+    return t.tqueue.heap.Len() + t.ready.Len()
 }
 
 func (t *TimePriorityQueue[T]) Peek() T {
     t.mu.Lock()
+    t.tqueue.mu.Lock()
     defer t.mu.Unlock()
+    defer t.tqueue.mu.Unlock()
 
     if t.ready.Len() > 0 {
         return t.ready.Peek().v
     }
-    return t.tqueue.Peek().v
+    if t.tqueue.heap.Len() > 0 {
+        return t.tqueue.heap.Peek().v.v
+    }
+    var zero T
+    return zero
 }
 
 func (t *TimePriorityQueue[T]) Reset() []T {
     t.mu.Lock()
+    t.tqueue.mu.Lock()
     defer t.mu.Unlock()
+    defer t.tqueue.mu.Unlock()
 
     var res []T
     for t.ready.Len() > 0 {
         res = append(res, heap.Pop(&t.ready).(priorityEntry[T]).v)
     }
-    for t.tqueue.Len() > 0 {
+    for t.tqueue.heap.Len() > 0 {
         res = append(res, heap.Pop(&t.tqueue.heap).(timeQueueEntry[priorityEntry[T]]).v.v)
     }
     return res
 }
 
+func (t *TimePriorityQueue[T]) GetAll() []T {
+    t.mu.Lock()
+    t.tqueue.mu.Lock()
+    defer t.mu.Unlock()
+    defer t.tqueue.mu.Unlock()
+    res := make([]T, 0, t.tqueue.heap.Len()+t.ready.Len())
+    for _, entry := range t.tqueue.heap {
+        res = append(res, entry.v.v)
+    }
+    for _, entry := range t.ready {
+        res = append(res, entry.v)
+    }
+    return res
+}
+
+func (t *TimePriorityQueue[T]) Remove(v T) {
+    t.mu.Lock()
+    t.tqueue.mu.Lock()
+    defer t.mu.Unlock()
+    defer t.tqueue.mu.Unlock()
+
+    for idx := 0; idx < t.tqueue.heap.Len(); idx++ {
+        if t.tqueue.heap[idx].v.v.Eq(v) {
+            heap.Remove(&t.tqueue.heap, idx)
+            return
+        }
+    }
+
+    for idx := 0; idx < t.ready.Len(); idx++ {
+        if t.ready[idx].v.Eq(v) {
+            heap.Remove(&t.ready, idx)
+            return
+        }
+    }
+}
+
 func (t *TimePriorityQueue[T]) Enqueue(at time.Time, priority int, v T) {
     t.mu.Lock()
     t.tqueue.Enqueue(at, priorityEntry[T]{at, priority, v})
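Taken together with the doc comment above, the queue's contract is: an element becomes eligible only at or after its enqueue time, the highest-priority eligible element is dequeued first, and the new GetAll and Remove let callers inspect and surgically drop entries that are still pending. A minimal usage sketch against the API as shown in this diff; elem is a stand-in type, and the sketch assumes it is built inside the backrest module because internal/queue is not importable elsewhere.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/garethgeorge/backrest/internal/queue"
)

// elem is a stand-in element type with the Eq method the queue now requires.
type elem struct{ name string }

func (e elem) Eq(other elem) bool { return e.name == other.name }

func main() {
    q := queue.NewTimePriorityQueue[elem]()
    now := time.Now()

    q.Enqueue(now.Add(-time.Second), 1, elem{"ready-low"})   // already eligible, low priority
    q.Enqueue(now.Add(-time.Second), 10, elem{"ready-high"}) // already eligible, high priority
    q.Enqueue(now.Add(time.Hour), 5, elem{"future"})         // not eligible for another hour

    fmt.Println(q.Dequeue(context.Background()).name) // ready-high: priority wins among eligible items
    fmt.Println(q.Dequeue(context.Background()).name) // ready-low

    fmt.Println(len(q.GetAll())) // 1: only "future" is still queued
    q.Remove(elem{"future"})     // drop it without waiting for its time to come
    fmt.Println(q.Len())         // 0
}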
@@ -59,8 +106,9 @@ func (t *TimePriorityQueue[T]) Enqueue(at time.Time, priority int, v T) {
 func (t *TimePriorityQueue[T]) Dequeue(ctx context.Context) T {
     t.mu.Lock()
     for {
+        t.tqueue.mu.Lock()
         for t.tqueue.heap.Len() > 0 {
-            thead := t.tqueue.Peek() // peek at the head of the time queue
+            thead := t.tqueue.heap.Peek() // peek at the head of the time queue
             if thead.at.Before(time.Now()) {
                 tqe := heap.Pop(&t.tqueue.heap).(timeQueueEntry[priorityEntry[T]])
                 heap.Push(&t.ready, tqe.v)
@@ -68,6 +116,7 @@ func (t *TimePriorityQueue[T]) Dequeue(ctx context.Context) T {
                 break
             }
         }
+        t.tqueue.mu.Unlock()
         if t.ready.Len() > 0 {
             defer t.mu.Unlock()
             return heap.Pop(&t.ready).(priorityEntry[T]).v
@@ -80,7 +129,7 @@ func (t *TimePriorityQueue[T]) Dequeue(ctx context.Context) T {
     }
 }
 
-type priorityEntry[T any] struct {
+type priorityEntry[T equals[T]] struct {
     at       time.Time
     priority int
     v        T
@@ -89,3 +138,7 @@ type priorityEntry[T any] struct {
 func (t priorityEntry[T]) Less(other priorityEntry[T]) bool {
     return t.priority > other.priority
 }
+
+func (t priorityEntry[T]) Eq(other priorityEntry[T]) bool {
+    return t.at == other.at && t.priority == other.priority && t.v.Eq(other.v)
+}
@@ -3,6 +3,7 @@ package queue
 import (
     "context"
     "math/rand"
+    "slices"
     "testing"
     "time"
 )
@@ -10,11 +11,11 @@ import (
 // TestTPQEnqueue tests that enqueued elements are retruned highest priority first.
 func TestTPQPriority(t *testing.T) {
     t.Parallel()
-    tpq := NewTimePriorityQueue[int]()
+    tpq := NewTimePriorityQueue[val]()
 
     now := time.Now().Add(-time.Second)
     for i := 0; i < 100; i++ {
-        tpq.Enqueue(now, i, i)
+        tpq.Enqueue(now, i, val{i})
     }
 
     if tpq.Len() != 100 {
@@ -23,7 +24,7 @@ func TestTPQPriority(t *testing.T) {
 
     for i := 99; i >= 0; i-- {
         v := tpq.Dequeue(context.Background())
-        if v != i {
+        if v.v != i {
             t.Errorf("expected %d, got %d", i, v)
         }
     }
@@ -31,14 +32,14 @@ func TestTPQPriority(t *testing.T) {
 
 func TestTPQMixedReadinessStates(t *testing.T) {
     t.Parallel()
-    tpq := NewTimePriorityQueue[int]()
+    tpq := NewTimePriorityQueue[val]()
 
     now := time.Now()
     for i := 0; i < 100; i++ {
-        tpq.Enqueue(now.Add(-100*time.Millisecond), i, i)
+        tpq.Enqueue(now.Add(-100*time.Millisecond), i, val{i})
     }
     for i := 0; i < 100; i++ {
-        tpq.Enqueue(now.Add(100*time.Millisecond), i, i)
+        tpq.Enqueue(now.Add(100*time.Millisecond), i, val{i})
     }
 
     if tpq.Len() != 200 {
@@ -48,7 +49,7 @@ func TestTPQMixedReadinessStates(t *testing.T) {
     for j := 0; j < 2; j++ {
         for i := 99; i >= 0; i-- {
             v := tpq.Dequeue(context.Background())
-            if v != i {
+            if v.v != i {
                 t.Errorf("pass %d expected %d, got %d", j, i, v)
             }
         }
@@ -56,7 +57,8 @@ func TestTPQMixedReadinessStates(t *testing.T) {
 }
 
 func TestTPQStress(t *testing.T) {
-    tpq := NewTimePriorityQueue[int]()
+    t.Parallel()
+    tpq := NewTimePriorityQueue[val]()
     start := time.Now()
 
     totalEnqueued := 0
@@ -65,8 +67,8 @@ func TestTPQStress(t *testing.T) {
     go func() {
         ctx, _ := context.WithDeadline(context.Background(), start.Add(1*time.Second))
         for ctx.Err() == nil {
-            v := rand.Intn(100)
-            tpq.Enqueue(time.Now().Add(time.Duration(rand.Intn(1000)-500)*time.Millisecond), rand.Intn(5), v)
+            v := rand.Intn(100) + 1
+            tpq.Enqueue(time.Now().Add(time.Duration(rand.Intn(1000)-500)*time.Millisecond), rand.Intn(5), val{v})
             totalEnqueuedSum += v
             totalEnqueued++
         }
@@ -76,11 +78,123 @@ func TestTPQStress(t *testing.T) {
     totalDequeued := 0
     sum := 0
     for ctx.Err() == nil || totalDequeued < totalEnqueued {
-        sum += tpq.Dequeue(ctx)
-        totalDequeued++
+        v := tpq.Dequeue(ctx)
+        if v.v != 0 {
+            totalDequeued++
+            sum += v.v
+        }
+    }
+
+    if totalDequeued != totalEnqueued {
+        t.Errorf("expected totalDequeued to be %d, got %d", totalEnqueued, totalDequeued)
     }
 
     if sum != totalEnqueuedSum {
         t.Errorf("expected sum to be %d, got %d", totalEnqueuedSum, sum)
     }
 }
+
+func TestTPQRemove(t *testing.T) {
+    t.Parallel()
+    tpq := NewTimePriorityQueue[val]()
+
+    now := time.Now().Add(-time.Second) // make sure the time is in the past
+    for i := 0; i < 100; i++ {
+        tpq.Enqueue(now, -i, val{i})
+    }
+
+    if tpq.Len() != 100 {
+        t.Errorf("expected length to be 100, got %d", tpq.Len())
+    }
+
+    // remove all even numbers, dequeue the odd numbers
+    for i := 0; i < 100; i += 2 {
+        tpq.Remove(val{i})
+        v := tpq.Dequeue(context.Background())
+        if v.v != i+1 {
+            t.Errorf("expected %d, got %d", i+1, v)
+        }
+    }
+
+    if tpq.Len() != 0 {
+        t.Errorf("expected length to be 0, got %d", tpq.Len())
+    }
+}
+
+func TestTPQReset(t *testing.T) {
+    t.Parallel()
+    tpq := NewTimePriorityQueue[val]()
+
+    now := time.Now() // make sure the time is in the past
+    for i := 0; i < 50; i++ {
+        tpq.Enqueue(now.Add(time.Second), i, val{i})
+    }
+    for i := 50; i < 100; i++ {
+        tpq.Enqueue(now.Add(-time.Second), i, val{i})
+    }
+
+    if tpq.Len() != 100 {
+        t.Errorf("expected length to be 100, got %d", tpq.Len())
+    }
+
+    dv := tpq.Dequeue(context.Background())
+    if dv.v != 99 {
+        t.Errorf("expected 99, got %d", dv.v)
+    }
+
+    vals := tpq.Reset()
+
+    if len(vals) != 99 {
+        t.Errorf("expected length to be 100, got %d", len(vals))
+    }
+
+    slices.SortFunc(vals, func(i, j val) int {
+        if i.v > j.v {
+            return 1
+        }
+        return -1
+    })
+
+    for i := 0; i < 99; i++ {
+        if vals[i].v != i {
+            t.Errorf("expected %d, got %d", i, vals[i].v)
+        }
+    }
+
+    if tpq.Len() != 0 {
+        t.Errorf("expected length to be 0, got %d", tpq.Len())
+    }
+}
+
+func TestTPQGetAll(t *testing.T) {
+    t.Parallel()
+    tpq := NewTimePriorityQueue[val]()
+    now := time.Now()
+
+    for i := 0; i < 100; i++ {
+        tpq.Enqueue(now.Add(time.Second), i, val{i})
+    }
+
+    if tpq.Len() != 100 {
+        t.Errorf("expected length to be 100, got %d", tpq.Len())
+    }
+
+    vals := tpq.GetAll()
+
+    if len(vals) != 100 {
+        t.Errorf("expected length to be 100, got %d", len(vals))
+    }
+
+    slices.SortFunc(vals, func(i, j val) int {
+        if i.v > j.v {
+            return 1
+        }
+        return -1
+    })
+
+    for i := 0; i < 100; i++ {
+        if vals[i].v != i {
+            t.Errorf("expected %d, got %d", i, vals[i].v)
+        }
+    }
+}
@@ -9,7 +9,7 @@ import (
 )
 
 // TimeQueue is a priority queue that dequeues elements at (or after) a specified time. It is safe for concurrent use.
-type TimeQueue[T any] struct {
+type TimeQueue[T equals[T]] struct {
     heap genericHeap[timeQueueEntry[T]]
 
     dequeueMu sync.Mutex
@@ -17,7 +17,7 @@ type TimeQueue[T any] struct {
     notify atomic.Pointer[chan struct{}]
 }
 
-func NewTimeQueue[T any]() *TimeQueue[T] {
+func NewTimeQueue[T equals[T]]() *TimeQueue[T] {
     return &TimeQueue[T]{
         heap: genericHeap[timeQueueEntry[T]]{},
     }
@@ -63,6 +63,29 @@ func (t *TimeQueue[T]) Reset() []T {
     return res
 }
 
+func (t *TimeQueue[T]) Remove(v T) {
+    t.mu.Lock()
+    defer t.mu.Unlock()
+
+    for idx := 0; idx < t.heap.Len(); idx++ {
+        if t.heap[idx].v.Eq(v) {
+            heap.Remove(&t.heap, idx)
+            return
+        }
+    }
+}
+
+func (t *TimeQueue[T]) GetAll() []T {
+    t.mu.Lock()
+    defer t.mu.Unlock()
+
+    res := make([]T, 0, t.heap.Len())
+    for _, entry := range t.heap {
+        res = append(res, entry.v)
+    }
+    return res
+}
+
 func (t *TimeQueue[T]) Dequeue(ctx context.Context) T {
     t.dequeueMu.Lock()
     defer t.dequeueMu.Unlock()
@@ -130,3 +153,7 @@ func (t timeQueueEntry[T]) Less(other timeQueueEntry[T]) bool {
 func (t timeQueueEntry[T]) Eq(other timeQueueEntry[T]) bool {
     return t.at.Equal(other.at)
 }
+
+type equals[T any] interface {
+    Eq(other T) bool
+}
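The new equals[T] constraint is why the element type parameters above changed from any: both queue types now need an Eq method to support Remove, so a bare int no longer compiles as an element type, which is why the tests switch from TimePriorityQueue[int] to the val helper. A minimal sketch of a type satisfying the constraint; key is illustrative rather than repo code, with the same module-internal caveat as the earlier sketches.

package main

import (
    "fmt"

    "github.com/garethgeorge/backrest/internal/queue"
)

// key is a stand-in element type. The constraint only asks for an
// Eq(T) bool method, so satisfying it is a one-line method.
type key struct{ id string }

func (k key) Eq(other key) bool { return k.id == other.id }

func main() {
    // Both queue types now take the constraint; key works, a plain int would not.
    tq := queue.NewTimeQueue[key]()
    tpq := queue.NewTimePriorityQueue[key]()
    fmt.Println(len(tq.GetAll()), tpq.Len()) // 0 0
}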