chore: misc prerelease fixes
Some checks failed
Release Please / release-please (push) Waiting to run
Build Snapshot Release / build (push) Has been cancelled
Test / test-nix (push) Has been cancelled
Test / test-win (push) Has been cancelled

This commit is contained in:
Gareth George
2025-01-07 22:31:40 -08:00
parent b8acc12d6a
commit ace719f54b
8 changed files with 60 additions and 37 deletions

View File

@@ -62,7 +62,7 @@ func NewSyncClient(mgr *SyncManager, localInstanceID string, peer *v1.Multihost_
peer.GetInstanceUrl(),
)
return &SyncClient{
c := &SyncClient{
mgr: mgr,
localInstanceID: localInstanceID,
peer: peer,
@@ -70,7 +70,9 @@ func NewSyncClient(mgr *SyncManager, localInstanceID string, peer *v1.Multihost_
client: client,
oplog: oplog,
l: zap.L().Named(fmt.Sprintf("syncclient for %q", peer.GetInstanceId())),
}, nil
}
c.setConnectionState(v1.SyncConnectionState_CONNECTION_STATE_DISCONNECTED, "starting up")
return c, nil
}
func (c *SyncClient) setConnectionState(state v1.SyncConnectionState, message string) {
@@ -116,6 +118,7 @@ func (c *SyncClient) runSyncInternal(ctx context.Context) error {
stream := c.client.Sync(ctx)
ctx, cancelWithError := context.WithCancelCause(ctx)
defer cancelWithError(nil)
receiveError := make(chan error, 1)
receive := make(chan *v1.SyncStreamItem, 1)

View File

@@ -121,9 +121,16 @@ func (m *SyncManager) runSyncWithPeerInternal(ctx context.Context, config *v1.Co
if err != nil {
return fmt.Errorf("creating sync client: %w", err)
}
m.mu.Lock()
m.syncClients[knownHostPeer.InstanceId] = newClient
m.mu.Unlock()
go newClient.RunSync(ctx)
go func() {
newClient.RunSync(ctx)
m.mu.Lock()
delete(m.syncClients, knownHostPeer.InstanceId)
m.mu.Unlock()
}()
return nil
}

View File

@@ -79,10 +79,14 @@ func (o *OpLog) QueryMetadata(q Query, f func(OpMetadata) error) error {
}
func (o *OpLog) Subscribe(q Query, f *Subscription) {
o.subscribersMu.Lock()
defer o.subscribersMu.Unlock()
o.subscribers = append(o.subscribers, subAndQuery{f: f, q: q})
}
func (o *OpLog) Unsubscribe(f *Subscription) error {
o.subscribersMu.Lock()
defer o.subscribersMu.Unlock()
for i, sub := range o.subscribers {
if sub.f == f {
o.subscribers = append(o.subscribers[:i], o.subscribers[i+1:]...)

View File

@@ -30,7 +30,6 @@ type SqliteStore struct {
dbpool *sqlitex.Pool
lastIDVal atomic.Int64
dblock *flock.Flock
querymu sync.RWMutex
ogidCache *lru.TwoQueueCache[opGroupInfo, int64]
@@ -222,8 +221,6 @@ func (m *SqliteStore) buildQueryWhereClause(q oplog.Query, includeSelectClauses
}
func (m *SqliteStore) Query(q oplog.Query, f func(*v1.Operation) error) error {
m.querymu.RLock()
defer m.querymu.RUnlock()
conn, err := m.dbpool.Take(context.Background())
if err != nil {
return fmt.Errorf("query: %v", err)
@@ -251,8 +248,6 @@ func (m *SqliteStore) Query(q oplog.Query, f func(*v1.Operation) error) error {
}
func (m *SqliteStore) QueryMetadata(q oplog.Query, f func(oplog.OpMetadata) error) error {
m.querymu.RLock()
defer m.querymu.RUnlock()
conn, err := m.dbpool.Take(context.Background())
if err != nil {
return fmt.Errorf("query metadata: %v", err)
@@ -323,8 +318,6 @@ func (m *SqliteStore) findOrCreateGroup(conn *sqlite.Conn, op *v1.Operation) (og
}
func (m *SqliteStore) Transform(q oplog.Query, f func(*v1.Operation) (*v1.Operation, error)) error {
m.querymu.Lock()
defer m.querymu.Unlock()
conn, err := m.dbpool.Take(context.Background())
if err != nil {
return fmt.Errorf("transform: %v", err)
@@ -332,7 +325,7 @@ func (m *SqliteStore) Transform(q oplog.Query, f func(*v1.Operation) (*v1.Operat
defer m.dbpool.Put(conn)
where, args := m.buildQueryWhereClause(q, true)
return withSqliteTransaction(conn, func() error {
return withImmediateSqliteTransaction(conn, func() error {
return sqlitex.ExecuteTransient(conn, "SELECT operations.operation FROM operations JOIN operation_groups ON operations.ogid = operation_groups.ogid WHERE "+where, &sqlitex.ExecOptions{
Args: args,
ResultFunc: func(stmt *sqlite.Stmt) error {
@@ -391,15 +384,13 @@ func (m *SqliteStore) addInternal(conn *sqlite.Conn, op ...*v1.Operation) error
}
func (m *SqliteStore) Add(op ...*v1.Operation) error {
m.querymu.Lock()
defer m.querymu.Unlock()
conn, err := m.dbpool.Take(context.Background())
if err != nil {
return fmt.Errorf("add operation: %v", err)
}
defer m.dbpool.Put(conn)
return withSqliteTransaction(conn, func() error {
return withImmediateSqliteTransaction(conn, func() error {
for _, o := range op {
o.Id = m.lastIDVal.Add(1)
if o.FlowId == 0 {
@@ -415,15 +406,13 @@ func (m *SqliteStore) Add(op ...*v1.Operation) error {
}
func (m *SqliteStore) Update(op ...*v1.Operation) error {
m.querymu.Lock()
defer m.querymu.Unlock()
conn, err := m.dbpool.Take(context.Background())
if err != nil {
return fmt.Errorf("update operation: %v", err)
}
defer m.dbpool.Put(conn)
return withSqliteTransaction(conn, func() error {
return withImmediateSqliteTransaction(conn, func() error {
return m.updateInternal(conn, op...)
})
}
@@ -456,8 +445,6 @@ func (m *SqliteStore) updateInternal(conn *sqlite.Conn, op ...*v1.Operation) err
}
func (m *SqliteStore) Get(opID int64) (*v1.Operation, error) {
m.querymu.RLock()
defer m.querymu.RUnlock()
conn, err := m.dbpool.Take(context.Background())
if err != nil {
return nil, fmt.Errorf("get operation: %v", err)
@@ -491,8 +478,6 @@ func (m *SqliteStore) Get(opID int64) (*v1.Operation, error) {
}
func (m *SqliteStore) Delete(opID ...int64) ([]*v1.Operation, error) {
m.querymu.Lock()
defer m.querymu.Unlock()
conn, err := m.dbpool.Take(context.Background())
if err != nil {
return nil, fmt.Errorf("delete operation: %v", err)
@@ -500,7 +485,7 @@ func (m *SqliteStore) Delete(opID ...int64) ([]*v1.Operation, error) {
defer m.dbpool.Put(conn)
ops := make([]*v1.Operation, 0, len(opID))
return ops, withSqliteTransaction(conn, func() error {
return ops, withImmediateSqliteTransaction(conn, func() error {
// fetch all the operations we're about to delete
predicate := []string{"operations.id IN ("}
args := []any{}
@@ -548,8 +533,6 @@ func (m *SqliteStore) Delete(opID ...int64) ([]*v1.Operation, error) {
}
func (m *SqliteStore) ResetForTest(t *testing.T) error {
m.querymu.Lock()
defer m.querymu.Unlock()
conn, err := m.dbpool.Take(context.Background())
if err != nil {
return fmt.Errorf("reset for test: %v", err)

View File

@@ -5,6 +5,7 @@ import (
"zombiezen.com/go/sqlite/sqlitex"
)
// withSqliteTransaction should be used when the function only executes reads
func withSqliteTransaction(conn *sqlite.Conn, f func() error) error {
var err error
endFunc := sqlitex.Transaction(conn)
@@ -12,3 +13,25 @@ func withSqliteTransaction(conn *sqlite.Conn, f func() error) error {
endFunc(&err)
return err
}
// withImmediateSqliteTransaction runs f inside a BEGIN IMMEDIATE transaction,
// which acquires the database write lock up front. Use this for functions that
// perform writes, so the transaction cannot fail a lock upgrade partway through.
// The error returned by f (or by committing) is reported to endFunc so the
// transaction is rolled back on failure and committed on success.
func withImmediateSqliteTransaction(conn *sqlite.Conn, f func() error) error {
	// err is declared by := here; endFunc(&err) inspects it to decide
	// between COMMIT and ROLLBACK.
	endFunc, err := sqlitex.ImmediateTransaction(conn)
	if err != nil {
		return err
	}
	err = f()
	endFunc(&err)
	return err
}
// withExclusiveSqliteTransaction runs f inside a BEGIN EXCLUSIVE transaction,
// which takes the strongest SQLite lock and prevents other connections from
// reading or writing for the duration. Reserve this for operations that must
// not race with any concurrent access. The error returned by f (or by
// committing) is reported to endFunc so the transaction is rolled back on
// failure and committed on success.
func withExclusiveSqliteTransaction(conn *sqlite.Conn, f func() error) error {
	// err is declared by := here; endFunc(&err) inspects it to decide
	// between COMMIT and ROLLBACK.
	endFunc, err := sqlitex.ExclusiveTransaction(conn)
	if err != nil {
		return err
	}
	err = f()
	endFunc(&err)
	return err
}

View File

@@ -15,20 +15,20 @@ fi
# Check if MacOS
if [ "$(uname)" = "Darwin" ]; then
if [ -d "/Volumes/RAM_Disk_512MB" ]; then
echo "RAM disk /Volumes/RAM_Disk_512MB already exists."
if [ -d "/Volumes/RAM_Disk_1GB" ]; then
echo "RAM disk /Volumes/RAM_Disk_1GB already exists."
else
sudo diskutil erasevolume HFS+ RAM_Disk_512MB $(hdiutil attach -nomount ram://1048576)
sudo diskutil erasevolume HFS+ RAM_Disk_1GB $(hdiutil attach -nomount ram://2048000)
fi
export TMPDIR="/Volumes/RAM_Disk_512MB"
export TMPDIR="/Volumes/RAM_Disk_1GB"
export RESTIC_CACHE_DIR="$TMPDIR/.cache"
echo "Created 512MB RAM disk at /Volumes/RAM_Disk_512MB"
echo "Created 1GB RAM disk at /Volumes/RAM_Disk_1GB"
echo "TMPDIR=$TMPDIR"
echo "RESTIC_CACHE_DIR=$RESTIC_CACHE_DIR"
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
# Create ramdisk
sudo mkdir -p /mnt/ramdisk
sudo mount -t tmpfs -o size=512M tmpfs /mnt/ramdisk
sudo mount -t tmpfs -o size=1024M tmpfs /mnt/ramdisk
export TMPDIR="/mnt/ramdisk"
export RESTIC_CACHE_DIR="$TMPDIR/.cache"
fi

View File

@@ -15,11 +15,11 @@ fi
# Check if MacOS
if [ "$(uname)" = "Darwin" ]; then
sudo diskutil unmount /Volumes/RAM_Disk_512MB
hdiutil detach /Volumes/RAM_Disk_512MB
sudo diskutil unmount /Volumes/RAM_Disk_1GB
hdiutil detach /Volumes/RAM_Disk_1GB
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
sudo umount /mnt/ramdisk
fi
unset TMPDIR
unset XDG_CACHE_HOME
unset XDG_CACHE_HOME

View File

@@ -126,14 +126,15 @@ export const OperationTreeView = ({
const otherTrees: React.ReactNode[] = [];
for (const instance of Object.keys(backupsByInstance)) {
const instanceOps = backupsByInstance[instance];
const instanceBackups = backupsByInstance[instance];
const instTree = (
<DisplayOperationTree
operations={backups}
operations={instanceBackups}
isPlanView={isPlanView}
onSelect={(flow) => {
setSelectedBackupId(flow ? flow.flowID : null);
}}
expand={instance === config!.instance}
/>
);
@@ -205,10 +206,12 @@ const DisplayOperationTree = ({
operations,
isPlanView,
onSelect,
expand,
}: {
operations: FlowDisplayInfo[];
isPlanView?: boolean;
onSelect?: (flow: FlowDisplayInfo | null) => any;
expand?: boolean;
}) => {
const [treeData, setTreeData] = useState<{
tree: OpTreeNode[];
@@ -237,7 +240,7 @@ const DisplayOperationTree = ({
<Tree<OpTreeNode>
treeData={treeData.tree}
showIcon
defaultExpandedKeys={treeData.expanded}
defaultExpandedKeys={expand ? treeData.expanded : []}
onSelect={(keys, info) => {
if (info.selectedNodes.length === 0) return;
const backup = info.selectedNodes[0].backup;
@@ -433,7 +436,7 @@ const buildTree = (
expanded = expandTree(tree, 5, 0, 2);
} else {
tree = buildTreePlan(operations);
expanded = expandTree(tree, 5, 2, 4);
expanded = expandTree(tree, 5, 1, 3);
}
return { tree, expanded };
};