Merge pull request #17 from kiliantyler/feat/lru-cache

Merging so we can fix conflicts before the chat MR; this looks good to me and works fine.
This commit is contained in:
fccview
2026-01-03 07:42:08 +00:00
committed by GitHub
7 changed files with 83 additions and 18 deletions
+1 -1
View File
@@ -122,7 +122,7 @@ Add this to your `services.yaml`:
| Variable | Default | Description |
| --- | --- | --- |
| `PORT` | `3000` | The port the web dashboard listens on. Since `--network host` is used, this port opens directly on the host. |
| `MAX_PEERS` | `10000` | Maximum number of peers to track in the swarm. Unless you're expecting the entire internet to join, the default is probably fine. |
| `MAX_PEERS` | `1000000` | Maximum number of peers to track in the swarm. Unless you're expecting the entire internet to join, the default is probably fine. |
## » Usage
+2 -1
View File
@@ -16,11 +16,12 @@ const main = async () => {
const diagnostics = new DiagnosticsManager();
const sseManager = new SSEManager();
peerManager.addOrUpdatePeer(identity.id, peerManager.getSeq(), null);
peerManager.addOrUpdatePeer(identity.id, peerManager.getSeq());
const broadcastUpdate = () => {
sseManager.broadcastUpdate({
count: peerManager.size,
totalUnique: peerManager.totalUniquePeers,
direct: swarmManager.getSwarm().connections.size,
id: identity.id,
diagnostics: diagnostics.getStats(),
+1 -1
View File
@@ -13,7 +13,7 @@ const TOPIC = crypto.createHash("sha256").update(TOPIC_NAME).digest();
*/
const POW_PREFIX = "0000";
const MAX_PEERS = parseInt(process.env.MAX_PEERS) || 10000;
const MAX_PEERS = parseInt(process.env.MAX_PEERS) || 1000000;
const MAX_MESSAGE_SIZE = 2048;
const MAX_RELAY_HOPS = 2;
const MAX_CONNECTIONS = 32;
+12 -11
View File
@@ -42,13 +42,11 @@ class MessageHandler {
if (!sig) return;
try {
let key;
if (stored && stored.key) {
key = stored.key;
} else {
if (!this.peerManager.canAcceptPeer(id)) return;
key = createPublicKey(id);
}
// Check if we can accept new peers (only matters for new peers)
if (!stored && !this.peerManager.canAcceptPeer(id)) return;
// Derive public key on-demand from peer ID
const key = createPublicKey(id);
if (!verifySignature(`seq:${seq}`, sig, key)) {
this.diagnostics.increment("invalidSig");
@@ -59,7 +57,7 @@ class MessageHandler {
sourceSocket.peerId = id;
}
const wasNew = this.peerManager.addOrUpdatePeer(id, seq, key);
const wasNew = this.peerManager.addOrUpdatePeer(id, seq);
if (wasNew) {
this.diagnostics.increment("newPeersAdded");
@@ -83,10 +81,13 @@ class MessageHandler {
if (!sig) return;
const stored = this.peerManager.getPeer(id);
if (!stored || !stored.key) return;
// Only process leave messages for peers we know about
if (!this.peerManager.hasPeer(id)) return;
if (!verifySignature(`type:LEAVE:${id}`, sig, stored.key)) {
// Derive public key on-demand from peer ID
const key = createPublicKey(id);
if (!verifySignature(`type:LEAVE:${id}`, sig, key)) {
this.diagnostics.increment("invalidSig");
return;
}
+52
View File
@@ -0,0 +1,52 @@
/**
 * Least-Recently-Used cache with a fixed capacity.
 *
 * Relies on the guarantee that a JavaScript Map iterates entries in
 * insertion order: re-inserting a key moves it to the "most recent"
 * end, so the first key the iterator yields is always the oldest.
 */
class LRUCache {
  /**
   * @param {number} capacity - Maximum number of entries to retain.
   */
  constructor(capacity) {
    this.capacity = capacity;
    this.cache = new Map();
  }

  /**
   * Look up a key and mark it as most recently used.
   * @param {*} key - Cache key.
   * @returns {*} The stored value, or undefined when absent.
   */
  get(key) {
    if (!this.cache.has(key)) return undefined;
    const hit = this.cache.get(key);
    // Re-insert so the entry moves to the most-recently-used end.
    this.cache.delete(key);
    this.cache.set(key, hit);
    return hit;
  }

  /**
   * Insert or refresh a key, evicting the least recently used entry
   * when the cache grows past capacity.
   * @param {*} key - Cache key.
   * @param {*} value - Value to store.
   */
  set(key, value) {
    // Remove any existing entry so re-insertion refreshes its position.
    this.cache.delete(key);
    this.cache.set(key, value);
    if (this.cache.size > this.capacity) {
      // The Map's first key is the least recently used one.
      const lru = this.cache.keys().next().value;
      this.cache.delete(lru);
    }
  }

  /**
   * Membership check; does NOT update recency (unlike get()).
   * @param {*} key - Cache key.
   * @returns {boolean} Whether the key is present.
   */
  has(key) {
    return this.cache.has(key);
  }

  /**
   * Remove a single entry.
   * @param {*} key - Cache key.
   * @returns {boolean} True when an entry was actually removed.
   */
  delete(key) {
    return this.cache.delete(key);
  }

  /** @returns {number} Current number of entries. */
  get size() {
    return this.cache.size;
  }

  /**
   * Iterate over [key, value] pairs, oldest (least recently used) first.
   * Used by cleanup sweeps that expire stale entries.
   */
  *entries() {
    yield* this.cache.entries();
  }
}
module.exports = { LRUCache };
+13 -4
View File
@@ -1,19 +1,24 @@
const { MAX_PEERS, PEER_TIMEOUT } = require("../config/constants");
const { LRUCache } = require("./lru");
const { HyperLogLog } = require("./hyperloglog");
class PeerManager {
constructor() {
this.seenPeers = new Map();
this.seenPeers = new LRUCache(MAX_PEERS);
this.uniquePeersHLL = new HyperLogLog(10);
this.mySeq = 0;
}
addOrUpdatePeer(id, seq, key) {
addOrUpdatePeer(id, seq) {
const stored = this.seenPeers.get(id);
const wasNew = !stored;
// Track in HyperLogLog for total unique estimation
this.uniquePeersHLL.add(id);
this.seenPeers.set(id, {
seq,
lastSeen: Date.now(),
key,
});
return wasNew;
@@ -40,7 +45,7 @@ class PeerManager {
const now = Date.now();
let removed = 0;
for (const [id, data] of this.seenPeers) {
for (const [id, data] of this.seenPeers.entries()) {
if (now - data.lastSeen > PEER_TIMEOUT) {
this.seenPeers.delete(id);
removed++;
@@ -54,6 +59,10 @@ class PeerManager {
return this.seenPeers.size;
}
get totalUniquePeers() {
return this.uniquePeersHLL.count();
}
incrementSeq() {
return ++this.mySeq;
}
+2
View File
@@ -30,6 +30,7 @@ const setupRoutes = (app, identity, peerManager, swarm, sseManager, diagnostics)
const data = JSON.stringify({
count: peerManager.size,
totalUnique: peerManager.totalUniquePeers,
direct: swarm.getSwarm().connections.size,
id: identity.id,
diagnostics: diagnostics.getStats(),
@@ -44,6 +45,7 @@ const setupRoutes = (app, identity, peerManager, swarm, sseManager, diagnostics)
app.get("/api/stats", (req, res) => {
res.json({
count: peerManager.size,
totalUnique: peerManager.totalUniquePeers,
direct: swarm.getSwarm().connections.size,
id: identity.id,
diagnostics: diagnostics.getStats(),