fix: show 1-hour kill counts in the system kills widget

guarzo
2025-06-19 17:19:11 -04:00
parent af0869a39b
commit 7cdba4b507
9 changed files with 182 additions and 118 deletions

View File

@@ -49,7 +49,7 @@ export const KillsCounter = ({
content={
<div className="overflow-hidden flex w-[450px] flex-col" style={{ height: `${tooltipHeight}px` }}>
<div className="flex-1 h-full">
<SystemKillsList kills={limitedKills} onlyOneSystem />
<SystemKillsList kills={limitedKills} onlyOneSystem timeRange={1} />
</div>
</div>
}
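The tooltip list now receives an explicit timeRange of 1 hour, matching the sinceHours: 1 that the counter hook requests further down. SystemKillsList itself is not part of this diff; a minimal sketch of the render-time filter it is presumably expected to apply, with shapes and names assumed:

// Hypothetical sketch; SystemKillsList's real implementation is not in this commit.
interface KillLike {
  killmail_id: number;
  kill_time?: string; // ISO-8601 timestamp
}

function filterKillsByHours(kills: KillLike[], timeRange: number, now = Date.now()): KillLike[] {
  const cutoff = now - timeRange * 60 * 60 * 1000; // timeRange is in hours
  return kills.filter(kill => {
    if (!kill.kill_time) return false;
    const killTimeMs = new Date(kill.kill_time).getTime();
    return !Number.isNaN(killTimeMs) && killTimeMs >= cutoff;
  });
}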

View File

@@ -48,7 +48,7 @@ export const SolarSystemNodeDefault = memo((props: NodeProps<MapSolarSystemType>
>
<div className={clsx(classes.BookmarkWithIcon)}>
<span className={clsx(PrimeIcons.BOLT, classes.icon)} />
<span className={clsx(classes.text)}>{nodeVars.killsCount}</span>
<span className={clsx(classes.text)}>{localKillsCount}</span>
</div>
</KillsCounter>
)}

View File

@@ -47,7 +47,7 @@ export const SolarSystemNodeTheme = memo((props: NodeProps<MapSolarSystemType>)
>
<div className={clsx(classes.BookmarkWithIcon)}>
<span className={clsx(PrimeIcons.BOLT, classes.icon)} />
<span className={clsx(classes.text)}>{nodeVars.killsCount}</span>
<span className={clsx(classes.text)}>{localKillsCount}</span>
</div>
</KillsCounter>
)}
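Both node variants (SolarSystemNodeDefault above and SolarSystemNodeTheme here) swap the server-pushed nodeVars.killsCount for a locally derived localKillsCount. Its definition sits outside the visible hunks; presumably it is wired to the useNodeKillsCount hook changed below, along these lines (an assumption, not the actual line):

// Assumed wiring inside each node component; the nodeVars field names are hypothetical.
const localKillsCount = useNodeKillsCount(nodeVars.solarSystemId, nodeVars.killsCount);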

View File

@@ -22,6 +22,7 @@ export function useKillsCounter({ realSystemId }: UseKillsCounterProps) {
systemId: realSystemId,
outCommand,
showAllVisible: false,
sinceHours: 1,
});
const filteredKills = useMemo(() => {

View File

@@ -1,6 +1,7 @@
import { useEffect, useState, useCallback } from 'react';
import { useEffect, useState, useCallback, useMemo } from 'react';
import { useMapEventListener } from '@/hooks/Mapper/events';
import { Commands } from '@/hooks/Mapper/types';
import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';
interface Kill {
solar_system_id: number | string;
@@ -9,29 +10,51 @@ interface Kill {
interface MapEvent {
name: Commands;
data?: any;
data?: unknown;
payload?: Kill[];
}
export function useNodeKillsCount(systemId: number | string, initialKillsCount: number | null): number | null {
const [killsCount, setKillsCount] = useState<number | null>(initialKillsCount);
const { data: mapData } = useMapRootState();
const { detailedKills = {} } = mapData;
// Calculate 1-hour kill count from detailed kills
const oneHourKillCount = useMemo(() => {
const systemKills = detailedKills[systemId] || [];
if (systemKills.length === 0) return null;
const oneHourAgo = Date.now() - 60 * 60 * 1000; // 1 hour in milliseconds
const recentKills = systemKills.filter(kill => {
if (!kill.kill_time) return false;
const killTime = new Date(kill.kill_time).getTime();
if (isNaN(killTime)) return false;
return killTime >= oneHourAgo;
});
return recentKills.length > 0 ? recentKills.length : null;
}, [detailedKills, systemId]);
useEffect(() => {
setKillsCount(initialKillsCount);
}, [initialKillsCount]);
// Use 1-hour count if available, otherwise fall back to initial count
setKillsCount(oneHourKillCount !== null ? oneHourKillCount : initialKillsCount);
}, [oneHourKillCount, initialKillsCount]);
const handleEvent = useCallback(
(event: MapEvent): boolean => {
if (event.name === Commands.killsUpdated && Array.isArray(event.payload)) {
const killForSystem = event.payload.find(kill => kill.solar_system_id.toString() === systemId.toString());
if (killForSystem && typeof killForSystem.kills === 'number') {
// Only update if we don't have detailed kills data
if (!detailedKills[systemId] || detailedKills[systemId].length === 0) {
setKillsCount(killForSystem.kills);
}
}
return true;
}
return false;
},
[systemId],
[systemId, detailedKills],
);
useMapEventListener(handleEvent);
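Net effect of the hook changes: once detailed kills exist for a system, the locally computed 1-hour count wins and killsUpdated events are ignored; until then, event counts and the initial count pass through. The precedence rule, restated as a standalone function (a condensed sketch that glosses over which update arrived last):

// Condensed restatement of the hook's precedence; not the hook itself.
function resolveKillsCount(
  hasDetailedKills: boolean, // detailedKills[systemId] is non-empty
  oneHourKillCount: number | null, // kills in the last 60 minutes, or null if none
  initialKillsCount: number | null, // nodeVars-provided count
  latestEventCount: number | null, // most recent killsUpdated payload, if any
): number | null {
  if (hasDetailedKills) {
    // Detailed data present: the 1-hour window wins, falling back to the initial count.
    return oneHourKillCount ?? initialKillsCount;
  }
  // No detailed data yet: the latest event count wins over the initial count.
  return latestEventCount ?? initialKillsCount;
}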

View File

@@ -13,17 +13,14 @@ interface UseSystemKillsProps {
sinceHours?: number;
}
function combineKills(existing: DetailedKill[], incoming: DetailedKill[], sinceHours: number): DetailedKill[] {
const cutoff = Date.now() - sinceHours * 60 * 60 * 1000;
function combineKills(existing: DetailedKill[], incoming: DetailedKill[]): DetailedKill[] {
// Don't filter by time when storing - let components filter when displaying
const byId: Record<string, DetailedKill> = {};
for (const kill of [...existing, ...incoming]) {
if (!kill.kill_time) continue;
const killTimeMs = new Date(kill.kill_time).valueOf();
if (killTimeMs >= cutoff) {
byId[kill.killmail_id] = kill;
}
}
return Object.values(byId);
}
@@ -55,14 +52,14 @@ export function useSystemKills({ systemId, outCommand, showAllVisible = false, s
for (const [sid, newKills] of Object.entries(killsMap)) {
const existing = updated[sid] ?? [];
const combined = combineKills(existing, newKills, effectiveSinceHours);
const combined = combineKills(existing, newKills);
updated[sid] = combined;
}
return { ...prev, detailedKills: updated };
});
},
[update, effectiveSinceHours],
[update],
);
const fetchKills = useCallback(

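With the cutoff gone, combineKills is a last-write-wins merge keyed by killmail_id (entries without a kill_time are still skipped); the store keeps everything, and components apply the time window at display time, as in the tooltip sketch earlier. A usage example with minimal assumed shapes:

// Usage sketch for the merge above; DetailedKill reduced to the fields the merge touches.
const existing = [{ killmail_id: 1, kill_time: '2025-06-19T20:00:00Z' }];
const incoming = [
  { killmail_id: 1, kill_time: '2025-06-19T20:00:00Z' }, // duplicate: overwritten in place
  { killmail_id: 2, kill_time: '2025-06-19T21:05:00Z' }, // new: added
];
// combineKills(existing, incoming) yields both kills, each killmail_id exactly once.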
View File

@@ -16,11 +16,13 @@ defmodule WandererApp.Kills.Client do
@retry_delays [5_000, 10_000, 30_000, 60_000]
@max_retries 10
@health_check_interval :timer.seconds(30) # Check every 30 seconds
@message_timeout :timer.minutes(15) # Reconnect if no messages arrive within this window
defstruct [
:socket_pid,
:retry_timer_ref,
:connection_timeout_ref,
:last_message_time,
connected: false,
connecting: false,
subscribed_systems: MapSet.new(),
@@ -162,7 +164,8 @@ defmodule WandererApp.Kills.Client do
connecting: false,
socket_pid: socket_pid,
retry_count: 0, # Reset retry count only on successful connection
last_error: nil
last_error: nil,
last_message_time: System.system_time(:millisecond)
}
|> cancel_retry()
|> cancel_connection_timeout()
@@ -255,16 +258,9 @@ defmodule WandererApp.Kills.Client do
{:noreply, state}
end
# Handle process DOWN messages for socket monitoring
def handle_info({:DOWN, _ref, :process, pid, reason}, %{socket_pid: pid} = state) do
Logger.error("[Client] Socket process died: #{inspect(reason)}")
send(self(), {:disconnected, {:socket_died, reason}})
{:noreply, state}
end
def handle_info({:DOWN, _ref, :process, _pid, _reason}, state) do
# Ignore DOWN messages for other processes
{:noreply, state}
def handle_info({:message_received, _type}, state) do
# Update last message time when we receive a kill message
{:noreply, %{state | last_message_time: System.system_time(:millisecond)}}
end
def handle_info(_msg, state), do: {:noreply, state}
@@ -454,6 +450,22 @@ defmodule WandererApp.Kills.Client do
:needs_reconnect
end
defp check_health(%{socket_pid: pid, last_message_time: last_msg_time}) when not is_nil(last_msg_time) do
cond do
not socket_alive?(pid) ->
Logger.warning("[Client] Health check: Socket process #{inspect(pid)} is dead")
:needs_reconnect
# Check if we haven't received a message in the configured timeout
System.system_time(:millisecond) - last_msg_time > @message_timeout ->
Logger.warning("[Client] Health check: No messages received for 15+ minutes, reconnecting")
:needs_reconnect
true ->
:healthy
end
end
defp check_health(%{socket_pid: pid} = state) do
if socket_alive?(pid) do
:healthy
@@ -565,6 +577,9 @@ defmodule WandererApp.Kills.Client do
def handle_message(topic, event, payload, _transport, state) do
case {topic, event} do
{"killmails:lobby", "killmail_update"} ->
# Notify parent that we received a message
send(state.parent, {:message_received, :killmail_update})
# Use supervised task to handle failures gracefully
Task.Supervisor.start_child(
WandererApp.Kills.TaskSupervisor,
@@ -572,6 +587,9 @@ defmodule WandererApp.Kills.Client do
)
{"killmails:lobby", "kill_count_update"} ->
# Notify parent that we received a message
send(state.parent, {:message_received, :kill_count_update})
# Use supervised task to handle failures gracefully
Task.Supervisor.start_child(
WandererApp.Kills.TaskSupervisor,

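On the backend, the client gains a message watchdog: each lobby event refreshes last_message_time, and the periodic health check reconnects when the socket is dead or has been silent longer than @message_timeout. The same pattern for a generic browser WebSocket, sketched in TypeScript purely as an illustration (all names here are hypothetical; this is not the app's Elixir client):

// Hypothetical watchdog sketch mirroring the Elixir client's idea, not its code.
class WatchdogSocket {
  private socket!: WebSocket;
  private lastMessageTime = Date.now();
  private readonly messageTimeoutMs = 15 * 60 * 1000; // cf. @message_timeout
  private readonly healthCheckMs = 30 * 1000; // cf. @health_check_interval

  constructor(private readonly makeSocket: () => WebSocket) {
    this.open();
    setInterval(() => this.checkHealth(), this.healthCheckMs);
  }

  private open(): void {
    this.socket = this.makeSocket();
    this.lastMessageTime = Date.now();
    // Any inbound message proves the link is alive.
    this.socket.onmessage = () => {
      this.lastMessageTime = Date.now();
    };
  }

  private checkHealth(): void {
    const silentFor = Date.now() - this.lastMessageTime;
    if (this.socket.readyState !== WebSocket.OPEN || silentFor > this.messageTimeoutMs) {
      this.socket.close(); // dead socket or prolonged silence: reconnect
      this.open();
    }
  }
}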
View File

@@ -7,6 +7,7 @@ defmodule WandererApp.Map.ZkbDataFetcher do
require Logger
alias WandererApp.Map.Server.Impl, as: MapServerImpl
@interval :timer.seconds(15)
@store_map_kills_timeout :timer.hours(1)
@@ -109,56 +110,32 @@ defmodule WandererApp.Map.ZkbDataFetcher do
{solar_system_id, MapSet.new(ids)}
end)
# Find systems with changed killmail lists
# Find systems with changed killmail lists or empty detailed kills
changed_systems =
new_ids_map
|> Enum.filter(fn {system_id, new_ids_set} ->
old_set = MapSet.new(Map.get(old_ids_map, system_id, []))
not MapSet.equal?(new_ids_set, old_set)
old_details = Map.get(old_details_map, system_id, [])
# Update if IDs changed OR if we have IDs but no detailed kills
not MapSet.equal?(new_ids_set, old_set) or
(MapSet.size(new_ids_set) > 0 and old_details == [])
end)
|> Enum.map(&elem(&1, 0))
if changed_systems == [] do
Logger.debug(fn ->
"[ZkbDataFetcher] No changes in detailed kills for map_id=#{map_id}"
end)
log_no_changes(map_id)
# Don't overwrite existing cache data when there are no changes
# Only initialize if cache doesn't exist
if old_details_map == %{} do
# First time initialization - create empty structure
empty_map = systems
|> Enum.into(%{}, fn {system_id, _} -> {system_id, []} end)
WandererApp.Cache.insert(cache_key_details, empty_map, ttl: :timer.hours(@killmail_ttl_hours))
end
maybe_initialize_empty_details_map(old_details_map, systems, cache_key_details)
:ok
else
# Build new details for each changed system
updated_details_map =
Enum.reduce(changed_systems, old_details_map, fn system_id, acc ->
kill_ids =
new_ids_map
|> Map.fetch!(system_id)
|> MapSet.to_list()
# Get killmail details from cache (populated by WebSocket)
kill_details =
kill_ids
|> Enum.map(&WandererApp.Cache.get("zkb:killmail:#{&1}"))
|> Enum.reject(&is_nil/1)
# Ensure system_id is an integer key
Map.put(acc, system_id, kill_details)
end)
updated_details_map = build_updated_details_map(changed_systems, old_details_map, new_ids_map)
# Update the ID map cache
updated_ids_map =
Enum.reduce(changed_systems, old_ids_map, fn system_id, acc ->
new_ids_list = new_ids_map[system_id] |> MapSet.to_list()
Map.put(acc, system_id, new_ids_list)
end)
updated_ids_map = build_updated_ids_map(changed_systems, old_ids_map, new_ids_map)
# Store updated caches
WandererApp.Cache.insert(cache_key_ids, updated_ids_map,
@@ -171,7 +148,7 @@ defmodule WandererApp.Map.ZkbDataFetcher do
# Broadcast changes
changed_data = Map.take(updated_details_map, changed_systems)
WandererApp.Map.Server.Impl.broadcast!(map_id, :detailed_kills_updated, changed_data)
MapServerImpl.broadcast!(map_id, :detailed_kills_updated, changed_data)
:ok
end
@@ -203,7 +180,7 @@ defmodule WandererApp.Map.ZkbDataFetcher do
payload = Map.take(new_kills_map, changed_system_ids)
WandererApp.Map.Server.Impl.broadcast!(map_id, :kills_updated, payload)
MapServerImpl.broadcast!(map_id, :kills_updated, payload)
:ok
end
@@ -217,4 +194,40 @@ defmodule WandererApp.Map.ZkbDataFetcher do
:ok
end
end
# Guard on map_size/1: a bare %{} pattern matches any map, not just an empty one
defp maybe_initialize_empty_details_map(old_details_map, systems, cache_key_details)
     when map_size(old_details_map) == 0 do
# First time initialization - create empty structure
initial_map = Enum.into(systems, %{}, fn {system_id, _} -> {system_id, []} end)
WandererApp.Cache.insert(cache_key_details, initial_map, ttl: :timer.hours(@killmail_ttl_hours))
end
defp maybe_initialize_empty_details_map(_old_details_map, _systems, _cache_key_details), do: :ok
defp build_updated_details_map(changed_systems, old_details_map, new_ids_map) do
Enum.reduce(changed_systems, old_details_map, fn system_id, acc ->
kill_details = get_kill_details_for_system(system_id, new_ids_map)
Map.put(acc, system_id, kill_details)
end)
end
defp get_kill_details_for_system(system_id, new_ids_map) do
new_ids_map
|> Map.fetch!(system_id)
|> MapSet.to_list()
|> Enum.map(&WandererApp.Cache.get("zkb:killmail:#{&1}"))
|> Enum.reject(&is_nil/1)
end
defp build_updated_ids_map(changed_systems, old_ids_map, new_ids_map) do
Enum.reduce(changed_systems, old_ids_map, fn system_id, acc ->
new_ids_list = new_ids_map[system_id] |> MapSet.to_list()
Map.put(acc, system_id, new_ids_list)
end)
end
defp log_no_changes(map_id) do
Logger.debug(fn ->
"[ZkbDataFetcher] No changes in detailed kills for map_id=#{map_id}"
end)
end
end
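The fetcher refactor extracts helpers without changing behavior, except that change detection now also flags systems that have killmail IDs but no cached details. That detection step, sketched generically in TypeScript with assumed shapes:

// Hypothetical sketch of the changed-system detection; shapes are assumptions.
function changedSystems(
  newIds: Map<number, Set<number>>, // current killmail IDs per system
  oldIds: Map<number, number[]>, // previously cached IDs per system
  oldDetails: Map<number, unknown[]>, // previously cached kill details per system
): number[] {
  const changed: number[] = [];
  for (const [systemId, newSet] of newIds) {
    const oldSet = new Set(oldIds.get(systemId) ?? []);
    const sameIds = newSet.size === oldSet.size && [...newSet].every(id => oldSet.has(id));
    const detailsMissing = newSet.size > 0 && (oldDetails.get(systemId) ?? []).length === 0;
    if (!sameIds || detailsMissing) changed.push(systemId); // rebuild only these systems
  }
  return changed;
}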

View File

@@ -7,7 +7,7 @@ defmodule WandererAppWeb.MapKillsEventHandler do
use WandererAppWeb, :live_component
require Logger
alias WandererAppWeb.{MapEventHandler, MapCoreEventHandler}
alias WandererAppWeb.{MapCoreEventHandler, MapEventHandler}
def handle_server_event(
%{event: :init_kills},
@@ -18,37 +18,13 @@ defmodule WandererAppWeb.MapKillsEventHandler do
case WandererApp.Map.get_map(map_id) do
{:ok, %{systems: systems}} ->
kill_counts =
systems
|> Enum.into(%{}, fn {solar_system_id, _system} ->
# Use explicit cache lookup with validation from WandererApp.Cache
kills_count =
case WandererApp.Cache.get("zkb:kills:#{solar_system_id}") do
count when is_integer(count) and count >= 0 ->
count
nil ->
0
invalid_data ->
Logger.warning(
"[#{__MODULE__}] Invalid kill count data for system #{solar_system_id}: #{inspect(invalid_data)}"
)
0
end
{solar_system_id, kills_count}
end)
|> Enum.filter(fn {_system_id, count} -> count > 0 end)
|> Enum.into(%{})
kill_counts = build_kill_counts(systems)
kills_payload = kill_counts
|> Enum.map(fn {system_id, kills} ->
%{solar_system_id: system_id, kills: kills}
end)
MapEventHandler.push_map_event(
socket,
"kills_updated",
@@ -169,6 +145,7 @@ defmodule WandererAppWeb.MapKillsEventHandler do
defp handle_get_system_kills(sid, sh, payload, socket) do
with {:ok, system_id} <- parse_id(sid),
# Parse since_hours for validation, but filtering is done on frontend
{:ok, _since_hours} <- parse_id(sh) do
cache_key = "map:#{socket.assigns.map_id}:zkb:detailed_kills"
@@ -210,43 +187,20 @@ defmodule WandererAppWeb.MapKillsEventHandler do
end
defp handle_get_systems_kills(sids, sh, payload, socket) do
# Parse since_hours for validation, but filtering is done on frontend
with {:ok, _since_hours} <- parse_id(sh),
{:ok, parsed_ids} <- parse_system_ids(sids) do
cache_key = "map:#{socket.assigns.map_id}:zkb:detailed_kills"
# Get from WandererApp.Cache (not Cachex)
filtered_data =
case WandererApp.Cache.get(cache_key) do
cached_map when is_map(cached_map) ->
# Validate and filter cached data
parsed_ids
|> Enum.reduce(%{}, fn system_id, acc ->
case Map.get(cached_map, system_id) do
kills when is_list(kills) -> Map.put(acc, system_id, kills)
_ -> acc
end
end)
nil ->
%{}
invalid_data ->
Logger.warning(
"[#{__MODULE__}] Invalid cache data structure for key: #{cache_key}, got: #{inspect(invalid_data)}"
)
# Clear invalid cache entry
WandererApp.Cache.delete(cache_key)
%{}
end
filtered_data = get_kills_for_systems(cache_key, parsed_ids)
reply_payload = %{"systems_kills" => filtered_data}
{:reply, reply_payload, socket}
else
:error ->
@@ -281,4 +235,62 @@ defmodule WandererAppWeb.MapKillsEventHandler do
end
defp parse_system_ids(_), do: :error
defp build_kill_counts(systems) do
systems
|> Enum.map(&extract_system_kill_count/1)
|> Enum.filter(fn {_system_id, count} -> count > 0 end)
|> Enum.into(%{})
end
defp extract_system_kill_count({solar_system_id, _system}) do
kills_count = get_validated_kill_count(solar_system_id)
{solar_system_id, kills_count}
end
defp get_validated_kill_count(solar_system_id) do
case WandererApp.Cache.get("zkb:kills:#{solar_system_id}") do
count when is_integer(count) and count >= 0 ->
count
nil ->
0
invalid_data ->
Logger.warning(
"[#{__MODULE__}] Invalid kill count data for system #{solar_system_id}: #{inspect(invalid_data)}"
)
0
end
end
defp get_kills_for_systems(cache_key, system_ids) do
case WandererApp.Cache.get(cache_key) do
cached_map when is_map(cached_map) ->
extract_cached_kills(cached_map, system_ids)
nil ->
%{}
invalid_data ->
Logger.warning(
"[#{__MODULE__}] Invalid cache data structure for key: #{cache_key}, got: #{inspect(invalid_data)}"
)
# Clear invalid cache entry
WandererApp.Cache.delete(cache_key)
%{}
end
end
defp extract_cached_kills(cached_map, system_ids) do
Enum.reduce(system_ids, %{}, fn system_id, acc ->
case Map.get(cached_map, system_id) do
kills when is_list(kills) ->
Map.put(acc, system_id, kills)
_ ->
acc
end
end)
end
end
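For reference, the kills_updated payload these handlers emit is what the useNodeKillsCount handler above consumes. Its shape follows from the %{solar_system_id: ..., kills: ...} maps built in build_kill_counts, with zero-count systems filtered out; as a TypeScript type, with field names taken from the Elixir keys:

// Payload shape implied by the Elixir maps; kills is a validated non-negative integer.
interface KillsUpdatedEntry {
  solar_system_id: number;
  kills: number;
}
type KillsUpdatedPayload = KillsUpdatedEntry[];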