mirror of
https://github.com/wanderer-industries/wanderer
synced 2025-12-12 10:45:54 +00:00
Merge pull request #430 from guarzo/guarzo/killcount
fix: only show previous hours kills in hover
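In short: the kill badge and its hover list now count only kills from the previous hour, while the stored data keeps everything and the display layer filters. A minimal TypeScript sketch of that display-time cutoff, assuming the ISO-8601 `kill_time` field used throughout the hunks below (the helper itself is illustrative and not part of the commit):

```typescript
// Illustrative only: keep kills from the last `sinceHours` hours.
// Assumes each kill carries an ISO-8601 `kill_time` string, as in the hunks below.
interface KillLike {
  kill_time?: string;
}

function filterRecentKills<T extends KillLike>(kills: T[], sinceHours = 1): T[] {
  const cutoff = Date.now() - sinceHours * 60 * 60 * 1000;
  return kills.filter(kill => {
    if (!kill.kill_time) return false;
    const killTime = new Date(kill.kill_time).getTime();
    return !Number.isNaN(killTime) && killTime >= cutoff;
  });
}
```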
@@ -49,7 +49,7 @@ export const KillsCounter = ({
       content={
         <div className="overflow-hidden flex w-[450px] flex-col" style={{ height: `${tooltipHeight}px` }}>
           <div className="flex-1 h-full">
-            <SystemKillsList kills={limitedKills} onlyOneSystem />
+            <SystemKillsList kills={limitedKills} onlyOneSystem timeRange={1} />
           </div>
         </div>
       }

@@ -48,7 +48,7 @@ export const SolarSystemNodeDefault = memo((props: NodeProps<MapSolarSystemType>
         >
           <div className={clsx(classes.BookmarkWithIcon)}>
             <span className={clsx(PrimeIcons.BOLT, classes.icon)} />
-            <span className={clsx(classes.text)}>{nodeVars.killsCount}</span>
+            <span className={clsx(classes.text)}>{localKillsCount}</span>
           </div>
         </KillsCounter>
       )}

@@ -47,7 +47,7 @@ export const SolarSystemNodeTheme = memo((props: NodeProps<MapSolarSystemType>)
         >
           <div className={clsx(classes.BookmarkWithIcon)}>
             <span className={clsx(PrimeIcons.BOLT, classes.icon)} />
-            <span className={clsx(classes.text)}>{nodeVars.killsCount}</span>
+            <span className={clsx(classes.text)}>{localKillsCount}</span>
           </div>
         </KillsCounter>
       )}

@@ -22,6 +22,7 @@ export function useKillsCounter({ realSystemId }: UseKillsCounterProps) {
     systemId: realSystemId,
     outCommand,
     showAllVisible: false,
+    sinceHours: 1,
   });

   const filteredKills = useMemo(() => {

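For reference, a hedged sketch of the option shape this hook now passes to useSystemKills; only `sinceHours` is new in this commit, and anything not visible in the hunks (for example the exact `outCommand` signature) is an assumption:

```typescript
// Sketch of the useSystemKills options as they appear in this diff;
// the outCommand signature is an assumption, not taken from the commit.
interface UseSystemKillsOptionsSketch {
  systemId?: string | number;
  outCommand: (command: unknown) => void; // assumed signature
  showAllVisible?: boolean;
  sinceHours?: number;
}

const killsCounterOptions: Omit<UseSystemKillsOptionsSketch, 'outCommand'> = {
  showAllVisible: false,
  sinceHours: 1, // passed through for validation; filtering now happens client-side (see the hunks below)
};
```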
@@ -1,6 +1,7 @@
-import { useEffect, useState, useCallback } from 'react';
+import { useEffect, useState, useCallback, useMemo } from 'react';
 import { useMapEventListener } from '@/hooks/Mapper/events';
 import { Commands } from '@/hooks/Mapper/types';
+import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';

 interface Kill {
   solar_system_id: number | string;
@@ -9,29 +10,51 @@ interface Kill {

 interface MapEvent {
   name: Commands;
-  data?: any;
+  data?: unknown;
   payload?: Kill[];
 }

 export function useNodeKillsCount(systemId: number | string, initialKillsCount: number | null): number | null {
   const [killsCount, setKillsCount] = useState<number | null>(initialKillsCount);
+  const { data: mapData } = useMapRootState();
+  const { detailedKills = {} } = mapData;
+
+  // Calculate 1-hour kill count from detailed kills
+  const oneHourKillCount = useMemo(() => {
+    const systemKills = detailedKills[systemId] || [];
+    if (systemKills.length === 0) return null;
+
+    const oneHourAgo = Date.now() - 60 * 60 * 1000; // 1 hour in milliseconds
+    const recentKills = systemKills.filter(kill => {
+      if (!kill.kill_time) return false;
+      const killTime = new Date(kill.kill_time).getTime();
+      if (isNaN(killTime)) return false;
+      return killTime >= oneHourAgo;
+    });
+
+    return recentKills.length > 0 ? recentKills.length : null;
+  }, [detailedKills, systemId]);

   useEffect(() => {
-    setKillsCount(initialKillsCount);
-  }, [initialKillsCount]);
+    // Use 1-hour count if available, otherwise fall back to initial count
+    setKillsCount(oneHourKillCount !== null ? oneHourKillCount : initialKillsCount);
+  }, [oneHourKillCount, initialKillsCount]);

   const handleEvent = useCallback(
     (event: MapEvent): boolean => {
       if (event.name === Commands.killsUpdated && Array.isArray(event.payload)) {
         const killForSystem = event.payload.find(kill => kill.solar_system_id.toString() === systemId.toString());
         if (killForSystem && typeof killForSystem.kills === 'number') {
+          // Only update if we don't have detailed kills data
+          if (!detailedKills[systemId] || detailedKills[systemId].length === 0) {
             setKillsCount(killForSystem.kills);
+          }
         }
         return true;
       }
       return false;
     },
-    [systemId],
+    [systemId, detailedKills],
   );

   useMapEventListener(handleEvent);

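The hook's core decision in the hunk above can be read as a small pure rule: prefer a count derived from the detailed kills of the last hour, and fall back to the server-pushed total only when no detailed data exists for the system. A hedged restatement (names are illustrative, not exported by the commit):

```typescript
// Restates the selection rule from the hunk above as a standalone function.
function resolveKillsCount(oneHourKillCount: number | null, initialKillsCount: number | null): number | null {
  return oneHourKillCount !== null ? oneHourKillCount : initialKillsCount;
}

// A system with no detailed kills keeps the pushed total; a system with detailed
// kills in the last hour shows that count regardless of the pushed total.
resolveKillsCount(null, 3); // 3
resolveKillsCount(2, 5); // 2
```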
@@ -13,17 +13,14 @@ interface UseSystemKillsProps {
   sinceHours?: number;
 }

-function combineKills(existing: DetailedKill[], incoming: DetailedKill[], sinceHours: number): DetailedKill[] {
-  const cutoff = Date.now() - sinceHours * 60 * 60 * 1000;
+function combineKills(existing: DetailedKill[], incoming: DetailedKill[]): DetailedKill[] {
+  // Don't filter by time when storing - let components filter when displaying
   const byId: Record<string, DetailedKill> = {};

   for (const kill of [...existing, ...incoming]) {
     if (!kill.kill_time) continue;
-    const killTimeMs = new Date(kill.kill_time).valueOf();
-    if (killTimeMs >= cutoff) {
       byId[kill.killmail_id] = kill;
-    }
   }

   return Object.values(byId);
 }
@@ -55,14 +52,14 @@ export function useSystemKills({ systemId, outCommand, showAllVisible = false, s

       for (const [sid, newKills] of Object.entries(killsMap)) {
         const existing = updated[sid] ?? [];
-        const combined = combineKills(existing, newKills, effectiveSinceHours);
+        const combined = combineKills(existing, newKills);
         updated[sid] = combined;
       }

       return { ...prev, detailedKills: updated };
     });
   },
-  [update, effectiveSinceHours],
+  [update],
 );

 const fetchKills = useCallback(

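With the cutoff removed from combineKills, the store keeps every kill it receives and simply dedupes by killmail_id; per the diff comment, the display side (the hover list via `timeRange` and the badge via useNodeKillsCount) applies the one-hour window instead. A small self-contained sketch of that merge contract, with types following the diff:

```typescript
// Mirrors the new combineKills contract: merge batches, dedupe on killmail_id,
// and leave time filtering to the display layer.
interface DetailedKillSketch {
  killmail_id: number;
  kill_time?: string;
}

function combineKillsSketch(existing: DetailedKillSketch[], incoming: DetailedKillSketch[]): DetailedKillSketch[] {
  const byId: Record<string, DetailedKillSketch> = {};
  for (const kill of [...existing, ...incoming]) {
    if (!kill.kill_time) continue;
    byId[kill.killmail_id] = kill;
  }
  return Object.values(byId);
}

// A killmail arriving in two batches is stored once; merged.length === 2 here.
const merged = combineKillsSketch(
  [{ killmail_id: 1, kill_time: '2025-01-01T00:00:00Z' }],
  [
    { killmail_id: 1, kill_time: '2025-01-01T00:00:00Z' },
    { killmail_id: 2, kill_time: '2025-01-01T00:30:00Z' },
  ],
);
```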
@@ -16,11 +16,13 @@ defmodule WandererApp.Kills.Client do
   @retry_delays [5_000, 10_000, 30_000, 60_000]
   @max_retries 10
   @health_check_interval :timer.seconds(30) # Check every 30 seconds
+  @message_timeout :timer.minutes(15) # No messages timeout

   defstruct [
     :socket_pid,
     :retry_timer_ref,
     :connection_timeout_ref,
+    :last_message_time,
     connected: false,
     connecting: false,
     subscribed_systems: MapSet.new(),
@@ -162,7 +164,8 @@ defmodule WandererApp.Kills.Client do
        connecting: false,
        socket_pid: socket_pid,
        retry_count: 0, # Reset retry count only on successful connection
-       last_error: nil
+       last_error: nil,
+       last_message_time: System.system_time(:millisecond)
    }
    |> cancel_retry()
    |> cancel_connection_timeout()
@@ -255,16 +258,9 @@ defmodule WandererApp.Kills.Client do
     {:noreply, state}
   end

-  # Handle process DOWN messages for socket monitoring
-  def handle_info({:DOWN, _ref, :process, pid, reason}, %{socket_pid: pid} = state) do
-    Logger.error("[Client] Socket process died: #{inspect(reason)}")
-    send(self(), {:disconnected, {:socket_died, reason}})
-    {:noreply, state}
-  end
-
-  def handle_info({:DOWN, _ref, :process, _pid, _reason}, state) do
-    # Ignore DOWN messages for other processes
-    {:noreply, state}
+  def handle_info({:message_received, _type}, state) do
+    # Update last message time when we receive a kill message
+    {:noreply, %{state | last_message_time: System.system_time(:millisecond)}}
   end

   def handle_info(_msg, state), do: {:noreply, state}
@@ -454,6 +450,22 @@ defmodule WandererApp.Kills.Client do
     :needs_reconnect
   end

+  defp check_health(%{socket_pid: pid, last_message_time: last_msg_time} = state) when not is_nil(last_msg_time) do
+    cond do
+      not socket_alive?(pid) ->
+        Logger.warning("[Client] Health check: Socket process #{inspect(pid)} is dead")
+        :needs_reconnect
+
+      # Check if we haven't received a message in the configured timeout
+      System.system_time(:millisecond) - last_msg_time > @message_timeout ->
+        Logger.warning("[Client] Health check: No messages received for 15+ minutes, reconnecting")
+        :needs_reconnect
+
+      true ->
+        :healthy
+    end
+  end
+
   defp check_health(%{socket_pid: pid} = state) do
     if socket_alive?(pid) do
       :healthy
@@ -565,6 +577,9 @@ defmodule WandererApp.Kills.Client do
   def handle_message(topic, event, payload, _transport, state) do
     case {topic, event} do
       {"killmails:lobby", "killmail_update"} ->
+        # Notify parent that we received a message
+        send(state.parent, {:message_received, :killmail_update})
+
         # Use supervised task to handle failures gracefully
         Task.Supervisor.start_child(
           WandererApp.Kills.TaskSupervisor,
@@ -572,6 +587,9 @@ defmodule WandererApp.Kills.Client do
       )

       {"killmails:lobby", "kill_count_update"} ->
+        # Notify parent that we received a message
+        send(state.parent, {:message_received, :kill_count_update})
+
         # Use supervised task to handle failures gracefully
         Task.Supervisor.start_child(
           WandererApp.Kills.TaskSupervisor,

@@ -7,6 +7,7 @@ defmodule WandererApp.Map.ZkbDataFetcher do

   require Logger

+  alias WandererApp.Map.Server.Impl, as: MapServerImpl

   @interval :timer.seconds(15)
   @store_map_kills_timeout :timer.hours(1)
@@ -109,56 +110,32 @@ defmodule WandererApp.Map.ZkbDataFetcher do
         {solar_system_id, MapSet.new(ids)}
       end)

-      # Find systems with changed killmail lists
+      # Find systems with changed killmail lists or empty detailed kills
       changed_systems =
         new_ids_map
         |> Enum.filter(fn {system_id, new_ids_set} ->
           old_set = MapSet.new(Map.get(old_ids_map, system_id, []))
-          not MapSet.equal?(new_ids_set, old_set)
+          old_details = Map.get(old_details_map, system_id, [])
+          # Update if IDs changed OR if we have IDs but no detailed kills
+          not MapSet.equal?(new_ids_set, old_set) or
+            (MapSet.size(new_ids_set) > 0 and old_details == [])
         end)
         |> Enum.map(&elem(&1, 0))

       if changed_systems == [] do
-        Logger.debug(fn ->
-          "[ZkbDataFetcher] No changes in detailed kills for map_id=#{map_id}"
-        end)
+        log_no_changes(map_id)

         # Don't overwrite existing cache data when there are no changes
         # Only initialize if cache doesn't exist
-        if old_details_map == %{} do
-          # First time initialization - create empty structure
-          empty_map = systems
-          |> Enum.into(%{}, fn {system_id, _} -> {system_id, []} end)
-
-          WandererApp.Cache.insert(cache_key_details, empty_map, ttl: :timer.hours(@killmail_ttl_hours))
-        end
+        maybe_initialize_empty_details_map(old_details_map, systems, cache_key_details)

         :ok
       else
         # Build new details for each changed system
-        updated_details_map =
-          Enum.reduce(changed_systems, old_details_map, fn system_id, acc ->
-            kill_ids =
-              new_ids_map
-              |> Map.fetch!(system_id)
-              |> MapSet.to_list()
-
-            # Get killmail details from cache (populated by WebSocket)
-            kill_details =
-              kill_ids
-              |> Enum.map(&WandererApp.Cache.get("zkb:killmail:#{&1}"))
-              |> Enum.reject(&is_nil/1)
-
-            # Ensure system_id is an integer key
-            Map.put(acc, system_id, kill_details)
-          end)
+        updated_details_map = build_updated_details_map(changed_systems, old_details_map, new_ids_map)

         # Update the ID map cache
-        updated_ids_map =
-          Enum.reduce(changed_systems, old_ids_map, fn system_id, acc ->
-            new_ids_list = new_ids_map[system_id] |> MapSet.to_list()
-            Map.put(acc, system_id, new_ids_list)
-          end)
+        updated_ids_map = build_updated_ids_map(changed_systems, old_ids_map, new_ids_map)

         # Store updated caches
         WandererApp.Cache.insert(cache_key_ids, updated_ids_map,
@@ -171,7 +148,7 @@ defmodule WandererApp.Map.ZkbDataFetcher do

         # Broadcast changes
         changed_data = Map.take(updated_details_map, changed_systems)
-        WandererApp.Map.Server.Impl.broadcast!(map_id, :detailed_kills_updated, changed_data)
+        MapServerImpl.broadcast!(map_id, :detailed_kills_updated, changed_data)

         :ok
       end
@@ -203,7 +180,7 @@ defmodule WandererApp.Map.ZkbDataFetcher do

     payload = Map.take(new_kills_map, changed_system_ids)

-    WandererApp.Map.Server.Impl.broadcast!(map_id, :kills_updated, payload)
+    MapServerImpl.broadcast!(map_id, :kills_updated, payload)

     :ok
   end
@@ -217,4 +194,40 @@ defmodule WandererApp.Map.ZkbDataFetcher do
       :ok
     end
   end
+
+  defp maybe_initialize_empty_details_map(%{}, systems, cache_key_details) do
+    # First time initialization - create empty structure
+    initial_map = Enum.into(systems, %{}, fn {system_id, _} -> {system_id, []} end)
+    WandererApp.Cache.insert(cache_key_details, initial_map, ttl: :timer.hours(@killmail_ttl_hours))
+  end
+
+  defp maybe_initialize_empty_details_map(_old_details_map, _systems, _cache_key_details), do: :ok
+
+  defp build_updated_details_map(changed_systems, old_details_map, new_ids_map) do
+    Enum.reduce(changed_systems, old_details_map, fn system_id, acc ->
+      kill_details = get_kill_details_for_system(system_id, new_ids_map)
+      Map.put(acc, system_id, kill_details)
+    end)
+  end
+
+  defp get_kill_details_for_system(system_id, new_ids_map) do
+    new_ids_map
+    |> Map.fetch!(system_id)
+    |> MapSet.to_list()
+    |> Enum.map(&WandererApp.Cache.get("zkb:killmail:#{&1}"))
+    |> Enum.reject(&is_nil/1)
+  end
+
+  defp build_updated_ids_map(changed_systems, old_ids_map, new_ids_map) do
+    Enum.reduce(changed_systems, old_ids_map, fn system_id, acc ->
+      new_ids_list = new_ids_map[system_id] |> MapSet.to_list()
+      Map.put(acc, system_id, new_ids_list)
+    end)
+  end
+
+  defp log_no_changes(map_id) do
+    Logger.debug(fn ->
+      "[ZkbDataFetcher] No changes in detailed kills for map_id=#{map_id}"
+    end)
+  end
 end

@@ -7,7 +7,7 @@ defmodule WandererAppWeb.MapKillsEventHandler do
   use WandererAppWeb, :live_component
   require Logger

-  alias WandererAppWeb.{MapEventHandler, MapCoreEventHandler}
+  alias WandererAppWeb.{MapCoreEventHandler, MapEventHandler}

   def handle_server_event(
         %{event: :init_kills},
@@ -18,37 +18,13 @@ defmodule WandererAppWeb.MapKillsEventHandler do
     case WandererApp.Map.get_map(map_id) do
       {:ok, %{systems: systems}} ->
-        kill_counts =
-          systems
-          |> Enum.into(%{}, fn {solar_system_id, _system} ->
-            # Use explicit cache lookup with validation from WandererApp.Cache
-            kills_count =
-              case WandererApp.Cache.get("zkb:kills:#{solar_system_id}") do
-                count when is_integer(count) and count >= 0 ->
-                  count
-
-                nil ->
-                  0
-
-                invalid_data ->
-                  Logger.warning(
-                    "[#{__MODULE__}] Invalid kill count data for system #{solar_system_id}: #{inspect(invalid_data)}"
-                  )
-
-                  0
-              end
-
-            {solar_system_id, kills_count}
-          end)
-          |> Enum.filter(fn {_system_id, count} -> count > 0 end)
-          |> Enum.into(%{})
+        kill_counts = build_kill_counts(systems)

         kills_payload = kill_counts
         |> Enum.map(fn {system_id, kills} ->
           %{solar_system_id: system_id, kills: kills}
         end)

         MapEventHandler.push_map_event(
           socket,
           "kills_updated",
@@ -169,6 +145,7 @@ defmodule WandererAppWeb.MapKillsEventHandler do

   defp handle_get_system_kills(sid, sh, payload, socket) do
     with {:ok, system_id} <- parse_id(sid),
+         # Parse since_hours for validation, but filtering is done on frontend
          {:ok, _since_hours} <- parse_id(sh) do
       cache_key = "map:#{socket.assigns.map_id}:zkb:detailed_kills"

@@ -210,43 +187,20 @@ defmodule WandererAppWeb.MapKillsEventHandler do
   end

   defp handle_get_systems_kills(sids, sh, payload, socket) do
+    # Parse since_hours for validation, but filtering is done on frontend
     with {:ok, _since_hours} <- parse_id(sh),
          {:ok, parsed_ids} <- parse_system_ids(sids) do
       cache_key = "map:#{socket.assigns.map_id}:zkb:detailed_kills"

       # Get from WandererApp.Cache (not Cachex)
-      filtered_data =
-        case WandererApp.Cache.get(cache_key) do
-          cached_map when is_map(cached_map) ->
-            # Validate and filter cached data
-            parsed_ids
-            |> Enum.reduce(%{}, fn system_id, acc ->
-              case Map.get(cached_map, system_id) do
-                kills when is_list(kills) -> Map.put(acc, system_id, kills)
-                _ -> acc
-              end
-            end)
-
-          nil ->
-            %{}
-
-          invalid_data ->
-            Logger.warning(
-              "[#{__MODULE__}] Invalid cache data structure for key: #{cache_key}, got: #{inspect(invalid_data)}"
-            )
-
-            # Clear invalid cache entry
-            WandererApp.Cache.delete(cache_key)
-            %{}
-        end
+      filtered_data = get_kills_for_systems(cache_key, parsed_ids)

       # filtered_data is already the final result, not wrapped in a tuple
       systems_data = filtered_data

       reply_payload = %{"systems_kills" => systems_data}

       {:reply, reply_payload, socket}
     else
       :error ->
@@ -281,4 +235,62 @@ defmodule WandererAppWeb.MapKillsEventHandler do
   end

   defp parse_system_ids(_), do: :error
+
+  defp build_kill_counts(systems) do
+    systems
+    |> Enum.map(&extract_system_kill_count/1)
+    |> Enum.filter(fn {_system_id, count} -> count > 0 end)
+    |> Enum.into(%{})
+  end
+
+  defp extract_system_kill_count({solar_system_id, _system}) do
+    kills_count = get_validated_kill_count(solar_system_id)
+    {solar_system_id, kills_count}
+  end
+
+  defp get_validated_kill_count(solar_system_id) do
+    case WandererApp.Cache.get("zkb:kills:#{solar_system_id}") do
+      count when is_integer(count) and count >= 0 ->
+        count
+
+      nil ->
+        0
+
+      invalid_data ->
+        Logger.warning(
+          "[#{__MODULE__}] Invalid kill count data for system #{solar_system_id}: #{inspect(invalid_data)}"
+        )
+
+        0
+    end
+  end
+
+  defp get_kills_for_systems(cache_key, system_ids) do
+    case WandererApp.Cache.get(cache_key) do
+      cached_map when is_map(cached_map) ->
+        extract_cached_kills(cached_map, system_ids)
+
+      nil ->
+        %{}
+
+      invalid_data ->
+        Logger.warning(
+          "[#{__MODULE__}] Invalid cache data structure for key: #{cache_key}, got: #{inspect(invalid_data)}"
+        )
+
+        # Clear invalid cache entry
+        WandererApp.Cache.delete(cache_key)
+        %{}
+    end
+  end
+
+  defp extract_cached_kills(cached_map, system_ids) do
+    Enum.reduce(system_ids, %{}, fn system_id, acc ->
+      case Map.get(cached_map, system_id) do
+        kills when is_list(kills) ->
+          Map.put(acc, system_id, kills)

+        _ ->
+          acc
+      end
+    end)
+  end
 end