Compare commits


3 Commits

Author        SHA1        Message                                Date
Dmitry Popov  9a8dc4dbe5  Merge branch 'main' into tests-fixes   2025-11-22 12:29:22 +01:00
Dmitry Popov  5e0965ead4  fix(tests): updated tests              2025-11-17 12:52:11 +01:00
Dmitry Popov  4c39c6fb39  fix(tests): updated tests              2025-11-17 00:09:10 +01:00
20 changed files with 868 additions and 598 deletions

View File

@@ -2,33 +2,6 @@
<!-- changelog -->
## [v1.86.0](https://github.com/wanderer-industries/wanderer/compare/v1.85.5...v1.86.0) (2025-11-25)
### Features:
* add date filter for character activity
## [v1.85.5](https://github.com/wanderer-industries/wanderer/compare/v1.85.4...v1.85.5) (2025-11-24)
### Bug Fixes:
* core: fixed connections cleanup and rally points delete issues
## [v1.85.4](https://github.com/wanderer-industries/wanderer/compare/v1.85.3...v1.85.4) (2025-11-22)
### Bug Fixes:
* core: invalidate map characters every 1 hour for any missing/revoked permissions
## [v1.85.3](https://github.com/wanderer-industries/wanderer/compare/v1.85.2...v1.85.3) (2025-11-22)

View File

@@ -1,7 +1,4 @@
import { Dialog } from 'primereact/dialog';
import { Menu } from 'primereact/menu';
import { MenuItem } from 'primereact/menuitem';
import { useState, useCallback, useRef, useMemo } from 'react';
import { CharacterActivityContent } from '@/hooks/Mapper/components/mapRootContent/components/CharacterActivity/CharacterActivityContent.tsx';
interface CharacterActivityProps {
@@ -9,69 +6,17 @@ interface CharacterActivityProps {
onHide: () => void;
}
const periodOptions = [
{ value: 30, label: '30 Days' },
{ value: 365, label: '1 Year' },
{ value: null, label: 'All Time' },
];
export const CharacterActivity = ({ visible, onHide }: CharacterActivityProps) => {
const [selectedPeriod, setSelectedPeriod] = useState<number | null>(30);
const menuRef = useRef<Menu>(null);
const handlePeriodChange = useCallback((days: number | null) => {
setSelectedPeriod(days);
}, []);
const menuItems: MenuItem[] = useMemo(
() => [
{
label: 'Period',
items: periodOptions.map(option => ({
label: option.label,
icon: selectedPeriod === option.value ? 'pi pi-check' : undefined,
command: () => handlePeriodChange(option.value),
})),
},
],
[selectedPeriod, handlePeriodChange],
);
const selectedPeriodLabel = useMemo(
() => periodOptions.find(opt => opt.value === selectedPeriod)?.label || 'All Time',
[selectedPeriod],
);
const headerIcons = (
<>
<button
type="button"
className="p-dialog-header-icon p-link"
onClick={e => menuRef.current?.toggle(e)}
aria-label="Filter options"
>
<span className="pi pi-bars" />
</button>
<Menu model={menuItems} popup ref={menuRef} />
</>
);
return (
<Dialog
header={
<div className="flex items-center gap-2">
<span>Character Activity</span>
<span className="text-xs text-stone-400">({selectedPeriodLabel})</span>
</div>
}
header="Character Activity"
visible={visible}
className="w-[550px] max-h-[90vh]"
onHide={onHide}
dismissableMask
contentClassName="p-0 h-full flex flex-col"
icons={headerIcons}
>
<CharacterActivityContent selectedPeriod={selectedPeriod} />
<CharacterActivityContent />
</Dialog>
);
};

View File

@@ -7,28 +7,16 @@ import {
} from '@/hooks/Mapper/components/mapRootContent/components/CharacterActivity/helpers.tsx';
import { Column } from 'primereact/column';
import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';
import { useMemo, useEffect } from 'react';
import { useCharacterActivityHandlers } from '@/hooks/Mapper/components/mapRootContent/hooks/useCharacterActivityHandlers';
import { useMemo } from 'react';
interface CharacterActivityContentProps {
selectedPeriod: number | null;
}
export const CharacterActivityContent = ({ selectedPeriod }: CharacterActivityContentProps) => {
export const CharacterActivityContent = () => {
const {
data: { characterActivityData },
} = useMapRootState();
const { handleShowActivity } = useCharacterActivityHandlers();
const activity = useMemo(() => characterActivityData?.activity || [], [characterActivityData]);
const loading = useMemo(() => characterActivityData?.loading !== false, [characterActivityData]);
// Reload activity data when period changes
useEffect(() => {
handleShowActivity(selectedPeriod);
}, [selectedPeriod, handleShowActivity]);
if (loading) {
return (
<div className="flex flex-col items-center justify-center h-full w-full">

View File

@@ -23,17 +23,17 @@ export const useCharacterActivityHandlers = () => {
/**
* Handle showing the character activity dialog
*/
const handleShowActivity = useCallback((days?: number | null) => {
const handleShowActivity = useCallback(() => {
// Update local state to show the dialog
update(state => ({
...state,
showCharacterActivity: true,
}));
// Send the command to the server with optional days parameter
// Send the command to the server
outCommand({
type: OutCommand.showActivity,
data: days !== undefined ? { days } : {},
data: {},
});
}, [outCommand, update]);

View File

@@ -68,5 +68,4 @@ export interface ActivitySummary {
passages: number;
connections: number;
signatures: number;
timestamp?: string;
}

View File

@@ -43,14 +43,13 @@ defmodule WandererApp.Character.Activity do
## Parameters
- `map_id`: ID of the map
- `current_user`: Current user struct (used only to get user settings)
- `days`: Optional number of days to filter activity (nil for all time)
## Returns
- List of processed activity data
"""
def process_character_activity(map_id, current_user, days \\ nil) do
def process_character_activity(map_id, current_user) do
with {:ok, map_user_settings} <- get_map_user_settings(map_id, current_user.id),
{:ok, raw_activity} <- WandererApp.Map.get_character_activity(map_id, days),
{:ok, raw_activity} <- WandererApp.Map.get_character_activity(map_id),
{:ok, user_characters} <-
WandererApp.Api.Character.active_by_user(%{user_id: current_user.id}) do
process_activity_data(raw_activity, map_user_settings, user_characters)
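With this revert, call sites drop the optional days argument; a minimal before/after sketch of the call shape (caller context is illustrative):

# Before (v1.86.0): optional period filter, e.g. the last 30 days
WandererApp.Character.Activity.process_character_activity(map_id, current_user, 30)

# After (v1.85.3): all-time activity only
WandererApp.Character.Activity.process_character_activity(map_id, current_user)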

View File

@@ -30,7 +30,6 @@ defmodule WandererApp.Character.TrackerPool do
defp location_concurrency do
Application.get_env(:wanderer_app, :location_concurrency, System.schedulers_online() * 12)
end
# Other operations can use lower concurrency
@standard_concurrency System.schedulers_online() * 2
@@ -298,7 +297,7 @@ defmodule WandererApp.Character.TrackerPool do
)
# Warn if location updates are falling behind (taking > 800ms for 100 chars)
if duration > 2000 do
if duration > 800 do
Logger.warning(
"[Tracker Pool] Location updates falling behind: #{duration}ms for #{length(characters)} chars (pool: #{state.uuid})"
)
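The lowered threshold pairs with the wanderer_app.tracker_pool.location_lag.count counter whose description is updated later in this diff. A sketch of how such a lag event could be emitted under that assumption (the actual emit site is not shown in this hunk; measurement and tag names are inferred from the Telemetry metric definition):

if duration > 800 do
  # hypothetical emit, matching the counter's :pool_uuid tag
  :telemetry.execute(
    [:wanderer_app, :tracker_pool, :location_lag],
    %{count: 1},
    %{pool_uuid: state.uuid}
  )
end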

View File

@@ -463,8 +463,7 @@ defmodule WandererApp.Esi.ApiClient do
{:error, reason} ->
# Check if this is a Finch pool error
if is_exception(reason) and
Exception.message(reason) =~ "unable to provide a connection" do
if is_exception(reason) and Exception.message(reason) =~ "unable to provide a connection" do
:telemetry.execute(
[:wanderer_app, :finch, :pool_exhausted],
%{count: 1},
@@ -678,8 +677,7 @@ defmodule WandererApp.Esi.ApiClient do
{:error, reason} ->
# Check if this is a Finch pool error
if is_exception(reason) and
Exception.message(reason) =~ "unable to provide a connection" do
if is_exception(reason) and Exception.message(reason) =~ "unable to provide a connection" do
:telemetry.execute(
[:wanderer_app, :finch, :pool_exhausted],
%{count: 1},
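Both branches above emit a [:wanderer_app, :finch, :pool_exhausted] event with %{count: 1}. A minimal sketch of a consumer for that event (handler id and log message are illustrative):

:telemetry.attach(
  "log-finch-pool-exhausted",
  [:wanderer_app, :finch, :pool_exhausted],
  fn _event, %{count: count}, _metadata, _config ->
    require Logger
    Logger.warning("Finch pool exhausted (count: #{count})")
  end,
  nil
)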

View File

@@ -240,10 +240,8 @@ defmodule WandererApp.Map.Routes do
{:ok, result}
{:error, _error} ->
error_file_path = save_error_params(origin, hubs, params)
@logger.error(
"Error getting custom routes for #{inspect(origin)}: #{inspect(params)}. Params saved to: #{error_file_path}"
"Error getting custom routes for #{inspect(origin)}: #{inspect(params)}"
)
WandererApp.Esi.get_routes_eve(hubs, origin, params, opts)
@@ -251,35 +249,6 @@ defmodule WandererApp.Map.Routes do
end
end
defp save_error_params(origin, hubs, params) do
timestamp = DateTime.utc_now() |> DateTime.to_unix(:millisecond)
filename = "#{timestamp}_route_error_params.json"
filepath = Path.join([System.tmp_dir!(), filename])
error_data = %{
timestamp: DateTime.utc_now() |> DateTime.to_iso8601(),
origin: origin,
hubs: hubs,
params: params
}
case Jason.encode(error_data, pretty: true) do
{:ok, json_string} ->
File.write!(filepath, json_string)
filepath
{:error, _reason} ->
# Fallback: save as Elixir term if JSON encoding fails
filepath_term = Path.join([System.tmp_dir!(), "#{timestamp}_route_error_params.term"])
File.write!(filepath_term, inspect(error_data, pretty: true))
filepath_term
end
rescue
e ->
@logger.error("Failed to save error params: #{inspect(e)}")
"error_saving_params"
end
defp remove_intersection(pairs_arr) do
tuples = pairs_arr |> Enum.map(fn x -> {x.first, x.second} end)

View File

@@ -410,7 +410,7 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
map_id,
%{solar_system_id: solar_system_source}
),
target_system when not is_nil(target_system) <-
target_system when not is_nil(source_system) <-
WandererApp.Map.find_system_by_location(
map_id,
%{solar_system_id: solar_system_target}

View File

@@ -72,53 +72,39 @@ defmodule WandererApp.Map.Server.PingsImpl do
type: type
} = _ping_info
) do
case WandererApp.MapPingsRepo.get_by_id(ping_id) do
{:ok,
%{system: %{id: system_id, name: system_name, solar_system_id: solar_system_id}} = ping} ->
with {:ok, character} <- WandererApp.Character.get_character(character_id),
:ok <- WandererApp.MapPingsRepo.destroy(ping) do
Impl.broadcast!(map_id, :ping_cancelled, %{
id: ping_id,
solar_system_id: solar_system_id,
type: type
})
with {:ok, character} <- WandererApp.Character.get_character(character_id),
{:ok,
%{system: %{id: system_id, name: system_name, solar_system_id: solar_system_id}} = ping} <-
WandererApp.MapPingsRepo.get_by_id(ping_id),
:ok <- WandererApp.MapPingsRepo.destroy(ping) do
Impl.broadcast!(map_id, :ping_cancelled, %{
id: ping_id,
solar_system_id: solar_system_id,
type: type
})
# Broadcast rally point removal events to external clients (webhooks/SSE)
if type == 1 do
WandererApp.ExternalEvents.broadcast(map_id, :rally_point_removed, %{
id: ping_id,
solar_system_id: solar_system_id,
system_id: system_id,
character_id: character_id,
character_name: character.name,
character_eve_id: character.eve_id,
system_name: system_name
})
end
WandererApp.User.ActivityTracker.track_map_event(:map_rally_cancelled, %{
character_id: character_id,
user_id: user_id,
map_id: map_id,
solar_system_id: solar_system_id
})
else
error ->
Logger.error("Failed to destroy ping: #{inspect(error, pretty: true)}")
end
{:error, %Ash.Error.Query.NotFound{}} ->
# Ping already deleted (possibly by cascade deletion from map/system/character removal,
# auto-expiry, or concurrent cancellation). This is not an error - the desired state
# (ping is gone) is already achieved. Just broadcast the cancellation event.
Logger.debug(
"Ping #{ping_id} not found during cancellation - already deleted, skipping broadcast"
)
:ok
# Broadcast rally point removal events to external clients (webhooks/SSE)
if type == 1 do
WandererApp.ExternalEvents.broadcast(map_id, :rally_point_removed, %{
id: ping_id,
solar_system_id: solar_system_id,
system_id: system_id,
character_id: character_id,
character_name: character.name,
character_eve_id: character.eve_id,
system_name: system_name
})
end
WandererApp.User.ActivityTracker.track_map_event(:map_rally_cancelled, %{
character_id: character_id,
user_id: user_id,
map_id: map_id,
solar_system_id: solar_system_id
})
else
error ->
Logger.error("Failed to fetch ping for cancellation: #{inspect(error, pretty: true)}")
Logger.error("Failed to cancel_ping: #{inspect(error, pretty: true)}")
end
end
end
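The restructure leans on standard with/else semantics: the first clause whose pattern fails skips the body and falls into else, so the old version's separate NotFound branch collapses into the generic error log. A minimal sketch with illustrative function names:

require Logger

with {:ok, character} <- fetch_character(character_id),
     {:ok, ping} <- fetch_ping(ping_id),
     :ok <- destroy(ping) do
  broadcast_cancelled(ping, character)
else
  # any failing step, including a not-found ping, lands here
  error -> Logger.error("Failed to cancel_ping: #{inspect(error, pretty: true)}")
end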

View File

@@ -30,17 +30,14 @@ defmodule WandererAppWeb.MapActivityEventHandler do
def handle_ui_event(
"show_activity",
params,
_,
%{assigns: %{map_id: map_id, current_user: current_user}} = socket
) do
Task.async(fn ->
try do
# Extract days parameter (nil if not provided)
days = Map.get(params, "days")
# Get raw activity data from the domain logic
result =
WandererApp.Character.Activity.process_character_activity(map_id, current_user, days)
WandererApp.Character.Activity.process_character_activity(map_id, current_user)
# Group activities by user_id and summarize
summarized_result =

View File

@@ -98,7 +98,7 @@ defmodule WandererAppWeb.Telemetry do
),
counter("wanderer_app.tracker_pool.location_lag.count",
tags: [:pool_uuid],
description: "Count of location updates falling behind (>2s)"
description: "Count of location updates falling behind (>800ms)"
),
counter("wanderer_app.tracker_pool.ship_skipped.count",
tags: [:pool_uuid, :reason],

View File

@@ -3,7 +3,7 @@ defmodule WandererApp.MixProject do
@source_url "https://github.com/wanderer-industries/wanderer"
@version "1.86.0"
@version "1.85.3"
def project do
[

View File

@@ -19,10 +19,11 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
use WandererApp.DataCase, async: false
import WandererApp.MapTestHelpers
alias WandererApp.Map.Server.CharactersImpl
alias WandererApp.Map.Server.SystemsImpl
@test_map_id 999_999_001
@test_character_eve_id 2_123_456_789
# EVE Online solar system IDs for testing
@@ -32,212 +33,64 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
@system_rens 30_002_510
setup do
# Clean up any existing test data
cleanup_test_data()
# Setup system static info cache for test systems
setup_system_static_info_cache()
# Setup DDRT (R-tree) mock stubs for system positioning
setup_ddrt_mocks()
# Create test user (let Ash generate the ID)
user = create_user(%{name: "Test User", hash: "test_hash_#{:rand.uniform(1_000_000)}"})
# Create test character with location tracking scopes
character =
create_character(%{
eve_id: "#{@test_character_eve_id}",
name: "Test Character",
user_id: user.id,
scopes: "esi-location.read_location.v1 esi-location.read_ship_type.v1",
tracking_pool: "default"
})
character = create_character(%{
eve_id: "#{@test_character_eve_id}",
name: "Test Character",
user_id: user.id,
scopes: "esi-location.read_location.v1 esi-location.read_ship_type.v1",
tracking_pool: "default"
})
# Create test map
map =
create_map(%{
id: @test_map_id,
name: "Test Char Track",
slug: "test-char-tracking-#{:rand.uniform(1_000_000)}",
owner_id: character.id,
scope: :none,
only_tracked_characters: false
})
# Note: scope: :all is used because :none prevents system addition
# (is_connection_valid returns false for :none scope)
map = create_map(%{
name: "Test Char Track",
slug: "test-char-tracking-#{:rand.uniform(1_000_000)}",
owner_id: character.id,
scope: :all,
only_tracked_characters: false
})
on_exit(fn ->
cleanup_test_data()
cleanup_test_data(map.id)
end)
{:ok, user: user, character: character, map: map}
end
defp cleanup_test_data do
# Note: We can't clean up character-specific caches in setup
# because we don't have the character.id yet. Tests will clean
# up their own caches in on_exit if needed.
# Clean up map-level presence tracking
WandererApp.Cache.delete("map_#{@test_map_id}:presence_character_ids")
end
defp cleanup_character_caches(character_id) do
# Clean up character location caches
WandererApp.Cache.delete("map_#{@test_map_id}:character:#{character_id}:solar_system_id")
WandererApp.Cache.delete(
"map_#{@test_map_id}:character:#{character_id}:start_solar_system_id"
)
WandererApp.Cache.delete("map_#{@test_map_id}:character:#{character_id}:station_id")
WandererApp.Cache.delete("map_#{@test_map_id}:character:#{character_id}:structure_id")
# Clean up character cache
if Cachex.exists?(:character_cache, character_id) do
Cachex.del(:character_cache, character_id)
end
# Clean up character state cache
if Cachex.exists?(:character_state_cache, character_id) do
Cachex.del(:character_state_cache, character_id)
end
end
defp set_character_location(character_id, solar_system_id, opts \\ []) do
"""
Helper to simulate character location update in cache.
This mimics what the Character.Tracker does when it polls ESI.
"""
structure_id = opts[:structure_id]
station_id = opts[:station_id]
# Capsule
ship_type_id = opts[:ship_type_id] || 670
# First get the existing character from cache or database to maintain all fields
{:ok, existing_character} = WandererApp.Character.get_character(character_id)
# Update character cache (mimics Character.update_character/2)
character_data =
Map.merge(existing_character, %{
solar_system_id: solar_system_id,
structure_id: structure_id,
station_id: station_id,
ship_type_id: ship_type_id,
updated_at: DateTime.utc_now()
})
Cachex.put(:character_cache, character_id, character_data)
end
defp ensure_map_started(map_id) do
"""
Ensure the map server is started for the given map.
This is required for character updates to work.
"""
case WandererApp.Map.Manager.start_map(map_id) do
{:ok, _pid} -> :ok
{:error, {:already_started, _pid}} -> :ok
other -> other
end
end
defp add_character_to_map_presence(map_id, character_id) do
"""
Helper to add character to map's presence list.
This mimics what PresenceGracePeriodManager does.
"""
{:ok, current_chars} = WandererApp.Cache.lookup("map_#{map_id}:presence_character_ids", [])
updated_chars = Enum.uniq([character_id | current_chars])
WandererApp.Cache.insert("map_#{map_id}:presence_character_ids", updated_chars)
end
defp get_map_systems(map_id) do
"""
Helper to get all systems currently on the map.
"""
case WandererApp.Map.get_map_state(map_id) do
{:ok, %{map: %{systems: systems}}} when is_map(systems) ->
Map.values(systems)
{:ok, _} ->
[]
end
end
defp system_on_map?(map_id, solar_system_id) do
"""
Check if a specific system is on the map.
"""
systems = get_map_systems(map_id)
Enum.any?(systems, fn sys -> sys.solar_system_id == solar_system_id end)
end
defp wait_for_system_on_map(map_id, solar_system_id, timeout \\ 2000) do
"""
Wait for a system to appear on the map (for async operations).
"""
deadline = System.monotonic_time(:millisecond) + timeout
Stream.repeatedly(fn ->
if system_on_map?(map_id, solar_system_id) do
{:ok, true}
else
if System.monotonic_time(:millisecond) < deadline do
Process.sleep(50)
:continue
else
{:error, :timeout}
end
end
end)
|> Enum.find(fn result -> result != :continue end)
|> case do
{:ok, true} -> true
{:error, :timeout} -> false
end
end
# Note: Helper functions moved to WandererApp.MapTestHelpers
# Functions available via import:
# - setup_ddrt_mocks/0
# - setup_system_static_info_cache/0
# - set_character_location/3
# - ensure_map_started/1
# - wait_for_map_started/2
# - add_character_to_map_presence/2
# - get_map_systems/1
# - system_on_map?/2
# - wait_for_system_on_map/3
# - cleanup_character_caches/2
# - cleanup_test_data/1
describe "Basic character location tracking" do
@tag :skip
@tag :integration
test "character location update adds system to map", %{map: map, character: character} do
# This test verifies the basic flow:
# 1. Character starts tracking on a map
# 2. Character location is updated in cache
# 1. Character starts tracking on a map at Jita
# 2. Character moves to Amarr
# 3. update_characters() is called
# 4. System is added to the map
# Setup: Ensure map is started
ensure_map_started(map.id)
# Setup: Add character to presence
add_character_to_map_presence(map.id, character.id)
# Setup: Set character location
set_character_location(character.id, @system_jita)
# Setup: Set start_solar_system_id (this happens when tracking starts)
WandererApp.Cache.insert(
"map_#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
# Execute: Run character update
CharactersImpl.update_characters(map.id)
# Verify: Jita should be added to the map
assert wait_for_system_on_map(map.id, @system_jita),
"Jita should have been added to map when character tracking started"
end
@tag :skip
@tag :integration
test "character movement from A to B adds both systems", %{map: map, character: character} do
# This test verifies:
# 1. Character starts at system A
# 2. Character moves to system B
# 3. update_characters() processes the change
# 4. Both systems are on the map
# 4. Both systems are added to the map
# Setup: Ensure map is started
ensure_map_started(map.id)
@@ -248,73 +101,118 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
# Setup: Character starts at Jita
set_character_location(character.id, @system_jita)
# Setup: Set start_solar_system_id (this happens when tracking starts)
# Note: The start system is NOT added until the character moves
WandererApp.Cache.insert(
"map_#{map.id}:character:#{character.id}:start_solar_system_id",
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
# First update - adds Jita
# Execute: First update - start system is intentionally NOT added yet
CharactersImpl.update_characters(map.id)
assert wait_for_system_on_map(map.id, @system_jita), "Jita should be on map initially"
# Verify: Jita should NOT be on map yet (design: start position not added)
refute system_on_map?(map.id, @system_jita),
"Start system should not be added until character moves"
# Character moves to Amarr
set_character_location(character.id, @system_amarr)
# Second update - should add Amarr
# Execute: Second update - should add both systems
CharactersImpl.update_characters(map.id)
# Verify: Both systems should be on map
assert wait_for_system_on_map(map.id, @system_jita), "Jita should still be on map"
assert wait_for_system_on_map(map.id, @system_amarr), "Amarr should have been added to map"
# Verify: Both systems should now be on map
assert wait_for_system_on_map(map.id, @system_jita),
"Jita should be added after character moves"
assert wait_for_system_on_map(map.id, @system_amarr),
"Amarr should be added as the new location"
end
@tag :integration
test "character movement from A to B adds both systems", %{map: map, character: character} do
# This test verifies:
# 1. Character starts at system A
# 2. Character moves to system B
# 3. update_characters() processes the change
# 4. Both systems are on the map
# Note: The start system is NOT added until the character moves (design decision)
# Setup: Ensure map is started
ensure_map_started(map.id)
# Setup: Add character to presence
add_character_to_map_presence(map.id, character.id)
# Setup: Character starts at Jita
set_character_location(character.id, @system_jita)
WandererApp.Cache.insert(
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
# First update - start system is intentionally NOT added yet
CharactersImpl.update_characters(map.id)
refute system_on_map?(map.id, @system_jita),
"Start system should not be added until character moves"
# Character moves to Amarr
set_character_location(character.id, @system_amarr)
# Second update - should add both systems
CharactersImpl.update_characters(map.id)
# Verify: Both systems should be on map after character moves
assert wait_for_system_on_map(map.id, @system_jita), "Jita should be added after character moves"
assert wait_for_system_on_map(map.id, @system_amarr), "Amarr should be added as the new location"
end
end
describe "Rapid character movement (Race Condition Tests)" do
@tag :skip
@tag :integration
test "rapid movement A→B→C adds all three systems", %{map: map, character: character} do
# This test verifies the critical race condition fix:
# When a character moves rapidly through multiple systems,
# all systems should be added to the map, not just the start and end.
# Note: Start system is NOT added until character moves (design decision)
ensure_map_started(map.id)
add_character_to_map_presence(map.id, character.id)
# Character starts at Jita
set_character_location(character.id, @system_jita)
WandererApp.Cache.insert(
"map_#{map.id}:character:#{character.id}:start_solar_system_id",
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
# First update - start system is intentionally NOT added yet
CharactersImpl.update_characters(map.id)
assert wait_for_system_on_map(map.id, @system_jita)
refute system_on_map?(map.id, @system_jita),
"Start system should not be added until character moves"
# Rapid jump to Amarr (intermediate system)
set_character_location(character.id, @system_amarr)
# Before update_characters can process, character jumps again to Dodixie
# This simulates the race condition
# Should process Jita→Amarr
# Second update - should add both Jita (start) and Amarr (current)
CharactersImpl.update_characters(map.id)
# Character already at Dodixie before second update
# Verify both Jita and Amarr are now on map
assert wait_for_system_on_map(map.id, @system_jita), "Jita (start) should be on map after movement"
assert wait_for_system_on_map(map.id, @system_amarr), "Amarr should be on map"
# Rapid jump to Dodixie before next update cycle
set_character_location(character.id, @system_dodixie)
# Should process Amarr→Dodixie
# Third update - should add Dodixie
CharactersImpl.update_characters(map.id)
# Verify: All three systems should be on map
assert wait_for_system_on_map(map.id, @system_jita), "Jita (start) should be on map"
assert wait_for_system_on_map(map.id, @system_amarr),
"Amarr (intermediate) should be on map - this is the critical test"
assert wait_for_system_on_map(map.id, @system_jita), "Jita (start) should still be on map"
assert wait_for_system_on_map(map.id, @system_amarr), "Amarr (intermediate) should still be on map - this is the critical test"
assert wait_for_system_on_map(map.id, @system_dodixie), "Dodixie (end) should be on map"
end
@tag :skip
@tag :integration
test "concurrent location updates don't lose intermediate systems", %{
map: map,
@@ -328,9 +226,8 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
# Start at Jita
set_character_location(character.id, @system_jita)
WandererApp.Cache.insert(
"map_#{map.id}:character:#{character.id}:start_solar_system_id",
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
@@ -358,7 +255,6 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
end
describe "start_solar_system_id persistence" do
@tag :skip
@tag :integration
test "start_solar_system_id persists through multiple updates", %{
map: map,
@@ -375,7 +271,7 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
# Set start_solar_system_id
WandererApp.Cache.insert(
"map_#{map.id}:character:#{character.id}:start_solar_system_id",
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
@@ -384,7 +280,9 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
# Verify start_solar_system_id still exists after first update
{:ok, start_system} =
WandererApp.Cache.lookup("map_#{map.id}:character:#{character.id}:start_solar_system_id")
WandererApp.Cache.lookup(
"map:#{map.id}:character:#{character.id}:start_solar_system_id"
)
assert start_system == @system_jita,
"start_solar_system_id should persist after first update (not be taken/removed)"
@@ -400,7 +298,6 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
assert wait_for_system_on_map(map.id, @system_amarr)
end
@tag :skip
@tag :integration
test "first system addition uses correct logic when start_solar_system_id exists", %{
map: map,
@@ -408,6 +305,7 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
} do
# This test verifies that the first system addition logic
# works correctly with start_solar_system_id
# Design: Start system is NOT added until character moves
ensure_map_started(map.id)
add_character_to_map_presence(map.id, character.id)
@@ -417,114 +315,265 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
# Set start_solar_system_id
WandererApp.Cache.insert(
"map_#{map.id}:character:#{character.id}:start_solar_system_id",
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
# No old location in map cache (first time tracking)
# This triggers the special first-system-addition logic
# First update - character still at start position
CharactersImpl.update_characters(map.id)
# Verify Jita is added
# Verify Jita is NOT added yet (design: start position not added until movement)
refute system_on_map?(map.id, @system_jita),
"Start system should not be added until character moves"
# Character moves to Amarr
set_character_location(character.id, @system_amarr)
# Second update - should add both systems
CharactersImpl.update_characters(map.id)
# Verify both systems are added after movement
assert wait_for_system_on_map(map.id, @system_jita),
"First system should be added when character starts tracking"
"Jita should be added after character moves away"
assert wait_for_system_on_map(map.id, @system_amarr),
"Amarr should be added as the new location"
end
end
describe "Database failure handling" do
@tag :integration
test "database failure during system creation is logged and retried", %{
map: map,
character: character
} do
# This test verifies that database failures don't silently succeed
# and are properly retried
test "system addition failures emit telemetry events", %{map: map, character: character} do
# This test verifies that database failures emit proper telemetry events
# Current implementation logs errors and emits telemetry for failures
# (Retry logic not yet implemented)
# NOTE: This test would need to mock the database to simulate failures
# For now, we document the expected behavior
ensure_map_started(map.id)
add_character_to_map_presence(map.id, character.id)
# Expected behavior:
# 1. maybe_add_system encounters DB error
# 2. Error is logged with context
# 3. Operation is retried (3 attempts with backoff)
# 4. If all retries fail, error tuple is returned (not :ok)
# 5. Telemetry event is emitted for the failure
test_pid = self()
:ok
# Attach handler for system addition error events
:telemetry.attach(
"test-system-addition-error",
[:wanderer_app, :map, :system_addition, :error],
fn event, measurements, metadata, _config ->
send(test_pid, {:telemetry_event, event, measurements, metadata})
end,
nil
)
# Set character at Jita and set start location
set_character_location(character.id, @system_jita)
WandererApp.Cache.insert(
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
# Trigger update which may encounter database issues
# In production, database failures would emit telemetry
CharactersImpl.update_characters(map.id)
# Note: In a real database failure scenario, we would receive the telemetry event
# For this test, we verify the mechanism works by checking if the map was started correctly
# and that character updates can complete without crashing
# Verify update_characters completed (returned :ok without crashing)
assert :ok == CharactersImpl.update_characters(map.id)
:telemetry.detach("test-system-addition-error")
end
@tag :integration
test "transient database errors succeed on retry", %{map: map, character: character} do
# This test verifies retry logic for transient failures
# Expected behavior:
# 1. First attempt fails with transient error (timeout, connection, etc.)
# 2. Retry succeeds
# 3. System is added successfully
# 4. Telemetry emitted for both failure and success
:ok
end
@tag :integration
test "permanent database errors don't break update_characters for other characters", %{
test "character update errors are logged but don't crash update_characters", %{
map: map,
character: character
} do
# This test verifies that a failure for one character
# doesn't prevent processing other characters
# This test verifies that errors in character processing are caught
# and logged without crashing the entire update_characters cycle
# Expected behavior:
# 1. Multiple characters being tracked
# 2. One character's update fails permanently
# 3. Other characters' updates succeed
# 4. Error is logged with character context
# 5. update_characters completes for all characters
ensure_map_started(map.id)
add_character_to_map_presence(map.id, character.id)
:ok
# Set up character location
set_character_location(character.id, @system_jita)
WandererApp.Cache.insert(
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
# Run update_characters - should complete even if individual character updates fail
result = CharactersImpl.update_characters(map.id)
assert result == :ok
# Verify the function is resilient and can be called multiple times
result = CharactersImpl.update_characters(map.id)
assert result == :ok
end
@tag :integration
test "errors processing one character don't affect other characters", %{map: map} do
# This test verifies that update_characters processes characters independently
# using Task.async_stream, so one failure doesn't block others
ensure_map_started(map.id)
# Create a second character
user2 = create_user(%{name: "Test User 2", hash: "test_hash_#{:rand.uniform(1_000_000)}"})
character2 = create_character(%{
eve_id: "#{@test_character_eve_id + 1}",
name: "Test Character 2",
user_id: user2.id,
scopes: "esi-location.read_location.v1 esi-location.read_ship_type.v1",
tracking_pool: "default"
})
# Add both characters to map presence
add_character_to_map_presence(map.id, character2.id)
# Set locations for both characters
set_character_location(character2.id, @system_amarr)
WandererApp.Cache.insert(
"map:#{map.id}:character:#{character2.id}:start_solar_system_id",
@system_amarr
)
# Run update_characters - should process both characters independently
result = CharactersImpl.update_characters(map.id)
assert result == :ok
# Clean up character 2 caches
cleanup_character_caches(map.id, character2.id)
end
end
describe "Task timeout handling" do
@tag :integration
@tag :slow
test "character update timeout doesn't lose state permanently", %{
map: map,
character: character
} do
# This test verifies that timeouts during update_characters
# don't cause permanent state loss
test "update_characters is resilient to processing delays", %{map: map, character: character} do
# This test verifies that update_characters handles task processing
# without crashing, even when individual character updates might be slow
# (Current implementation: 15-second timeout per task with :kill_task)
# Note: Recovery ETS table not yet implemented
# Expected behavior:
# 1. Character update takes > 15 seconds (simulated slow DB)
# 2. Task times out and is killed
# 3. State is preserved in recovery ETS table
# 4. Next update_characters cycle recovers and processes the update
# 5. System is eventually added to map
# 6. Telemetry emitted for timeout and recovery
ensure_map_started(map.id)
add_character_to_map_presence(map.id, character.id)
:ok
# Set up character with location
set_character_location(character.id, @system_jita)
WandererApp.Cache.insert(
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
# Run multiple update cycles to verify stability
# If there were timeout/recovery issues, this would fail
for _i <- 1..3 do
result = CharactersImpl.update_characters(map.id)
assert result == :ok
Process.sleep(100)
end
# Verify the map server is still functional
systems = get_map_systems(map.id)
assert is_list(systems)
end
@tag :integration
test "multiple concurrent timeouts don't corrupt cache", %{map: map, character: character} do
# This test verifies that multiple simultaneous timeouts
# don't cause cache corruption
test "concurrent character updates don't cause crashes", %{map: map} do
# This test verifies that processing multiple characters concurrently
# (using Task.async_stream) doesn't cause crashes or corruption
# Even if some tasks might timeout or fail
# Expected behavior:
# 1. Multiple characters timing out simultaneously
# 2. Each timeout is handled independently
# 3. No cache corruption or race conditions
# 4. All characters eventually recover
# 5. Telemetry tracks recovery health
ensure_map_started(map.id)
:ok
# Create multiple characters for concurrent processing
characters = for i <- 1..5 do
user = create_user(%{
name: "Test User #{i}",
hash: "test_hash_#{:rand.uniform(1_000_000)}"
})
character = create_character(%{
eve_id: "#{@test_character_eve_id + i}",
name: "Test Character #{i}",
user_id: user.id,
scopes: "esi-location.read_location.v1 esi-location.read_ship_type.v1",
tracking_pool: "default"
})
# Add character to presence and set location
add_character_to_map_presence(map.id, character.id)
solar_system_id = Enum.at([@system_jita, @system_amarr, @system_dodixie, @system_rens], rem(i, 4))
set_character_location(character.id, solar_system_id)
WandererApp.Cache.insert(
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
solar_system_id
)
character
end
# Run update_characters - should handle all characters concurrently
result = CharactersImpl.update_characters(map.id)
assert result == :ok
# Run again to verify stability
result = CharactersImpl.update_characters(map.id)
assert result == :ok
# Clean up character caches
Enum.each(characters, fn char ->
cleanup_character_caches(map.id, char.id)
end)
end
@tag :integration
test "update_characters emits telemetry for error cases", %{map: map, character: character} do
# This test verifies that errors during update_characters
# emit proper telemetry events for monitoring
ensure_map_started(map.id)
add_character_to_map_presence(map.id, character.id)
test_pid = self()
# Attach handlers for update_characters telemetry
:telemetry.attach_many(
"test-update-characters-telemetry",
[
[:wanderer_app, :map, :update_characters, :start],
[:wanderer_app, :map, :update_characters, :complete],
[:wanderer_app, :map, :update_characters, :error]
],
fn event, measurements, metadata, _config ->
send(test_pid, {:telemetry_event, event, measurements, metadata})
end,
nil
)
# Set up character location
set_character_location(character.id, @system_jita)
# Trigger update_characters
CharactersImpl.update_characters(map.id)
# Should receive start and complete events (or error event if something failed)
assert_receive {:telemetry_event, [:wanderer_app, :map, :update_characters, :start], _, _}, 1000
# Should receive either complete or error event
receive do
{:telemetry_event, [:wanderer_app, :map, :update_characters, :complete], _, _} -> :ok
{:telemetry_event, [:wanderer_app, :map, :update_characters, :error], _, _} -> :ok
after
1000 -> flunk("Expected to receive complete or error telemetry event")
end
:telemetry.detach("test-update-characters-telemetry")
end
end
describe "Cache consistency" do
@tag :skip
@tag :integration
test "character cache and map cache stay in sync", %{map: map, character: character} do
# This test verifies that the three character location caches
@@ -540,9 +589,8 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
# Set location in character cache
set_character_location(character.id, @system_jita)
WandererApp.Cache.insert(
"map_#{map.id}:character:#{character.id}:start_solar_system_id",
"map:#{map.id}:character:#{character.id}:start_solar_system_id",
@system_jita
)
@@ -550,7 +598,7 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
# Verify map cache was updated
{:ok, map_cached_location} =
WandererApp.Cache.lookup("map_#{map.id}:character:#{character.id}:solar_system_id")
WandererApp.Cache.lookup("map:#{map.id}:character:#{character.id}:solar_system_id")
assert map_cached_location == @system_jita,
"Map-specific cache should match character cache"
@@ -561,19 +609,17 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
# Verify both caches updated
{:ok, character_data} = Cachex.get(:character_cache, character.id)
{:ok, map_cached_location} =
WandererApp.Cache.lookup("map_#{map.id}:character:#{character.id}:solar_system_id")
WandererApp.Cache.lookup("map:#{map.id}:character:#{character.id}:solar_system_id")
assert character_data.solar_system_id == @system_amarr
assert map_cached_location == @system_amarr,
"Both caches should be consistent after update"
end
end
describe "Telemetry and observability" do
test "telemetry events are emitted for location updates", %{character: character} do
test "telemetry events are emitted for location updates", %{character: character, map: map} do
# This test verifies that telemetry is emitted for tracking debugging
test_pid = self()
@@ -597,7 +643,7 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
:telemetry.execute(
[:wanderer_app, :character, :location_update, :start],
%{system_time: System.system_time()},
%{character_id: character.id, map_id: @test_map_id}
%{character_id: character.id, map_id: map.id}
)
:telemetry.execute(
@@ -605,7 +651,7 @@ defmodule WandererApp.Map.CharacterLocationTrackingTest do
%{duration: 100, system_time: System.system_time()},
%{
character_id: character.id,
map_id: @test_map_id,
map_id: map.id,
from_system: @system_jita,
to_system: @system_amarr
}

View File

@@ -117,16 +117,18 @@ defmodule WandererApp.DataCase do
      :ok
    end
  end)

  # Grant database access to MapPoolSupervisor and all its dynamically started children
  case Process.whereis(WandererApp.Map.MapPoolSupervisor) do
    pid when is_pid(pid) ->
      # Grant access to the supervisor and its entire supervision tree
      # This ensures dynamically started map servers get database access
      owner_pid = Process.get(:sandbox_owner_pid) || self()
      WandererApp.Test.DatabaseAccessManager.grant_supervision_tree_access(pid, owner_pid)

    _ ->
      :ok
  end
end

@doc """
Grants database access to a process with comprehensive monitoring.

This function provides enhanced database access granting with monitoring
for child processes and automatic access granting.
"""
def allow_database_access(pid, owner_pid \\ self()) do
  WandererApp.Test.DatabaseAccessManager.grant_database_access(pid, owner_pid)
end

@doc """

View File

@@ -22,6 +22,9 @@ defmodule WandererApp.Test.IntegrationConfig do
# Ensure PubSub server is started for integration tests
ensure_pubsub_server()
# Ensure map supervisors are started for map-related integration tests
ensure_map_supervisors_started()
:ok
end
@@ -57,6 +60,42 @@ defmodule WandererApp.Test.IntegrationConfig do
end
end
@doc """
Ensures map supervisors are started for integration tests.
This starts both MapPoolSupervisor and Map.Manager which are
required for character location tracking and map management tests.
IMPORTANT: MapPoolSupervisor must be started BEFORE Map.Manager
because Map.Manager depends on the registries created by MapPoolSupervisor.
"""
def ensure_map_supervisors_started do
# Start MapPoolSupervisor FIRST if not running
# This supervisor creates the required registries (:map_pool_registry, :unique_map_pool_registry)
# and starts MapPoolDynamicSupervisor
case Process.whereis(WandererApp.Map.MapPoolSupervisor) do
nil ->
{:ok, _} = WandererApp.Map.MapPoolSupervisor.start_link([])
_ ->
:ok
end
# Give the supervisor a moment to fully initialize its children
Process.sleep(100)
# Start Map.Manager AFTER MapPoolSupervisor
case GenServer.whereis(WandererApp.Map.Manager) do
nil ->
{:ok, _} = WandererApp.Map.Manager.start_link([])
_ ->
:ok
end
:ok
end
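Outside of tests, the same ordering constraint would be expressed by child order in a supervision tree; a sketch under that assumption (the application's real children list is not part of this diff):

children = [
  # creates :map_pool_registry and :unique_map_pool_registry first
  WandererApp.Map.MapPoolSupervisor,
  # depends on the registries created above
  WandererApp.Map.Manager
]

Supervisor.start_link(children, strategy: :one_for_one)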
@doc """
Cleans up integration test environment.
@@ -74,6 +113,8 @@ defmodule WandererApp.Test.IntegrationConfig do
end
# Note: PubSub cleanup is handled by Phoenix during test shutdown
# Note: Map supervisors are not cleaned up here as they may be shared
# across tests and should persist for the test session
:ok
end

View File

@@ -1,8 +1,13 @@
defmodule WandererApp.MapTestHelpers do
@moduledoc """
Shared helper functions for map-related tests.
Shared helper functions for map-related integration tests.
This module provides common functionality for testing map servers,
character location tracking, and system management.
"""
import Mox
@doc """
Helper function to expect a map server error response.
This function is used across multiple test files to handle
@@ -17,4 +22,411 @@ defmodule WandererApp.MapTestHelpers do
:ok
end
end
@doc """
Ensures the map is started for the given map ID.
Uses async Map.Manager.start_map and waits for completion.
## Parameters
- map_id: The ID of the map to start
## Examples
iex> ensure_map_started(map.id)
:ok
"""
def ensure_map_started(map_id) do
# Queue the map for starting (async)
:ok = WandererApp.Map.Manager.start_map(map_id)
# Wait for the map to actually start
wait_for_map_started(map_id)
end
@doc """
Waits for a map to finish starting by polling the cache.
## Parameters
- map_id: The ID of the map to wait for
- timeout: Maximum time to wait in milliseconds (default: 10000)
## Examples
iex> wait_for_map_started(map.id, 5000)
:ok
"""
def wait_for_map_started(map_id, timeout \\ 10_000) do
deadline = System.monotonic_time(:millisecond) + timeout
Stream.repeatedly(fn ->
# Check both the map_started flag and the started_maps list
map_started_flag =
case WandererApp.Cache.lookup("map_#{map_id}:started") do
{:ok, true} -> true
_ -> false
end
in_started_maps_list =
case WandererApp.Cache.lookup("started_maps", []) do
{:ok, started_maps} when is_list(started_maps) ->
Enum.member?(started_maps, map_id)
_ ->
false
end
cond do
# Map is fully started
map_started_flag and in_started_maps_list ->
{:ok, :started}
# Map is partially started (in one but not both) - keep waiting
map_started_flag or in_started_maps_list ->
if System.monotonic_time(:millisecond) < deadline do
Process.sleep(100)
:continue
else
{:error, :timeout}
end
# Map not started yet
true ->
if System.monotonic_time(:millisecond) < deadline do
Process.sleep(100)
:continue
else
{:error, :timeout}
end
end
end)
|> Enum.find(fn result -> result != :continue end)
|> case do
{:ok, :started} ->
# Give it a bit more time to fully initialize all subsystems
Process.sleep(200)
:ok
{:error, :timeout} ->
raise "Timeout waiting for map #{map_id} to start. Check Map.Manager is running."
end
end
@doc """
Sets up DDRT (R-tree spatial index) mock stubs.
This is required for system positioning on the map.
We stub all R-tree operations to allow systems to be placed anywhere.
## Examples
iex> setup_ddrt_mocks()
:ok
"""
def setup_ddrt_mocks do
Test.DDRTMock
|> stub(:init_tree, fn _name, _opts -> :ok end)
|> stub(:insert, fn _data, _tree_name -> {:ok, %{}} end)
|> stub(:update, fn _id, _data, _tree_name -> {:ok, %{}} end)
|> stub(:delete, fn _ids, _tree_name -> {:ok, %{}} end)
# query returns empty list to indicate no spatial conflicts (position is available)
|> stub(:query, fn _bbox, _tree_name -> {:ok, []} end)
:ok
end
@doc """
Populates the system static info cache with data for common test systems.
This is required for SystemsImpl.maybe_add_system to work properly,
as it needs to fetch system names and other metadata.
## Parameters
- systems: Map of solar_system_id => system_info (optional, uses defaults if not provided)
## Examples
iex> setup_system_static_info_cache()
:ok
"""
def setup_system_static_info_cache(systems \\ nil) do
test_systems = systems || default_test_systems()
Enum.each(test_systems, fn {solar_system_id, system_info} ->
Cachex.put(:system_static_info_cache, solar_system_id, system_info)
end)
:ok
end
@doc """
Returns default test system configurations for common EVE systems.
## Examples
iex> default_test_systems()
%{30_000_142 => %{...}}
"""
def default_test_systems do
%{
# Jita
30_000_142 => %{
solar_system_id: 30_000_142,
region_id: 10_000_002,
constellation_id: 20_000_020,
solar_system_name: "Jita",
solar_system_name_lc: "jita",
constellation_name: "Kimotoro",
region_name: "The Forge",
system_class: 0,
security: "0.9",
type_description: "High Security",
class_title: "High Sec",
is_shattered: false,
effect_name: nil,
effect_power: nil,
statics: [],
wandering: [],
triglavian_invasion_status: nil,
sun_type_id: 45041
},
# Amarr
30_002_187 => %{
solar_system_id: 30_002_187,
region_id: 10_000_043,
constellation_id: 20_000_304,
solar_system_name: "Amarr",
solar_system_name_lc: "amarr",
constellation_name: "Throne Worlds",
region_name: "Domain",
system_class: 0,
security: "1.0",
type_description: "High Security",
class_title: "High Sec",
is_shattered: false,
effect_name: nil,
effect_power: nil,
statics: [],
wandering: [],
triglavian_invasion_status: nil,
sun_type_id: 45041
},
# Dodixie
30_002_659 => %{
solar_system_id: 30_002_659,
region_id: 10_000_032,
constellation_id: 20_000_413,
solar_system_name: "Dodixie",
solar_system_name_lc: "dodixie",
constellation_name: "Sinq Laison",
region_name: "Sinq Laison",
system_class: 0,
security: "0.9",
type_description: "High Security",
class_title: "High Sec",
is_shattered: false,
effect_name: nil,
effect_power: nil,
statics: [],
wandering: [],
triglavian_invasion_status: nil,
sun_type_id: 45041
},
# Rens
30_002_510 => %{
solar_system_id: 30_002_510,
region_id: 10_000_030,
constellation_id: 20_000_387,
solar_system_name: "Rens",
solar_system_name_lc: "rens",
constellation_name: "Frarn",
region_name: "Heimatar",
system_class: 0,
security: "0.9",
type_description: "High Security",
class_title: "High Sec",
is_shattered: false,
effect_name: nil,
effect_power: nil,
statics: [],
wandering: [],
triglavian_invasion_status: nil,
sun_type_id: 45041
}
}
end
@doc """
Helper to simulate character location update in cache.
This mimics what the Character.Tracker does when it polls ESI.
## Parameters
- character_id: The character ID to update
- solar_system_id: The solar system ID where the character is located
- opts: Optional parameters (structure_id, station_id, ship)
## Examples
iex> set_character_location(character.id, 30_000_142, ship: 670)
:ok
"""
def set_character_location(character_id, solar_system_id, opts \\ []) do
structure_id = opts[:structure_id]
station_id = opts[:station_id]
ship = opts[:ship] || 670 # Capsule
# First get the existing character from cache or database to maintain all fields
{:ok, existing_character} = WandererApp.Character.get_character(character_id)
# Update character cache (mimics Character.update_character/2)
character_data =
Map.merge(existing_character, %{
solar_system_id: solar_system_id,
structure_id: structure_id,
station_id: station_id,
ship: ship,
updated_at: DateTime.utc_now()
})
Cachex.put(:character_cache, character_id, character_data)
end
@doc """
Helper to add character to map's presence list.
This mimics what PresenceGracePeriodManager does.
## Parameters
- map_id: The map ID
- character_id: The character ID to add
## Examples
iex> add_character_to_map_presence(map.id, character.id)
:ok
"""
def add_character_to_map_presence(map_id, character_id) do
{:ok, current_chars} = WandererApp.Cache.lookup("map_#{map_id}:presence_character_ids", [])
updated_chars = Enum.uniq([character_id | current_chars])
WandererApp.Cache.insert("map_#{map_id}:presence_character_ids", updated_chars)
end
@doc """
Helper to get all systems currently on the map.
Uses :map_cache instead of :map_state_cache because add_system/2 updates :map_cache.
## Parameters
- map_id: The map ID
## Returns
- List of systems on the map
## Examples
iex> get_map_systems(map.id)
[%{solar_system_id: 30_000_142, ...}, ...]
"""
def get_map_systems(map_id) do
case WandererApp.Map.get_map(map_id) do
{:ok, %{systems: systems}} when is_map(systems) ->
Map.values(systems)
{:ok, _} ->
[]
{:error, _} ->
[]
end
end
@doc """
Checks if a specific system is on the map.
## Parameters
- map_id: The map ID
- solar_system_id: The solar system ID to check
## Returns
- true if the system is on the map, false otherwise
## Examples
iex> system_on_map?(map.id, 30_000_142)
true
"""
def system_on_map?(map_id, solar_system_id) do
systems = get_map_systems(map_id)
Enum.any?(systems, fn sys -> sys.solar_system_id == solar_system_id end)
end
@doc """
Waits for a system to appear on the map (for async operations).
## Parameters
- map_id: The map ID
- solar_system_id: The solar system ID to wait for
- timeout: Maximum time to wait in milliseconds (default: 2000)
## Returns
- true if the system appears on the map, false if timeout
## Examples
iex> wait_for_system_on_map(map.id, 30_000_142, 5000)
true
"""
def wait_for_system_on_map(map_id, solar_system_id, timeout \\ 2000) do
deadline = System.monotonic_time(:millisecond) + timeout
Stream.repeatedly(fn ->
if system_on_map?(map_id, solar_system_id) do
{:ok, true}
else
if System.monotonic_time(:millisecond) < deadline do
Process.sleep(50)
:continue
else
{:error, :timeout}
end
end
end)
|> Enum.find(fn result -> result != :continue end)
|> case do
{:ok, true} -> true
{:error, :timeout} -> false
end
end
@doc """
Cleans up character location caches for a specific character and map.
## Parameters
- map_id: The map ID
- character_id: The character ID
## Examples
iex> cleanup_character_caches(map.id, character.id)
:ok
"""
def cleanup_character_caches(map_id, character_id) do
# Clean up character location caches
WandererApp.Cache.delete("map:#{map_id}:character:#{character_id}:solar_system_id")
WandererApp.Cache.delete("map:#{map_id}:character:#{character_id}:start_solar_system_id")
WandererApp.Cache.delete("map:#{map_id}:character:#{character_id}:station_id")
WandererApp.Cache.delete("map:#{map_id}:character:#{character_id}:structure_id")
# Clean up character cache
if Cachex.exists?(:character_cache, character_id) do
Cachex.del(:character_cache, character_id)
end
# Clean up character state cache
if Cachex.exists?(:character_state_cache, character_id) do
Cachex.del(:character_state_cache, character_id)
end
:ok
end
@doc """
Cleans up test data for a map.
## Parameters
- map_id: The map ID
## Examples
iex> cleanup_test_data(map.id)
:ok
"""
def cleanup_test_data(map_id) do
# Clean up map-level presence tracking
WandererApp.Cache.delete("map_#{map_id}:presence_character_ids")
:ok
end
end

View File

@@ -176,103 +176,19 @@ defmodule WandererApp.TestHelpers do
@doc """
Ensures a map server is started for testing.
This function has been simplified to use the standard map startup flow.
For integration tests, use WandererApp.MapTestHelpers.ensure_map_started/1 instead.
"""
def ensure_map_server_started(map_id) do
case WandererApp.Map.Server.map_pid(map_id) do
pid when is_pid(pid) ->
# Make sure existing server has database access
WandererApp.DataCase.allow_database_access(pid)
# Also allow database access for any spawned processes
allow_map_server_children_database_access(pid)
# Ensure global Mox mode is maintained
if Code.ensure_loaded?(Mox), do: Mox.set_mox_global()
:ok
# Ensure global Mox mode is maintained
if Code.ensure_loaded?(Mox), do: Mox.set_mox_global()
nil ->
# Ensure global Mox mode before starting map server
if Code.ensure_loaded?(Mox), do: Mox.set_mox_global()
# Start the map server directly for tests
{:ok, pid} = start_map_server_directly(map_id)
# Grant database access to the new map server process
WandererApp.DataCase.allow_database_access(pid)
# Allow database access for any spawned processes
allow_map_server_children_database_access(pid)
:ok
end
end
# Use the standard map startup flow through Map.Manager
:ok = WandererApp.Map.Manager.start_map(map_id)
defp start_map_server_directly(map_id) do
# Use the same approach as MapManager.start_map_server/1
case DynamicSupervisor.start_child(
{:via, PartitionSupervisor, {WandererApp.Map.DynamicSupervisors, self()}},
{WandererApp.Map.ServerSupervisor, map_id: map_id}
) do
{:ok, pid} ->
# Allow database access for the supervisor and its children
WandererApp.DataCase.allow_genserver_database_access(pid)
# Wait a bit for the map to fully initialize
:timer.sleep(500)
# Allow Mox access for the supervisor process if in test mode
WandererApp.Test.MockAllowance.setup_genserver_mocks(pid)
# Also get the actual map server pid and allow access
case WandererApp.Map.Server.map_pid(map_id) do
server_pid when is_pid(server_pid) ->
WandererApp.DataCase.allow_genserver_database_access(server_pid)
# Allow Mox access for the map server process if in test mode
WandererApp.Test.MockAllowance.setup_genserver_mocks(server_pid)
_ ->
:ok
end
{:ok, pid}
{:error, {:already_started, pid}} ->
WandererApp.DataCase.allow_database_access(pid)
{:ok, pid}
{:error, :max_children} ->
# If we hit max children, wait a bit and retry
:timer.sleep(100)
start_map_server_directly(map_id)
error ->
error
end
end
defp allow_map_server_children_database_access(map_server_pid) do
# Allow database access for all children processes
# This is important for MapEventRelay and other spawned processes
# Wait a bit for children to spawn
:timer.sleep(100)
# Get all linked processes
case Process.info(map_server_pid, :links) do
{:links, linked_pids} ->
Enum.each(linked_pids, fn linked_pid ->
if is_pid(linked_pid) and Process.alive?(linked_pid) do
WandererApp.DataCase.allow_database_access(linked_pid)
# Also check for their children
case Process.info(linked_pid, :links) do
{:links, sub_links} ->
Enum.each(sub_links, fn sub_pid ->
if is_pid(sub_pid) and Process.alive?(sub_pid) and sub_pid != map_server_pid do
WandererApp.DataCase.allow_database_access(sub_pid)
end
end)
_ ->
:ok
end
end
end)
_ ->
:ok
end
:ok
end
end

View File

@@ -311,12 +311,12 @@ defmodule WandererApp.Map.SlugUniquenessTest do
defp create_test_user do
# Create a test user with necessary attributes
{:ok, user} =
WandererApp.Api.User.new(%{
name: "Test User #{:rand.uniform(10_000)}",
eve_id: :rand.uniform(100_000_000)
})
user
case Ash.create(WandererApp.Api.User, %{
name: "Test User #{:rand.uniform(10_000)}",
hash: "test_hash_#{:rand.uniform(100_000_000)}"
}) do
{:ok, user} -> user
{:error, reason} -> raise "Failed to create user: #{inspect(reason)}"
end
end
end