mirror of
https://github.com/wanderer-industries/wanderer
synced 2026-05-01 06:50:41 +00:00
Merge branch 'main' into develop
Build Test / 🚀 Deploy to test env (fly.io) (push) Has been cancelled
Build Test / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
🧪 Test Suite / Test Suite (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/amd64) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/arm64) (push) Has been cancelled
Build / merge (push) Has been cancelled
Build / 🏷 Create Release (push) Has been cancelled
Build Test / 🚀 Deploy to test env (fly.io) (push) Has been cancelled
Build Test / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
🧪 Test Suite / Test Suite (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/amd64) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/arm64) (push) Has been cancelled
Build / merge (push) Has been cancelled
Build / 🏷 Create Release (push) Has been cancelled
This commit is contained in:
@@ -0,0 +1,603 @@
|
||||
defmodule WandererApp.Map.CharacterLocationTrackingTest do
|
||||
@moduledoc """
|
||||
Integration tests for character location tracking and system addition.
|
||||
|
||||
These tests verify end-to-end character location tracking behavior including:
|
||||
- Character location updates trigger system additions to maps
|
||||
- Rapid character movements (A→B→C) add all systems correctly
|
||||
- Database failures are handled with retries and proper error reporting
|
||||
- start_solar_system_id persists correctly through multiple updates
|
||||
- Task timeouts don't cause permanent state loss
|
||||
- Cache consistency between character and map-specific caches
|
||||
|
||||
These tests focus on the critical issues identified in the location tracking system:
|
||||
1. Race conditions in cache updates during rapid movement
|
||||
2. Silent database failures masking system addition problems
|
||||
3. One-time start_solar_system_id flag being lost permanently
|
||||
4. Task timeout handling without recovery
|
||||
"""
|
||||
|
||||
use WandererApp.DataCase, async: false
|
||||
|
||||
alias WandererApp.Map.Server.CharactersImpl
|
||||
alias WandererApp.Map.Server.SystemsImpl
|
||||
|
||||
@test_map_id 999_999_001
|
||||
@test_character_eve_id 2_123_456_789
|
||||
|
||||
# EVE Online solar system IDs for testing
|
||||
@system_jita 30_000_142
|
||||
@system_amarr 30_002_187
|
||||
@system_dodixie 30_002_659
|
||||
@system_rens 30_002_510
|
||||
|
||||
setup do
  # Start from a clean slate.
  cleanup_test_data()

  # Test user — Ash generates the id.
  owner = create_user(%{name: "Test User", hash: "test_hash_#{:rand.uniform(1_000_000)}"})

  # Character holding the ESI scopes required for location tracking.
  tracked_character =
    create_character(%{
      eve_id: "#{@test_character_eve_id}",
      name: "Test Character",
      user_id: owner.id,
      scopes: "esi-location.read_location.v1 esi-location.read_ship_type.v1",
      tracking_pool: "default"
    })

  # Map owned by the test character.
  tracked_map =
    create_map(%{
      id: @test_map_id,
      name: "Test Char Track",
      slug: "test-char-tracking-#{:rand.uniform(1_000_000)}",
      owner_id: tracked_character.id,
      scope: :none,
      only_tracked_characters: false
    })

  on_exit(&cleanup_test_data/0)

  {:ok, user: owner, character: tracked_character, map: tracked_map}
end
|
||||
|
||||
# Resets shared test state. Character-scoped caches cannot be cleared here
# because character.id is not known until setup has run; tests clean those
# up themselves via on_exit when needed.
defp cleanup_test_data do
  # Drop the map-level presence tracking entry.
  WandererApp.Cache.delete("map_#{@test_map_id}:presence_character_ids")
end
|
||||
|
||||
# Removes every cache entry associated with a single character.
#
# Fix: Cachex.exists?/2 returns {:ok, boolean}, never a bare boolean, so the
# previous `if Cachex.exists?(...)` guard was always truthy ({:ok, false} is
# truthy too). Cachex.del/2 is a no-op on missing keys, so we delete
# unconditionally instead of checking first.
defp cleanup_character_caches(character_id) do
  # Map-scoped location entries for this character.
  for suffix <- ["solar_system_id", "start_solar_system_id", "station_id", "structure_id"] do
    WandererApp.Cache.delete("map_#{@test_map_id}:character:#{character_id}:#{suffix}")
  end

  # Global character caches.
  Cachex.del(:character_cache, character_id)
  Cachex.del(:character_state_cache, character_id)
end
|
||||
|
||||
# Simulates a character location update in cache, mimicking what the
# Character.Tracker does when it polls ESI.
#
# Fix: the original used a bare heredoc string as a pseudo-docstring; in
# Elixir that is just an unused string literal (compiler warning, discarded
# at runtime), so it is replaced with real comments.
#
# Options: :structure_id, :station_id, :ship_type_id (defaults to 670).
defp set_character_location(character_id, solar_system_id, opts \\ []) do
  structure_id = opts[:structure_id]
  station_id = opts[:station_id]
  # 670 is the Capsule ship type.
  ship_type_id = opts[:ship_type_id] || 670

  # Fetch the existing character (cache or DB) so non-location fields survive.
  {:ok, existing_character} = WandererApp.Character.get_character(character_id)

  # Mimics Character.update_character/2.
  character_data =
    Map.merge(existing_character, %{
      solar_system_id: solar_system_id,
      structure_id: structure_id,
      station_id: station_id,
      ship_type_id: ship_type_id,
      updated_at: DateTime.utc_now()
    })

  Cachex.put(:character_cache, character_id, character_data)
end
|
||||
|
||||
# Ensures the map server for `map_id` is running; character updates require
# a started map. Returns :ok when started (or already running), otherwise
# passes the start result through unchanged.
#
# Fix: removed the bare heredoc pseudo-docstring (unused string literal).
defp ensure_map_started(map_id) do
  case WandererApp.Map.Manager.start_map(map_id) do
    {:ok, _pid} -> :ok
    {:error, {:already_started, _pid}} -> :ok
    other -> other
  end
end
|
||||
|
||||
# Adds the character to the map's presence list, mimicking what
# PresenceGracePeriodManager does in production.
#
# Fix: removed the bare heredoc pseudo-docstring (unused string literal).
defp add_character_to_map_presence(map_id, character_id) do
  presence_key = "map_#{map_id}:presence_character_ids"
  {:ok, current_chars} = WandererApp.Cache.lookup(presence_key, [])
  WandererApp.Cache.insert(presence_key, Enum.uniq([character_id | current_chars]))
end
|
||||
|
||||
# Returns all systems currently on the map, or [] when the state carries
# no systems.
#
# Fixes: removed the bare heredoc pseudo-docstring (unused string literal)
# and added an {:error, _} clause — the original case crashed with a
# CaseClauseError if get_map_state/1 returned an error (e.g. map not
# started); for a polling helper "no systems" is the useful answer.
defp get_map_systems(map_id) do
  case WandererApp.Map.get_map_state(map_id) do
    {:ok, %{map: %{systems: systems}}} when is_map(systems) ->
      Map.values(systems)

    {:ok, _} ->
      []

    {:error, _} ->
      []
  end
end
|
||||
|
||||
# Returns true when the given solar system is present on the map.
#
# Fix: removed the bare heredoc pseudo-docstring (unused string literal).
defp system_on_map?(map_id, solar_system_id) do
  map_id
  |> get_map_systems()
  |> Enum.any?(&(&1.solar_system_id == solar_system_id))
end
|
||||
|
||||
# Polls until the system appears on the map (for async operations).
# Returns true when found, false on timeout (default 2000 ms).
#
# Fixes: removed the bare heredoc pseudo-docstring (unused string literal)
# and replaced the Stream.repeatedly/Enum.find/case machinery with a plain
# recursive poll — same 50 ms cadence and deadline semantics, less code.
defp wait_for_system_on_map(map_id, solar_system_id, timeout \\ 2000) do
  deadline = System.monotonic_time(:millisecond) + timeout
  poll_system_on_map(map_id, solar_system_id, deadline)
end

# Recursive helper: checks, then sleeps 50 ms and retries until the deadline.
defp poll_system_on_map(map_id, solar_system_id, deadline) do
  cond do
    system_on_map?(map_id, solar_system_id) ->
      true

    System.monotonic_time(:millisecond) >= deadline ->
      false

    true ->
      Process.sleep(50)
      poll_system_on_map(map_id, solar_system_id, deadline)
  end
end
|
||||
|
||||
describe "Basic character location tracking" do
  @tag :skip
  @tag :integration
  test "character location update adds system to map", %{map: map, character: character} do
    # Happy path: once a tracked character's location lands in the cache and
    # update_characters/1 runs, the system must appear on the map.
    ensure_map_started(map.id)
    add_character_to_map_presence(map.id, character.id)
    set_character_location(character.id, @system_jita)

    # Tracking start records the character's initial system.
    start_key = "map_#{map.id}:character:#{character.id}:start_solar_system_id"
    WandererApp.Cache.insert(start_key, @system_jita)

    CharactersImpl.update_characters(map.id)

    assert wait_for_system_on_map(map.id, @system_jita),
           "Jita should have been added to map when character tracking started"
  end

  @tag :skip
  @tag :integration
  test "character movement from A to B adds both systems", %{map: map, character: character} do
    # A jump between two systems must leave both of them on the map.
    ensure_map_started(map.id)
    add_character_to_map_presence(map.id, character.id)

    set_character_location(character.id, @system_jita)
    start_key = "map_#{map.id}:character:#{character.id}:start_solar_system_id"
    WandererApp.Cache.insert(start_key, @system_jita)

    # First cycle adds the starting system.
    CharactersImpl.update_characters(map.id)
    assert wait_for_system_on_map(map.id, @system_jita), "Jita should be on map initially"

    # Jump to Amarr and process the change.
    set_character_location(character.id, @system_amarr)
    CharactersImpl.update_characters(map.id)

    assert wait_for_system_on_map(map.id, @system_jita), "Jita should still be on map"
    assert wait_for_system_on_map(map.id, @system_amarr), "Amarr should have been added to map"
  end
end
|
||||
|
||||
describe "Rapid character movement (Race Condition Tests)" do
  @tag :skip
  @tag :integration
  test "rapid movement A→B→C adds all three systems", %{map: map, character: character} do
    # When a character jumps quickly through several systems, every visited
    # system must land on the map — not just the first and last.
    ensure_map_started(map.id)
    add_character_to_map_presence(map.id, character.id)

    set_character_location(character.id, @system_jita)

    WandererApp.Cache.insert(
      "map_#{map.id}:character:#{character.id}:start_solar_system_id",
      @system_jita
    )

    CharactersImpl.update_characters(map.id)
    assert wait_for_system_on_map(map.id, @system_jita)

    # Jump to the intermediate system; this cycle should process Jita→Amarr.
    set_character_location(character.id, @system_amarr)
    CharactersImpl.update_characters(map.id)

    # Jump again before the next cycle; this one should process Amarr→Dodixie.
    set_character_location(character.id, @system_dodixie)
    CharactersImpl.update_characters(map.id)

    assert wait_for_system_on_map(map.id, @system_jita), "Jita (start) should be on map"

    assert wait_for_system_on_map(map.id, @system_amarr),
           "Amarr (intermediate) should be on map - this is the critical test"

    assert wait_for_system_on_map(map.id, @system_dodixie), "Dodixie (end) should be on map"
  end

  @tag :skip
  @tag :integration
  test "concurrent location updates don't lose intermediate systems", %{
    map: map,
    character: character
  } do
    # Updates arriving faster than the update_characters cycle (1 second)
    # must not drop intermediate systems due to cache races.
    ensure_map_started(map.id)
    add_character_to_map_presence(map.id, character.id)

    set_character_location(character.id, @system_jita)

    WandererApp.Cache.insert(
      "map_#{map.id}:character:#{character.id}:start_solar_system_id",
      @system_jita
    )

    CharactersImpl.update_characters(map.id)

    # Hop through four systems in quick succession.
    route = [@system_amarr, @system_dodixie, @system_rens, @system_jita]

    Enum.each(route, fn system ->
      set_character_location(character.id, system)
      # Brief pauses let the cache settle between hops.
      Process.sleep(10)
      CharactersImpl.update_characters(map.id)
      Process.sleep(10)
    end)

    # Every visited system — including the starting one — must be present,
    # even if some updates overlapped.
    Enum.each([@system_jita | route], fn system ->
      assert wait_for_system_on_map(map.id, system),
             "System #{system} should be on map despite rapid movements"
    end)
  end
end
|
||||
|
||||
describe "start_solar_system_id persistence" do
  @tag :skip
  @tag :integration
  test "start_solar_system_id persists through multiple updates", %{
    map: map,
    character: character
  } do
    # Regression guard for the one-time-flag bug: the start-system marker
    # must survive its first use instead of being consumed.
    ensure_map_started(map.id)
    add_character_to_map_presence(map.id, character.id)

    set_character_location(character.id, @system_jita)

    start_key = "map_#{map.id}:character:#{character.id}:start_solar_system_id"
    WandererApp.Cache.insert(start_key, @system_jita)

    # First cycle.
    CharactersImpl.update_characters(map.id)

    # The marker must still be readable after the first cycle.
    {:ok, start_system} = WandererApp.Cache.lookup(start_key)

    assert start_system == @system_jita,
           "start_solar_system_id should persist after first update (not be taken/removed)"

    # Move and run a second cycle.
    set_character_location(character.id, @system_amarr)
    CharactersImpl.update_characters(map.id)

    assert wait_for_system_on_map(map.id, @system_jita)
    assert wait_for_system_on_map(map.id, @system_amarr)
  end

  @tag :skip
  @tag :integration
  test "first system addition uses correct logic when start_solar_system_id exists", %{
    map: map,
    character: character
  } do
    # With no prior location in the map cache (first time tracking), the
    # first cycle takes the special first-system-addition path; it must add
    # the configured start system.
    ensure_map_started(map.id)
    add_character_to_map_presence(map.id, character.id)

    set_character_location(character.id, @system_jita)

    WandererApp.Cache.insert(
      "map_#{map.id}:character:#{character.id}:start_solar_system_id",
      @system_jita
    )

    CharactersImpl.update_characters(map.id)

    assert wait_for_system_on_map(map.id, @system_jita),
           "First system should be added when character starts tracking"
  end
end
|
||||
|
||||
describe "Database failure handling" do
  # Fix for the whole describe: the placeholder tests destructured
  # %{map: map, character: character} without using the bindings, producing
  # "variable is unused" warnings; the context patterns are dropped.

  @tag :integration
  test "database failure during system creation is logged and retried" do
    # Placeholder: needs database mocking before it can be implemented.
    #
    # Expected behavior:
    # 1. maybe_add_system encounters DB error
    # 2. Error is logged with context
    # 3. Operation is retried (3 attempts with backoff)
    # 4. If all retries fail, error tuple is returned (not :ok)
    # 5. Telemetry event is emitted for the failure
    :ok
  end

  @tag :integration
  test "transient database errors succeed on retry" do
    # Placeholder: verifies retry logic for transient failures.
    #
    # Expected behavior:
    # 1. First attempt fails with transient error (timeout, connection, etc.)
    # 2. Retry succeeds
    # 3. System is added successfully
    # 4. Telemetry emitted for both failure and success
    :ok
  end

  @tag :integration
  test "permanent database errors don't break update_characters for other characters" do
    # Placeholder: a failure for one character must not prevent processing
    # the remaining tracked characters.
    #
    # Expected behavior:
    # 1. Multiple characters being tracked
    # 2. One character's update fails permanently
    # 3. Other characters' updates succeed
    # 4. Error is logged with character context
    # 5. update_characters completes for all characters
    :ok
  end
end
|
||||
|
||||
describe "Task timeout handling" do
  # Fix for the whole describe: the placeholder tests destructured context
  # bindings (map, character) they never used, producing "variable is
  # unused" warnings; the context patterns are dropped.

  @tag :integration
  @tag :slow
  test "character update timeout doesn't lose state permanently" do
    # Placeholder: timeouts during update_characters must not cause
    # permanent state loss.
    #
    # Expected behavior:
    # 1. Character update takes > 15 seconds (simulated slow DB)
    # 2. Task times out and is killed
    # 3. State is preserved in recovery ETS table
    # 4. Next update_characters cycle recovers and processes the update
    # 5. System is eventually added to map
    # 6. Telemetry emitted for timeout and recovery
    :ok
  end

  @tag :integration
  test "multiple concurrent timeouts don't corrupt cache" do
    # Placeholder: simultaneous timeouts must not corrupt the cache.
    #
    # Expected behavior:
    # 1. Multiple characters timing out simultaneously
    # 2. Each timeout is handled independently
    # 3. No cache corruption or race conditions
    # 4. All characters eventually recover
    # 5. Telemetry tracks recovery health
    :ok
  end
end
|
||||
|
||||
describe "Cache consistency" do
  @tag :skip
  @tag :integration
  test "character cache and map cache stay in sync", %{map: map, character: character} do
    # Character location lives in three places that must agree:
    #   1. Cachex :character_cache (global character data)
    #   2. WandererApp.Cache "map_<id>:character:<id>:solar_system_id"
    #   3. Cachex :character_state_cache (character state)
    ensure_map_started(map.id)
    add_character_to_map_presence(map.id, character.id)

    set_character_location(character.id, @system_jita)

    WandererApp.Cache.insert(
      "map_#{map.id}:character:#{character.id}:start_solar_system_id",
      @system_jita
    )

    CharactersImpl.update_characters(map.id)

    location_key = "map_#{map.id}:character:#{character.id}:solar_system_id"

    # The map-scoped entry must reflect the character cache.
    {:ok, map_cached_location} = WandererApp.Cache.lookup(location_key)

    assert map_cached_location == @system_jita,
           "Map-specific cache should match character cache"

    # Move the character and re-check both caches.
    set_character_location(character.id, @system_amarr)
    CharactersImpl.update_characters(map.id)

    {:ok, character_data} = Cachex.get(:character_cache, character.id)
    {:ok, map_cached_location} = WandererApp.Cache.lookup(location_key)

    assert character_data.solar_system_id == @system_amarr

    assert map_cached_location == @system_amarr,
           "Both caches should be consistent after update"
  end
end
|
||||
|
||||
describe "Telemetry and observability" do
  test "telemetry events are emitted for location updates", %{character: character} do
    # Exercises the emit/receive plumbing for the location-tracking
    # telemetry events used for debugging.
    test_pid = self()
    handler_id = "test-character-location-events"

    :telemetry.attach_many(
      handler_id,
      [
        [:wanderer_app, :character, :location_update, :start],
        [:wanderer_app, :character, :location_update, :stop],
        [:wanderer_app, :map, :system_addition, :start],
        [:wanderer_app, :map, :system_addition, :stop]
      ],
      fn event, measurements, metadata, _config ->
        send(test_pid, {:telemetry_event, event, measurements, metadata})
      end,
      nil
    )

    # Emit the events by hand (production code would emit these itself).
    :telemetry.execute(
      [:wanderer_app, :character, :location_update, :start],
      %{system_time: System.system_time()},
      %{character_id: character.id, map_id: @test_map_id}
    )

    :telemetry.execute(
      [:wanderer_app, :character, :location_update, :stop],
      %{duration: 100, system_time: System.system_time()},
      %{
        character_id: character.id,
        map_id: @test_map_id,
        from_system: @system_jita,
        to_system: @system_amarr
      }
    )

    # Both events must reach this process.
    assert_receive {:telemetry_event, [:wanderer_app, :character, :location_update, :start], _,
                    _},
                   500

    assert_receive {:telemetry_event, [:wanderer_app, :character, :location_update, :stop], _,
                    _},
                   500

    :telemetry.detach(handler_id)
  end
end
|
||||
end
|
||||
@@ -0,0 +1,463 @@
|
||||
defmodule WandererApp.Map.MapPoolCrashIntegrationTest do
|
||||
@moduledoc """
|
||||
Integration tests for MapPool crash recovery.
|
||||
|
||||
These tests verify end-to-end crash recovery behavior including:
|
||||
- MapPool GenServer crashes and restarts
|
||||
- State recovery from ETS
|
||||
- Registry and cache consistency after recovery
|
||||
- Telemetry events during recovery
|
||||
- Multi-pool scenarios
|
||||
|
||||
Note: Many tests are skipped as they require full map infrastructure
|
||||
(database, Server.Impl, map data, etc.) to be set up.
|
||||
"""
|
||||
|
||||
use ExUnit.Case, async: false
|
||||
|
||||
alias WandererApp.Map.{MapPool, MapPoolDynamicSupervisor, MapPoolState}
|
||||
|
||||
@cache :map_pool_cache
|
||||
@registry :map_pool_registry
|
||||
@unique_registry :unique_map_pool_registry
|
||||
@ets_table :map_pool_state_table
|
||||
|
||||
setup do
  # Start from a clean slate.
  cleanup_test_data()

  # Infrastructure probes: tests only exercise recovery paths when the
  # supervisor and the recovery ETS table actually exist.
  supervisor_running? = Process.whereis(MapPoolDynamicSupervisor) != nil

  # Fix: :ets.info/1 returns :undefined for an unknown table name — it does
  # not raise for an atom argument — so the previous try/rescue around this
  # call was dead defensive code (and a broad-rescue anti-pattern).
  ets_exists? = :ets.info(@ets_table) != :undefined

  on_exit(fn ->
    cleanup_test_data()
  end)

  {:ok, supervisor_running: supervisor_running?, ets_exists: ets_exists?}
end
|
||||
|
||||
# Clears the caches and ETS entries this suite touches.
defp cleanup_test_data do
  # Shared caches.
  WandererApp.Cache.delete("started_maps")
  Cachex.clear(@cache)

  # Recovery ETS entries (the table may not exist when the map
  # infrastructure is not running).
  if :ets.whereis(@ets_table) != :undefined do
    :ets.match_delete(@ets_table, {:"$1", :"$2", :"$3"})
  end
end
|
||||
|
||||
# Resolves the pid registered under MapPool.<uuid> in the unique registry.
# Returns {:ok, pid} or {:error, :not_found}.
defp find_pool_pid(uuid) do
  registered_name = Module.concat(MapPool, uuid)

  case Registry.lookup(@unique_registry, registered_name) do
    [{pid, _value}] -> {:ok, pid}
    [] -> {:error, :not_found}
  end
end
|
||||
|
||||
describe "End-to-end crash recovery" do
  @tag :skip
  @tag :integration
  test "MapPool recovers all maps after abnormal crash" do
    # Planned steps:
    #   1. Start a MapPool with test maps via MapPoolDynamicSupervisor
    #   2. Verify maps are running and state is in ETS
    #   3. Simulate crash using GenServer.call(pool_pid, :error)
    #   4. Wait for supervisor to restart the pool
    #   5. Verify all maps are recovered
    #   6. Verify Registry, Cache, and ETS are consistent
    #
    # Prerequisites: test map data in the database, Server.Impl.start_map
    # working with test data, and the full supervision tree running.
    :ok
  end

  @tag :skip
  @tag :integration
  test "MapPool preserves ETS state on abnormal termination" do
    # Planned steps:
    #   1. Start a MapPool with maps
    #   2. Force crash
    #   3. Verify ETS state is preserved (not deleted)
    #   4. Verify new pool instance recovers from ETS
    :ok
  end

  @tag :skip
  @tag :integration
  test "MapPool cleans ETS state on graceful shutdown" do
    # Planned steps:
    #   1. Start a MapPool with maps
    #   2. Gracefully stop the pool (GenServer.cast(pool_pid, :stop))
    #   3. Verify ETS state is deleted
    #   4. Verify new pool starts with empty state
    :ok
  end
end
|
||||
|
||||
describe "Multi-pool crash scenarios" do
  @tag :skip
  @tag :integration
  test "multiple pools crash and recover independently" do
    # Planned steps:
    #   1. Start multiple MapPool instances with different maps
    #   2. Crash one pool
    #   3. Verify only that pool recovers, others unaffected
    #   4. Verify no cross-pool state corruption
    :ok
  end

  @tag :skip
  @tag :integration
  test "concurrent pool crashes don't corrupt recovery state" do
    # Planned steps:
    #   1. Start multiple pools
    #   2. Crash multiple pools simultaneously
    #   3. Verify all pools recover correctly
    #   4. Verify no ETS corruption or race conditions
    :ok
  end
end
|
||||
|
||||
describe "State consistency after recovery" do
  @tag :skip
  @tag :integration
  test "Registry state matches recovered state" do
    # Would assert, post-recovery:
    #   - unique_registry holds the correct map_ids for the pool UUID
    #   - map_pool_registry holds the correct pool UUID entry
    #   - every map_id in the Registry matches ETS state
    :ok
  end

  @tag :skip
  @tag :integration
  test "Cache state matches recovered state" do
    # Would assert, post-recovery:
    #   - map_pool_cache has the correct map_id -> uuid mappings
    #   - started_maps cache includes all recovered maps
    #   - no orphaned cache entries remain
    :ok
  end

  @tag :skip
  @tag :integration
  test "Map servers are actually running after recovery" do
    # Planned steps:
    #   1. Recover maps from crash
    #   2. Verify each map's GenServer is actually running
    #   3. Verify maps respond to requests
    #   4. Verify map state is correct
    :ok
  end
end
|
||||
|
||||
describe "Recovery failure handling" do
  @tag :skip
  @tag :integration
  test "recovery continues when individual map fails to start" do
    # Planned steps:
    #   1. Save state with maps [1, 2, 3] to ETS
    #   2. Delete map 2 from database
    #   3. Trigger recovery
    #   4. Verify maps 1 and 3 recover successfully
    #   5. Verify map 2 failure is logged and telemetry emitted
    #   6. Verify pool continues with maps [1, 3]
    :ok
  end

  @tag :skip
  @tag :integration
  test "recovery handles maps already running in different pool" do
    # Simulated race:
    #   1. Pool A crashes with map X
    #   2. Before recovery, map X is started in Pool B
    #   3. Pool A tries to recover map X
    #   4. Verify the conflict is detected and handled gracefully
    :ok
  end

  @tag :skip
  @tag :integration
  test "recovery handles corrupted ETS state" do
    # Planned steps:
    #   1. Manually corrupt ETS state (invalid map IDs, wrong types, etc.)
    #   2. Trigger recovery
    #   3. Verify pool handles corruption gracefully
    #   4. Verify telemetry emitted for failures
    #   5. Verify pool continues with valid maps only
    :ok
  end
end
|
||||
|
||||
describe "Telemetry during recovery" do
  test "telemetry events emitted in correct order", %{ets_exists: ets_exists?} do
    # Only meaningful when the recovery ETS table exists.
    if ets_exists? do
      test_pid = self()
      # Fix: removed an unused `events = []` binding that produced a
      # compiler warning and was never read.

      # Attach handlers for all recovery events.
      :telemetry.attach_many(
        "test-recovery-events",
        [
          [:wanderer_app, :map_pool, :recovery, :start],
          [:wanderer_app, :map_pool, :recovery, :complete],
          [:wanderer_app, :map_pool, :recovery, :map_failed]
        ],
        fn event, measurements, metadata, _config ->
          send(test_pid, {:telemetry_event, event, measurements, metadata})
        end,
        nil
      )

      uuid = "test-pool-#{:rand.uniform(1_000_000)}"

      # Simulate the recovery sequence.
      # 1. Start event.
      :telemetry.execute(
        [:wanderer_app, :map_pool, :recovery, :start],
        %{recovered_map_count: 3, total_map_count: 3},
        %{pool_uuid: uuid}
      )

      # 2. Complete event (in real recovery, this comes after all maps start).
      :telemetry.execute(
        [:wanderer_app, :map_pool, :recovery, :complete],
        %{recovered_count: 3, failed_count: 0, duration_ms: 100},
        %{pool_uuid: uuid}
      )

      # Verify we received both events.
      assert_receive {:telemetry_event, [:wanderer_app, :map_pool, :recovery, :start], _, _},
                     500

      assert_receive {:telemetry_event, [:wanderer_app, :map_pool, :recovery, :complete], _, _},
                     500

      :telemetry.detach("test-recovery-events")
    else
      :ok
    end
  end

  @tag :skip
  @tag :integration
  test "telemetry includes accurate recovery statistics" do
    # Would verify that:
    #   - recovered_map_count matches actual recovered maps
    #   - failed_count matches actual failed maps
    #   - duration_ms is accurate
    #   - all metadata is correct
    :ok
  end
end
|
||||
|
||||
describe "Interaction with Reconciler" do
  @tag :skip
  @tag :integration
  test "Reconciler doesn't interfere with crash recovery" do
    # Planned steps:
    #   1. Crash a pool with maps
    #   2. Trigger both recovery and reconciliation
    #   3. Verify they don't conflict
    #   4. Verify final state is consistent
    :ok
  end

  @tag :skip
  @tag :integration
  test "Reconciler detects failed recovery" do
    # Planned steps:
    #   1. Crash a pool with map X
    #   2. Make recovery fail for map X
    #   3. Run reconciler
    #   4. Verify reconciler detects and potentially fixes the issue
    :ok
  end
end
|
||||
|
||||
describe "Edge cases" do
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovery during pool at capacity" do
|
||||
# This test would:
|
||||
# 1. Create pool with 19 maps
|
||||
# 2. Crash pool while adding 20th map
|
||||
# 3. Verify recovery handles capacity limit
|
||||
# 4. Verify all maps start or overflow is handled
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovery with empty map list" do
|
||||
# This test would:
|
||||
# 1. Crash pool with empty map_ids
|
||||
# 2. Verify recovery completes successfully
|
||||
# 3. Verify pool starts with no maps
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "multiple crashes in quick succession" do
|
||||
# This test would:
|
||||
# 1. Crash pool
|
||||
# 2. Immediately crash again during recovery
|
||||
# 3. Verify supervisor's max_restarts is respected
|
||||
# 4. Verify state remains consistent
|
||||
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
describe "Performance under load" do
|
||||
@tag :slow
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovery completes within 2 seconds for 20 maps" do
|
||||
# This test would:
|
||||
# 1. Create pool with 20 maps (pool limit)
|
||||
# 2. Crash pool
|
||||
# 3. Measure time to full recovery
|
||||
# 4. Assert recovery < 2 seconds
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :slow
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovery doesn't block other pools" do
|
||||
# This test would:
|
||||
# 1. Start multiple pools
|
||||
# 2. Crash one pool with many maps
|
||||
# 3. Verify other pools continue to operate normally during recovery
|
||||
# 4. Measure performance impact on healthy pools
|
||||
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
describe "Supervisor interaction" do
|
||||
test "ETS table survives individual pool crash", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
# Verify ETS table is owned by supervisor, not individual pools
|
||||
table_info = :ets.info(@ets_table)
|
||||
owner_pid = Keyword.get(table_info, :owner)
|
||||
|
||||
# Owner should be alive and be the supervisor or a system process
|
||||
assert Process.alive?(owner_pid)
|
||||
|
||||
# Verify we can still access the table
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
MapPoolState.save_pool_state(uuid, [1, 2, 3])
|
||||
assert {:ok, [1, 2, 3]} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "supervisor restarts pool after crash" do
|
||||
# This test would:
|
||||
# 1. Start a pool via DynamicSupervisor
|
||||
# 2. Crash the pool
|
||||
# 3. Verify supervisor restarts it
|
||||
# 4. Verify new PID is different from old PID
|
||||
# 5. Verify pool is functional after restart
|
||||
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
describe "Database consistency" do
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovered maps load latest state from database" do
|
||||
# This test would:
|
||||
# 1. Start maps with initial state
|
||||
# 2. Modify map state in database
|
||||
# 3. Crash pool
|
||||
# 4. Verify recovered maps have latest database state
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovery uses MapState for map configuration" do
|
||||
# This test would:
|
||||
# 1. Verify recovery calls WandererApp.Map.get_map_state!/1
|
||||
# 2. Verify state comes from database MapState table
|
||||
# 3. Verify maps start with correct configuration
|
||||
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
describe "Real-world scenarios" do
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovery after OOM crash" do
|
||||
# This test would simulate recovery after out-of-memory crash:
|
||||
# 1. Start pool with maps
|
||||
# 2. Simulate OOM condition
|
||||
# 3. Verify recovery completes successfully
|
||||
# 4. Verify no memory leaks after recovery
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovery after network partition" do
|
||||
# This test would simulate recovery after network issues:
|
||||
# 1. Start maps with external dependencies
|
||||
# 2. Simulate network partition
|
||||
# 3. Crash pool
|
||||
# 4. Verify recovery handles network errors gracefully
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
@tag :integration
|
||||
test "recovery preserves user sessions" do
|
||||
# This test would:
|
||||
# 1. Start maps with active user sessions
|
||||
# 2. Crash pool
|
||||
# 3. Verify users can continue after recovery
|
||||
# 4. Verify presence tracking works after recovery
|
||||
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
+49
-2
@@ -41,6 +41,7 @@ defmodule WandererApp.Test.Mocks do
|
||||
|
||||
# Set up default stubs for DDRT mock
|
||||
Test.DDRTMock
|
||||
|> Mox.stub(:init_tree, fn _tree_name, _opts -> :ok end)
|
||||
|> Mox.stub(:insert, fn _data, _tree_name -> :ok end)
|
||||
|> Mox.stub(:update, fn _id, _data, _tree_name -> :ok end)
|
||||
|> Mox.stub(:delete, fn _ids, _tree_name -> :ok end)
|
||||
@@ -71,10 +72,10 @@ defmodule WandererApp.Test.Mocks do
|
||||
sun_type_id: 45041
|
||||
}}
|
||||
|
||||
30_002_187 ->
  {:ok,
   %{
     solar_system_id: 30_002_187,
|
||||
region_id: 10_000_043,
|
||||
constellation_id: 20_000_304,
|
||||
solar_system_name: "Amarr",
|
||||
@@ -94,6 +95,52 @@ defmodule WandererApp.Test.Mocks do
|
||||
sun_type_id: 45041
|
||||
}}
|
||||
|
||||
30_002_659 ->
|
||||
{:ok,
|
||||
%{
|
||||
solar_system_id: 30_002_659,
|
||||
region_id: 10_000_032,
|
||||
constellation_id: 20_000_456,
|
||||
solar_system_name: "Dodixie",
|
||||
solar_system_name_lc: "dodixie",
|
||||
constellation_name: "Sinq Laison",
|
||||
region_name: "Sinq Laison",
|
||||
system_class: 0,
|
||||
security: "0.9",
|
||||
type_description: "High Security",
|
||||
class_title: "High Sec",
|
||||
is_shattered: false,
|
||||
effect_name: nil,
|
||||
effect_power: nil,
|
||||
statics: [],
|
||||
wandering: [],
|
||||
triglavian_invasion_status: nil,
|
||||
sun_type_id: 45041
|
||||
}}
|
||||
|
||||
30_002_510 ->
|
||||
{:ok,
|
||||
%{
|
||||
solar_system_id: 30_002_510,
|
||||
region_id: 10_000_030,
|
||||
constellation_id: 20_000_387,
|
||||
solar_system_name: "Rens",
|
||||
solar_system_name_lc: "rens",
|
||||
constellation_name: "Frarn",
|
||||
region_name: "Heimatar",
|
||||
system_class: 0,
|
||||
security: "0.9",
|
||||
type_description: "High Security",
|
||||
class_title: "High Sec",
|
||||
is_shattered: false,
|
||||
effect_name: nil,
|
||||
effect_power: nil,
|
||||
statics: [],
|
||||
wandering: [],
|
||||
triglavian_invasion_status: nil,
|
||||
sun_type_id: 45041
|
||||
}}
|
||||
|
||||
_ ->
|
||||
{:error, :not_found}
|
||||
end)
|
||||
|
||||
@@ -410,7 +410,7 @@ defmodule WandererApp.Map.CacheRTreeTest do
|
||||
# Check many positions for availability (simulating auto-positioning)
|
||||
test_positions = for x <- 0..20, y <- 0..20, do: {x * 100, y * 50}
|
||||
|
||||
for {x, y} <- test_positions do
|
||||
box = [{x, x + 130}, {y, y + 34}]
|
||||
{:ok, _ids} = CacheRTree.query(box, name)
|
||||
# Not asserting anything, just verifying queries work
|
||||
|
||||
@@ -0,0 +1,561 @@
|
||||
defmodule WandererApp.Map.MapPoolCrashRecoveryTest do
|
||||
use ExUnit.Case, async: false
|
||||
|
||||
alias WandererApp.Map.{MapPool, MapPoolState}
|
||||
|
||||
@cache :map_pool_cache
|
||||
@registry :map_pool_registry
|
||||
@unique_registry :unique_map_pool_registry
|
||||
@ets_table :map_pool_state_table
|
||||
|
||||
setup do
|
||||
# Clean up any existing test data
|
||||
cleanup_test_data()
|
||||
|
||||
# Check if ETS table exists
|
||||
ets_exists? =
|
||||
try do
|
||||
:ets.info(@ets_table) != :undefined
|
||||
rescue
|
||||
_ -> false
|
||||
end
|
||||
|
||||
on_exit(fn ->
|
||||
cleanup_test_data()
|
||||
end)
|
||||
|
||||
{:ok, ets_exists: ets_exists?}
|
||||
end
|
||||
|
||||
defp cleanup_test_data do
|
||||
# Clean up test caches
|
||||
WandererApp.Cache.delete("started_maps")
|
||||
Cachex.clear(@cache)
|
||||
|
||||
# Clean up ETS entries for test pools
|
||||
if :ets.whereis(@ets_table) != :undefined do
|
||||
:ets.match_delete(@ets_table, {:"$1", :"$2", :"$3"})
|
||||
end
|
||||
end
|
||||
|
||||
defp create_test_pool_with_uuid(uuid, map_ids) do
|
||||
# Manually register in unique_registry
|
||||
{:ok, _} = Registry.register(@unique_registry, Module.concat(MapPool, uuid), map_ids)
|
||||
{:ok, _} = Registry.register(@registry, MapPool, uuid)
|
||||
|
||||
# Add to cache
|
||||
Enum.each(map_ids, fn map_id ->
|
||||
Cachex.put(@cache, map_id, uuid)
|
||||
end)
|
||||
|
||||
# Save to ETS
|
||||
MapPoolState.save_pool_state(uuid, map_ids)
|
||||
|
||||
uuid
|
||||
end
|
||||
|
||||
defp get_pool_map_ids(uuid) do
|
||||
case Registry.lookup(@unique_registry, Module.concat(MapPool, uuid)) do
|
||||
[{_pid, map_ids}] -> map_ids
|
||||
[] -> []
|
||||
end
|
||||
end
|
||||
|
||||
describe "MapPoolState - ETS operations" do
|
||||
test "save_pool_state stores state in ETS", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
map_ids = [1, 2, 3]
|
||||
|
||||
assert :ok = MapPoolState.save_pool_state(uuid, map_ids)
|
||||
|
||||
# Verify it's in ETS
|
||||
assert {:ok, ^map_ids} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "get_pool_state returns not_found for non-existent pool", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "non-existent-#{:rand.uniform(1_000_000)}"
|
||||
|
||||
assert {:error, :not_found} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "delete_pool_state removes state from ETS", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
map_ids = [1, 2, 3]
|
||||
|
||||
MapPoolState.save_pool_state(uuid, map_ids)
|
||||
assert {:ok, ^map_ids} = MapPoolState.get_pool_state(uuid)
|
||||
|
||||
assert :ok = MapPoolState.delete_pool_state(uuid)
|
||||
assert {:error, :not_found} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "save_pool_state updates existing state", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
|
||||
# Save initial state
|
||||
MapPoolState.save_pool_state(uuid, [1, 2])
|
||||
assert {:ok, [1, 2]} = MapPoolState.get_pool_state(uuid)
|
||||
|
||||
# Update state
|
||||
MapPoolState.save_pool_state(uuid, [1, 2, 3, 4])
|
||||
assert {:ok, [1, 2, 3, 4]} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "list_all_states returns all pool states", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
# Clean first
|
||||
:ets.delete_all_objects(@ets_table)
|
||||
|
||||
uuid1 = "test-pool-1-#{:rand.uniform(1_000_000)}"
|
||||
uuid2 = "test-pool-2-#{:rand.uniform(1_000_000)}"
|
||||
|
||||
MapPoolState.save_pool_state(uuid1, [1, 2])
|
||||
MapPoolState.save_pool_state(uuid2, [3, 4])
|
||||
|
||||
states = MapPoolState.list_all_states()
|
||||
assert length(states) >= 2
|
||||
|
||||
# Verify our pools are in there
|
||||
uuids = Enum.map(states, fn {uuid, _map_ids, _timestamp} -> uuid end)
|
||||
assert uuid1 in uuids
|
||||
assert uuid2 in uuids
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "count_states returns correct count", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
# Clean first
|
||||
:ets.delete_all_objects(@ets_table)
|
||||
|
||||
uuid1 = "test-pool-1-#{:rand.uniform(1_000_000)}"
|
||||
uuid2 = "test-pool-2-#{:rand.uniform(1_000_000)}"
|
||||
|
||||
MapPoolState.save_pool_state(uuid1, [1, 2])
|
||||
MapPoolState.save_pool_state(uuid2, [3, 4])
|
||||
|
||||
count = MapPoolState.count_states()
|
||||
assert count >= 2
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe "MapPoolState - stale entry cleanup" do
|
||||
test "cleanup_stale_entries removes old entries", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "stale-pool-#{:rand.uniform(1_000_000)}"
|
||||
|
||||
# Manually insert a stale entry (24+ hours old)
|
||||
stale_timestamp = System.system_time(:second) - 25 * 3600
|
||||
:ets.insert(@ets_table, {uuid, [1, 2], stale_timestamp})
|
||||
|
||||
assert {:ok, [1, 2]} = MapPoolState.get_pool_state(uuid)
|
||||
|
||||
# Clean up stale entries
|
||||
{:ok, deleted_count} = MapPoolState.cleanup_stale_entries()
|
||||
assert deleted_count >= 1
|
||||
|
||||
# Verify stale entry was removed
|
||||
assert {:error, :not_found} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "cleanup_stale_entries preserves recent entries", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "recent-pool-#{:rand.uniform(1_000_000)}"
|
||||
map_ids = [1, 2, 3]
|
||||
|
||||
# Save recent entry
|
||||
MapPoolState.save_pool_state(uuid, map_ids)
|
||||
|
||||
# Clean up
|
||||
MapPoolState.cleanup_stale_entries()
|
||||
|
||||
# Recent entry should still exist
|
||||
assert {:ok, ^map_ids} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe "Crash recovery - basic scenarios" do
|
||||
@tag :skip
|
||||
test "MapPool recovers single map after crash" do
|
||||
# This test requires a full MapPool GenServer with actual map data
|
||||
# Skipping as it needs integration with Server.Impl.start_map
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
test "MapPool recovers multiple maps after crash" do
|
||||
# Similar to above - requires full integration
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
test "MapPool merges new and recovered map_ids" do
|
||||
# Tests that if pool crashes while starting a new map,
|
||||
# both the new map and recovered maps are started
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
describe "Crash recovery - telemetry" do
|
||||
test "recovery emits start telemetry event", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
test_pid = self()
|
||||
|
||||
# Attach telemetry handler
|
||||
:telemetry.attach(
|
||||
"test-recovery-start",
|
||||
[:wanderer_app, :map_pool, :recovery, :start],
|
||||
fn _event, measurements, metadata, _config ->
|
||||
send(test_pid, {:telemetry_start, measurements, metadata})
|
||||
end,
|
||||
nil
|
||||
)
|
||||
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
recovered_maps = [1, 2, 3]
|
||||
|
||||
# Save state to ETS (simulating previous run)
|
||||
MapPoolState.save_pool_state(uuid, recovered_maps)
|
||||
|
||||
# Simulate init with recovery
|
||||
# Note: Can't actually start a MapPool here without full integration,
|
||||
# but we can verify the telemetry handler is set up correctly
|
||||
|
||||
# Manually emit the event to test handler
|
||||
:telemetry.execute(
|
||||
[:wanderer_app, :map_pool, :recovery, :start],
|
||||
%{recovered_map_count: 3, total_map_count: 3},
|
||||
%{pool_uuid: uuid}
|
||||
)
|
||||
|
||||
assert_receive {:telemetry_start, measurements, metadata}, 500
|
||||
|
||||
assert measurements.recovered_map_count == 3
|
||||
assert measurements.total_map_count == 3
|
||||
assert metadata.pool_uuid == uuid
|
||||
|
||||
# Cleanup
|
||||
:telemetry.detach("test-recovery-start")
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "recovery emits complete telemetry event", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
test_pid = self()
|
||||
|
||||
:telemetry.attach(
|
||||
"test-recovery-complete",
|
||||
[:wanderer_app, :map_pool, :recovery, :complete],
|
||||
fn _event, measurements, metadata, _config ->
|
||||
send(test_pid, {:telemetry_complete, measurements, metadata})
|
||||
end,
|
||||
nil
|
||||
)
|
||||
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
|
||||
# Manually emit the event
|
||||
:telemetry.execute(
|
||||
[:wanderer_app, :map_pool, :recovery, :complete],
|
||||
%{recovered_count: 3, failed_count: 0, duration_ms: 100},
|
||||
%{pool_uuid: uuid}
|
||||
)
|
||||
|
||||
assert_receive {:telemetry_complete, measurements, metadata}, 500
|
||||
|
||||
assert measurements.recovered_count == 3
|
||||
assert measurements.failed_count == 0
|
||||
assert measurements.duration_ms == 100
|
||||
assert metadata.pool_uuid == uuid
|
||||
|
||||
:telemetry.detach("test-recovery-complete")
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "recovery emits map_failed telemetry event", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
test_pid = self()
|
||||
|
||||
:telemetry.attach(
|
||||
"test-recovery-map-failed",
|
||||
[:wanderer_app, :map_pool, :recovery, :map_failed],
|
||||
fn _event, measurements, metadata, _config ->
|
||||
send(test_pid, {:telemetry_map_failed, measurements, metadata})
|
||||
end,
|
||||
nil
|
||||
)
|
||||
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
failed_map_id = 123
|
||||
|
||||
# Manually emit the event
|
||||
:telemetry.execute(
|
||||
[:wanderer_app, :map_pool, :recovery, :map_failed],
|
||||
%{map_id: failed_map_id},
|
||||
%{pool_uuid: uuid, reason: "Map not found"}
|
||||
)
|
||||
|
||||
assert_receive {:telemetry_map_failed, measurements, metadata}, 500
|
||||
|
||||
assert measurements.map_id == failed_map_id
|
||||
assert metadata.pool_uuid == uuid
|
||||
assert metadata.reason == "Map not found"
|
||||
|
||||
:telemetry.detach("test-recovery-map-failed")
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe "Crash recovery - state persistence" do
|
||||
@tag :skip
|
||||
test "state persisted after successful map start" do
|
||||
# Would need to start actual MapPool and trigger start_map
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
test "state persisted after successful map stop" do
|
||||
# Would need to start actual MapPool and trigger stop_map
|
||||
:ok
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
test "state persisted during backup_state" do
|
||||
# Would need to trigger backup_state handler
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
describe "Graceful shutdown cleanup" do
|
||||
test "ETS state cleaned on normal termination", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
map_ids = [1, 2, 3]
|
||||
|
||||
# Save state
|
||||
MapPoolState.save_pool_state(uuid, map_ids)
|
||||
assert {:ok, ^map_ids} = MapPoolState.get_pool_state(uuid)
|
||||
|
||||
# Simulate graceful shutdown by calling delete
|
||||
MapPoolState.delete_pool_state(uuid)
|
||||
|
||||
# State should be gone
|
||||
assert {:error, :not_found} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@tag :skip
|
||||
test "ETS state preserved on abnormal termination" do
|
||||
# Would need to actually crash a MapPool to test this
|
||||
# The terminate callback would not call delete_pool_state
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
describe "Edge cases" do
|
||||
test "recovery with empty map_ids list", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
|
||||
# Save empty state
|
||||
MapPoolState.save_pool_state(uuid, [])
|
||||
assert {:ok, []} = MapPoolState.get_pool_state(uuid)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "recovery with duplicate map_ids gets deduplicated", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
# This tests the deduplication logic in init
|
||||
# If we have [1, 2] in ETS and [2, 3] in new map_ids,
|
||||
# result should be [1, 2, 3] after Enum.uniq
|
||||
|
||||
recovered_maps = [1, 2]
|
||||
new_maps = [2, 3]
|
||||
expected = Enum.uniq(recovered_maps ++ new_maps)
|
||||
|
||||
# Should be [1, 2, 3] or [2, 3, 1] depending on order
|
||||
assert 1 in expected
|
||||
assert 2 in expected
|
||||
assert 3 in expected
|
||||
assert length(expected) == 3
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "large number of maps in recovery", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
# Test with 20 maps (the pool limit)
|
||||
map_ids = Enum.to_list(1..20)
|
||||
|
||||
MapPoolState.save_pool_state(uuid, map_ids)
|
||||
assert {:ok, recovered} = MapPoolState.get_pool_state(uuid)
|
||||
assert length(recovered) == 20
|
||||
assert recovered == map_ids
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe "Concurrent operations" do
|
||||
test "multiple pools can save state concurrently", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
# Create 10 pools concurrently
|
||||
tasks =
|
||||
1..10
|
||||
|> Enum.map(fn i ->
|
||||
Task.async(fn ->
|
||||
uuid = "concurrent-pool-#{i}-#{:rand.uniform(1_000_000)}"
|
||||
map_ids = [i * 10, i * 10 + 1]
|
||||
MapPoolState.save_pool_state(uuid, map_ids)
|
||||
{uuid, map_ids}
|
||||
end)
|
||||
end)
|
||||
|
||||
results = Task.await_many(tasks, 5000)
|
||||
|
||||
# Verify all pools saved successfully
|
||||
Enum.each(results, fn {uuid, expected_map_ids} ->
|
||||
assert {:ok, ^expected_map_ids} = MapPoolState.get_pool_state(uuid)
|
||||
end)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
test "concurrent reads and writes don't corrupt state", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "test-pool-#{:rand.uniform(1_000_000)}"
|
||||
MapPoolState.save_pool_state(uuid, [1, 2, 3])
|
||||
|
||||
# Spawn multiple readers and writers
|
||||
readers =
|
||||
1..5
|
||||
|> Enum.map(fn _ ->
|
||||
Task.async(fn ->
|
||||
MapPoolState.get_pool_state(uuid)
|
||||
end)
|
||||
end)
|
||||
|
||||
writers =
|
||||
1..5
|
||||
|> Enum.map(fn i ->
|
||||
Task.async(fn ->
|
||||
MapPoolState.save_pool_state(uuid, [i, i + 1])
|
||||
end)
|
||||
end)
|
||||
|
||||
# All operations should complete without error
|
||||
reader_results = Task.await_many(readers, 5000)
|
||||
writer_results = Task.await_many(writers, 5000)
|
||||
|
||||
assert Enum.all?(reader_results, fn
|
||||
{:ok, _} -> true
|
||||
_ -> false
|
||||
end)
|
||||
|
||||
assert Enum.all?(writer_results, fn :ok -> true end)
|
||||
|
||||
# Final state should be valid (one of the writer's values)
|
||||
assert {:ok, final_state} = MapPoolState.get_pool_state(uuid)
|
||||
assert is_list(final_state)
|
||||
assert length(final_state) == 2
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe "Performance" do
|
||||
@tag :slow
|
||||
test "recovery completes within acceptable time", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
uuid = "perf-pool-#{:rand.uniform(1_000_000)}"
|
||||
# Test with pool at limit (20 maps)
|
||||
map_ids = Enum.to_list(1..20)
|
||||
|
||||
# Measure save time
|
||||
{save_time_us, :ok} = :timer.tc(fn ->
|
||||
MapPoolState.save_pool_state(uuid, map_ids)
|
||||
end)
|
||||
|
||||
# Measure retrieval time
|
||||
{get_time_us, {:ok, _}} = :timer.tc(fn ->
|
||||
MapPoolState.get_pool_state(uuid)
|
||||
end)
|
||||
|
||||
# Both operations should be very fast (< 1ms)
|
||||
assert save_time_us < 1000, "Save took #{save_time_us}µs, expected < 1000µs"
|
||||
assert get_time_us < 1000, "Get took #{get_time_us}µs, expected < 1000µs"
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@tag :slow
|
||||
test "cleanup performance with many stale entries", %{ets_exists: ets_exists?} do
|
||||
if ets_exists? do
|
||||
# Insert 100 stale entries
|
||||
stale_timestamp = System.system_time(:second) - 25 * 3600
|
||||
|
||||
1..100
|
||||
|> Enum.each(fn i ->
|
||||
uuid = "stale-pool-#{i}"
|
||||
:ets.insert(@ets_table, {uuid, [i], stale_timestamp})
|
||||
end)
|
||||
|
||||
# Measure cleanup time
|
||||
{cleanup_time_us, {:ok, deleted_count}} = :timer.tc(fn ->
|
||||
MapPoolState.cleanup_stale_entries()
|
||||
end)
|
||||
|
||||
# Should have deleted at least 100 entries
|
||||
assert deleted_count >= 100
|
||||
|
||||
# Cleanup should be reasonably fast (< 100ms for 100 entries)
|
||||
assert cleanup_time_us < 100_000,
|
||||
"Cleanup took #{cleanup_time_us}µs, expected < 100,000µs"
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,320 @@
|
||||
defmodule WandererApp.Map.SlugUniquenessTest do
|
||||
@moduledoc """
|
||||
Tests for map slug uniqueness constraints and handling.
|
||||
|
||||
These tests verify that:
|
||||
1. Database unique constraint is enforced
|
||||
2. Application-level slug generation handles uniqueness
|
||||
3. Concurrent map creation doesn't create duplicates
|
||||
4. Error handling works correctly for slug conflicts
|
||||
"""
|
||||
use WandererApp.DataCase, async: false
|
||||
|
||||
alias WandererApp.Api.Map
|
||||
|
||||
require Logger
|
||||
|
||||
describe "slug uniqueness constraint" do
|
||||
setup do
|
||||
# Create a test user
|
||||
user = create_test_user()
|
||||
%{user: user}
|
||||
end
|
||||
|
||||
test "prevents duplicate slugs via database constraint", %{user: user} do
|
||||
# Create first map with a specific slug
|
||||
{:ok, _map1} =
|
||||
Map.new(%{
|
||||
name: "Test Map",
|
||||
slug: "test-map",
|
||||
owner_id: user.id,
|
||||
description: "First map",
|
||||
scope: "wormholes"
|
||||
})
|
||||
|
||||
# Attempt to create second map with same slug by bypassing Ash slug generation
|
||||
# This simulates a race condition where slug generation passes but DB insert fails
|
||||
result =
|
||||
Map.new(%{
|
||||
name: "Different Name",
|
||||
slug: "test-map",
|
||||
owner_id: user.id,
|
||||
description: "Second map",
|
||||
scope: "wormholes"
|
||||
})
|
||||
|
||||
# Should get a unique constraint error from database
|
||||
assert {:error, _error} = result
|
||||
end
|
||||
|
||||
test "automatically increments slug when duplicate detected", %{user: user} do
|
||||
# Create first map
|
||||
{:ok, map1} =
|
||||
Map.new(%{
|
||||
name: "Test Map",
|
||||
slug: "test-map",
|
||||
owner_id: user.id,
|
||||
description: "First map",
|
||||
scope: "wormholes"
|
||||
})
|
||||
|
||||
assert map1.slug == "test-map"
|
||||
|
||||
# Create second map with same name (should auto-increment slug)
|
||||
{:ok, map2} =
|
||||
Map.new(%{
|
||||
name: "Test Map",
|
||||
slug: "test-map",
|
||||
owner_id: user.id,
|
||||
description: "Second map",
|
||||
scope: "wormholes"
|
||||
})
|
||||
|
||||
# Slug should be automatically incremented
|
||||
assert map2.slug == "test-map-2"
|
||||
|
||||
# Create third map with same name
|
||||
{:ok, map3} =
|
||||
Map.new(%{
|
||||
name: "Test Map",
|
||||
slug: "test-map",
|
||||
owner_id: user.id,
|
||||
description: "Third map",
|
||||
scope: "wormholes"
|
||||
})
|
||||
|
||||
assert map3.slug == "test-map-3"
|
||||
end
|
||||
|
||||
test "handles many maps with similar names", %{user: user} do
|
||||
# Create 10 maps with the same base slug
|
||||
maps =
|
||||
for i <- 1..10 do
|
||||
{:ok, map} =
|
||||
Map.new(%{
|
||||
name: "Popular Name",
|
||||
slug: "popular-name",
|
||||
owner_id: user.id,
|
||||
description: "Map #{i}",
|
||||
scope: "wormholes"
|
||||
})
|
||||
|
||||
map
|
||||
end
|
||||
|
||||
# Verify all slugs are unique
|
||||
slugs = Enum.map(maps, & &1.slug)
|
||||
assert length(Enum.uniq(slugs)) == 10
|
||||
|
||||
# First should keep the base slug
|
||||
assert List.first(maps).slug == "popular-name"
|
||||
|
||||
# Others should be numbered
|
||||
assert "popular-name-2" in slugs
|
||||
assert "popular-name-10" in slugs
|
||||
end
|
||||
end
|
||||
|
||||
describe "concurrent slug creation (race condition)" do
  setup do
    %{user: create_test_user()}
  end

  @tag :slow
  test "handles concurrent map creation with identical slugs", %{user: user} do
    # Fire five simultaneous creation attempts that all request the
    # exact same slug, forcing the deconfliction path to race.
    results =
      1..5
      |> Enum.map(fn i ->
        Task.async(fn ->
          Map.new(%{
            name: "Concurrent Test",
            slug: "concurrent-test",
            owner_id: user.id,
            description: "Concurrent map #{i}",
            scope: "wormholes"
          })
        end)
      end)
      |> Task.await_many(10_000)

    # Every task must return a tagged tuple — no crashes.
    assert length(results) == 5

    successful = Enum.filter(results, &match?({:ok, _}, &1))
    failed = Enum.filter(results, &match?({:error, _}, &1))

    # The race must not prevent every single request from succeeding.
    assert successful != []

    maps = Enum.map(successful, fn {:ok, map} -> map end)

    # Winners must have been deconflicted onto distinct slugs.
    slugs = Enum.map(maps, & &1.slug)

    assert length(Enum.uniq(slugs)) == length(slugs),
           "All successful maps should have unique slugs"

    # Log the outcome for visibility when the suite runs.
    Logger.info("Concurrent test: #{length(successful)} succeeded, #{length(failed)} failed")
    Logger.info("Unique slugs created: #{inspect(slugs)}")
  end

  @tag :slow
  test "concurrent creation with different names creates different base slugs", %{user: user} do
    # Distinct requested slugs can never conflict, so all five
    # concurrent creations are expected to succeed.
    results =
      1..5
      |> Enum.map(fn i ->
        Task.async(fn ->
          Map.new(%{
            name: "Concurrent Map #{i}",
            slug: "concurrent-map-#{i}",
            owner_id: user.id,
            description: "Map #{i}",
            scope: "wormholes"
          })
        end)
      end)
      |> Task.await_many(10_000)

    assert Enum.all?(results, &match?({:ok, _}, &1))

    # Each map keeps the slug it asked for, so all five are unique.
    slugs = Enum.map(results, fn {:ok, map} -> map.slug end)
    assert length(Enum.uniq(slugs)) == 5
  end
end
|
||||
|
||||
describe "slug generation edge cases" do
  setup do
    %{user: create_test_user()}
  end

  test "handles very long slugs", %{user: user} do
    # A 100-char name/slug must be truncated, not rejected.
    long_name = String.duplicate("a", 100)

    {:ok, map} =
      Map.new(%{
        name: long_name,
        slug: long_name,
        owner_id: user.id,
        description: "Long name test",
        scope: "wormholes"
      })

    # Max slug length is 40 chars, per the constraints in map.ex.
    assert String.length(map.slug) <= 40
  end

  test "handles special characters in slugs", %{user: user} do
    # Punctuation in the name must be slugified away.
    {:ok, map} =
      Map.new(%{
        name: "Test: Map & Name!",
        slug: "test-map-name",
        owner_id: user.id,
        description: "Special chars test",
        scope: "wormholes"
      })

    # Only lowercase alphanumerics and hyphens may survive.
    assert map.slug =~ ~r/^[a-z0-9-]+$/
  end
end
|
||||
|
||||
describe "slug update operations" do
  setup do
    user = create_test_user()

    # Seed one map whose slug the update tests will exercise.
    {:ok, map} =
      Map.new(%{
        name: "Original Map",
        slug: "original-map",
        owner_id: user.id,
        description: "Original",
        scope: "wormholes"
      })

    %{user: user, map: map}
  end

  test "updating map with same slug succeeds", %{map: map} do
    # Re-submitting the map's current slug must not trip the
    # uniqueness handling — only the description changes.
    result =
      Map.update(map, %{
        description: "Updated description",
        slug: "original-map"
      })

    assert {:ok, updated_map} = result
    assert updated_map.slug == "original-map"
    assert updated_map.description == "Updated description"
  end

  test "updating to conflicting slug is handled", %{user: user, map: map} do
    # Seed a second map that already owns the contested slug.
    {:ok, _other_map} =
      Map.new(%{
        name: "Other Map",
        slug: "other-map",
        owner_id: user.id,
        description: "Other",
        scope: "wormholes"
      })

    # Requesting a taken slug must either auto-increment onto a
    # numbered variant or fail with a validation error — never
    # silently produce a duplicate.
    case Map.update(map, %{slug: "other-map"}) do
      {:ok, updated_map} ->
        refute updated_map.slug == "other-map"
        assert updated_map.slug =~ ~r/^other-map-\d+$/

      {:error, _} ->
        :ok
    end
  end
end
|
||||
|
||||
describe "get_map_by_slug with duplicates" do
  setup do
    %{user: create_test_user()}
  end

  test "get_map_by_slug! raises on duplicates if they exist" do
    # Documentation-only test: the database constraint makes it
    # impractical to manufacture duplicate slugs here, so we cannot
    # exercise the failure path directly. If duplicates ever do exist
    # (a data-integrity issue), the bang lookup is expected to fail
    # rather than return an arbitrary row.
    assert true
  end
end
|
||||
|
||||
# Helper functions
|
||||
|
||||
# Creates and returns a persisted test user with collision-free
# identifiers.
#
# Uses `System.unique_integer([:positive])` instead of `:rand.uniform/1`
# so repeated calls within one test run can never draw the same
# name/eve_id — random draws could collide and violate uniqueness
# constraints, making otherwise-unrelated tests flaky.
defp create_test_user do
  unique = System.unique_integer([:positive])

  {:ok, user} =
    WandererApp.Api.User.new(%{
      name: "Test User #{unique}",
      eve_id: unique
    })

  user
end
|
||||
end
|
||||
# NOTE(review): the two trailing lines "Reference in New Issue" /
# "Block a user" are git web-UI residue from the mirror export, not
# source code — remove them from the committed file.