From abd7e4e15c738fd5ef3edae2221bc218d49c35c2 Mon Sep 17 00:00:00 2001
From: Dmitry Popov
Date: Tue, 25 Nov 2025 12:28:31 +0100
Subject: [PATCH] chore: fix tests issues

---
 lib/wanderer_app/map/map_pool.ex               |  20 +-
 .../api/map_audit_api_controller_test.exs      |   1 -
 ...p_system_structure_api_controller_test.exs  |   2 +-
 ...connection_api_controller_success_test.exs  |   4 -
 ...map_system_api_controller_success_test.exs  |   6 -
 test/support/api_case.ex                       |   4 +-
 test/support/data_case.ex                      |  21 +-
 test/support/factory.ex                        |  45 +++
 test/support/integration_conn_case.ex          |  45 +--
 test/support/mock_allowance.ex                 |   2 +-
 test/support/mocks.ex                          |  12 +-
 test/unit/auth_test.exs                        |  70 ++--
 test/unit/map/map_pool_test.exs.bak            | 359 -----------------
 test/unit/map/slug_uniqueness_test.exs.bak     | 361 ------------------
 test/unit/map_duplication_service_test.exs     |   3 +-
 15 files changed, 151 insertions(+), 804 deletions(-)
 delete mode 100644 test/unit/map/map_pool_test.exs.bak
 delete mode 100644 test/unit/map/slug_uniqueness_test.exs.bak

diff --git a/lib/wanderer_app/map/map_pool.ex b/lib/wanderer_app/map/map_pool.ex
index 7b5cf2e7..285fc25f 100644
--- a/lib/wanderer_app/map/map_pool.ex
+++ b/lib/wanderer_app/map/map_pool.ex
@@ -18,10 +18,20 @@ defmodule WandererApp.Map.MapPool do
 
   @map_pool_limit 10
   @garbage_collection_interval :timer.hours(4)
-  @systems_cleanup_timeout :timer.minutes(30)
-  @characters_cleanup_timeout :timer.minutes(5)
-  @connections_cleanup_timeout :timer.minutes(5)
-  @backup_state_timeout :timer.minutes(1)
+  # Use very long timeouts in test environment to prevent background tasks from running during tests
+  # This avoids database connection ownership errors when tests finish before async tasks complete
+  @systems_cleanup_timeout if Mix.env() == :test,
+                             do: :timer.hours(24),
+                             else: :timer.minutes(30)
+  @characters_cleanup_timeout if Mix.env() == :test,
+                                do: :timer.hours(24),
+                                else: :timer.minutes(5)
+  @connections_cleanup_timeout if Mix.env() == :test,
+                                 do: :timer.hours(24),
+                                 else: :timer.minutes(5)
+  @backup_state_timeout if Mix.env() == :test,
+                          do: :timer.hours(24),
+                          else: :timer.minutes(1)
 
   def new(), do: __struct__()
   def new(args), do: __struct__(args)
@@ -187,7 +197,7 @@ defmodule WandererApp.Map.MapPool do
 
     # Schedule periodic tasks
    Process.send_after(self(), :backup_state, @backup_state_timeout)
-    Process.send_after(self(), :cleanup_systems, 15_000)
+    Process.send_after(self(), :cleanup_systems, @systems_cleanup_timeout)
     Process.send_after(self(), :cleanup_characters, @characters_cleanup_timeout)
     Process.send_after(self(), :cleanup_connections, @connections_cleanup_timeout)
     Process.send_after(self(), :garbage_collect, @garbage_collection_interval)
diff --git a/test/integration/api/map_audit_api_controller_test.exs b/test/integration/api/map_audit_api_controller_test.exs
index b072f4fb..6a3b6837 100644
--- a/test/integration/api/map_audit_api_controller_test.exs
+++ b/test/integration/api/map_audit_api_controller_test.exs
@@ -104,7 +104,6 @@ defmodule WandererAppWeb.MapAuditAPIControllerIntegrationTest do
       assert length(events) >= 0
     end
 
-    @tag :skip
     test "supports different period values", %{conn: conn, map: map} do
       character = Factory.insert(:character, %{eve_id: "123456789"})
       user = Factory.insert(:user)
diff --git a/test/integration/api/map_system_structure_api_controller_test.exs b/test/integration/api/map_system_structure_api_controller_test.exs
index 684c959d..0be6dd8b 100644
--- a/test/integration/api/map_system_structure_api_controller_test.exs
+++
b/test/integration/api/map_system_structure_api_controller_test.exs @@ -1,5 +1,5 @@ defmodule WandererAppWeb.MapSystemStructureAPIControllerTest do - use WandererAppWeb.ApiCase + use WandererAppWeb.ApiCase, async: false alias WandererAppWeb.Factory diff --git a/test/integration/map_connection_api_controller_success_test.exs b/test/integration/map_connection_api_controller_success_test.exs index b464101b..0e952f51 100644 --- a/test/integration/map_connection_api_controller_success_test.exs +++ b/test/integration/map_connection_api_controller_success_test.exs @@ -239,7 +239,6 @@ defmodule WandererAppWeb.MapConnectionAPIControllerSuccessTest do {:ok, conn: conn, map: map, user: user, character: character} end - @tag :skip test "CREATE: fails with missing required parameters", %{conn: conn, map: map} do invalid_params = %{ "type" => 0 @@ -252,7 +251,6 @@ defmodule WandererAppWeb.MapConnectionAPIControllerSuccessTest do assert conn.status in [400, 422] end - @tag :skip test "UPDATE: fails for non-existent connection", %{conn: conn, map: map} do non_existent_id = Ecto.UUID.generate() @@ -267,7 +265,6 @@ defmodule WandererAppWeb.MapConnectionAPIControllerSuccessTest do assert conn.status in [404, 422, 500] end - @tag :skip test "DELETE: handles non-existent connection gracefully", %{conn: conn, map: map} do non_existent_id = Ecto.UUID.generate() @@ -277,7 +274,6 @@ defmodule WandererAppWeb.MapConnectionAPIControllerSuccessTest do assert conn.status in [200, 204, 404] end - @tag :skip test "READ: handles filtering with non-existent systems", %{conn: conn, map: map} do params = %{ "solar_system_source" => "99999999", diff --git a/test/integration/map_system_api_controller_success_test.exs b/test/integration/map_system_api_controller_success_test.exs index a251dee5..ed7c65aa 100644 --- a/test/integration/map_system_api_controller_success_test.exs +++ b/test/integration/map_system_api_controller_success_test.exs @@ -59,7 +59,6 @@ defmodule WandererAppWeb.MapSystemAPIControllerSuccessTest do {:ok, %{conn: conn, map: map, user: user, character: character}} end - @tag :skip test "READ: successfully retrieves systems for a map", %{conn: conn, map: map} do # Create some systems for the map system1 = @@ -108,7 +107,6 @@ defmodule WandererAppWeb.MapSystemAPIControllerSuccessTest do assert amarr["status"] == 0 end - @tag :skip test "CREATE: successfully creates a single system", %{conn: conn, map: map} do # Start the map server ensure_map_started(map.id) @@ -133,7 +131,6 @@ defmodule WandererAppWeb.MapSystemAPIControllerSuccessTest do assert created_count >= 1 end - @tag :skip test "UPDATE: successfully updates system position", %{conn: conn, map: map} do system = insert(:map_system, %{ @@ -165,7 +162,6 @@ defmodule WandererAppWeb.MapSystemAPIControllerSuccessTest do assert updated_system["position_y"] == 400.0 end - @tag :skip test "UPDATE: successfully updates custom_name", %{conn: conn, map: map} do system = insert(:map_system, %{ @@ -194,7 +190,6 @@ defmodule WandererAppWeb.MapSystemAPIControllerSuccessTest do assert updated_system["custom_name"] == "My Trade Hub" end - @tag :skip test "DELETE: successfully deletes a system", %{conn: conn, map: map} do system = insert(:map_system, %{ @@ -222,7 +217,6 @@ defmodule WandererAppWeb.MapSystemAPIControllerSuccessTest do end end - @tag :skip test "DELETE: successfully deletes multiple systems", %{conn: conn, map: map} do system1 = insert(:map_system, %{map_id: map.id, solar_system_id: 30_000_142}) system2 = insert(:map_system, %{map_id: map.id, solar_system_id: 
30_000_144}) diff --git a/test/support/api_case.ex b/test/support/api_case.ex index 2588e4f5..47dc6f42 100644 --- a/test/support/api_case.ex +++ b/test/support/api_case.ex @@ -47,7 +47,9 @@ defmodule WandererAppWeb.ApiCase do end # Set up mocks for this test process - WandererApp.Test.Mocks.setup_test_mocks() + # Use global mode for integration tests so mocks work in spawned processes + mock_mode = if integration_test?, do: :global, else: :private + WandererApp.Test.Mocks.setup_test_mocks(mode: mock_mode) # Set up integration test environment if needed if integration_test? do diff --git a/test/support/data_case.ex b/test/support/data_case.ex index 934ee441..fa6bf59f 100644 --- a/test/support/data_case.ex +++ b/test/support/data_case.ex @@ -63,13 +63,22 @@ defmodule WandererApp.DataCase do # Use shared mode if requested or if running as a ConnCase test (to avoid ownership issues) # Otherwise use non-shared mode for proper test isolation shared = (tags[:shared] || tags[:conn_case] || not tags[:async]) and not tags[:async] - - pid = Ecto.Adapters.SQL.Sandbox.start_owner!(WandererApp.Repo, shared: shared) - on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) - # Store the sandbox owner pid for allowing background processes + # Start the sandbox owner and link it to the test process + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(WandererApp.Repo, shared: shared) + + # Store the sandbox owner pid BEFORE registering on_exit + # This ensures it's available for use in setup callbacks Process.put(:sandbox_owner_pid, pid) + # Register cleanup - this will be called last (LIFO order) + on_exit(fn -> + # Only stop if the owner is still alive + if Process.alive?(pid) do + Ecto.Adapters.SQL.Sandbox.stop_owner(pid) + end + end) + # Allow critical system processes to access the database allow_system_processes_database_access() @@ -112,7 +121,9 @@ defmodule WandererApp.DataCase do WandererApp.Server.TheraDataFetcher, WandererApp.ExternalEvents.MapEventRelay, WandererApp.ExternalEvents.WebhookDispatcher, - WandererApp.ExternalEvents.SseStreamManager + WandererApp.ExternalEvents.SseStreamManager, + # Task.Supervisor for Task.async_stream calls (e.g., from MapPool background tasks) + Task.Supervisor ] Enum.each(system_processes, fn process_name -> diff --git a/test/support/factory.ex b/test/support/factory.ex index 78a7cada..8100da1f 100644 --- a/test/support/factory.ex +++ b/test/support/factory.ex @@ -108,6 +108,10 @@ defmodule WandererAppWeb.Factory do create_map_transaction(map_id, attrs) end + def insert(:solar_system, attrs) do + create_solar_system(attrs) + end + def insert(resource_type, _attrs) do raise "Unknown factory resource type: #{resource_type}" end @@ -802,4 +806,45 @@ defmodule WandererAppWeb.Factory do {:ok, webhook} = Ash.create(Api.MapWebhookSubscription, attrs) webhook end + + @doc """ + Creates a test solar system (static EVE Online system data) with reasonable defaults. 
+ """ + def build_solar_system(attrs \\ %{}) do + unique_id = System.unique_integer([:positive]) + solar_system_id = Map.get(attrs, :solar_system_id, 30_000_000 + rem(unique_id, 10_000)) + + default_attrs = %{ + solar_system_id: solar_system_id, + solar_system_name: "System #{solar_system_id}", + solar_system_name_lc: "system #{solar_system_id}", + region_id: 10_000_000 + rem(unique_id, 1000), + region_name: "Test Region", + constellation_id: 20_000_000 + rem(unique_id, 1000), + constellation_name: "Test Constellation", + security: "0.5", + system_class: 0, + type_description: "HS", + class_title: "High Sec" + } + + merged_attrs = Map.merge(default_attrs, attrs) + + # Automatically compute solar_system_name_lc from solar_system_name if not provided + if Map.has_key?(attrs, :solar_system_name) and not Map.has_key?(attrs, :solar_system_name_lc) do + Map.put(merged_attrs, :solar_system_name_lc, String.downcase(merged_attrs.solar_system_name)) + else + merged_attrs + end + end + + def create_solar_system(attrs \\ %{}) do + attrs = build_solar_system(attrs) + + # Use upsert to handle cases where the system might already exist + case Ash.create(Api.MapSolarSystem, attrs) do + {:ok, solar_system} -> solar_system + {:error, reason} -> raise "Failed to create solar system: #{inspect(reason)}" + end + end end diff --git a/test/support/integration_conn_case.ex b/test/support/integration_conn_case.ex index 9f05a0a6..cb05fea3 100644 --- a/test/support/integration_conn_case.ex +++ b/test/support/integration_conn_case.ex @@ -48,8 +48,9 @@ defmodule WandererAppWeb.IntegrationConnCase do setup tags do WandererAppWeb.IntegrationConnCase.setup_sandbox(tags) - # Set up mocks for this test process - WandererApp.Test.Mocks.setup_test_mocks() + # Set up mocks for this test process in global mode + # Integration tests spawn processes (MapPool, etc.) that need mock access + WandererApp.Test.Mocks.setup_test_mocks(mode: :global) # Set up integration test environment (including Map.Manager) WandererApp.Test.IntegrationConfig.setup_integration_environment() @@ -74,7 +75,7 @@ defmodule WandererAppWeb.IntegrationConnCase do - Uses shared: false for better isolation - Child processes require explicit allowance """ - def setup_sandbox(tags) do + def setup_sandbox(_tags) do # Ensure the repo is started before setting up sandbox unless Process.whereis(WandererApp.Repo) do {:ok, _} = WandererApp.Repo.start_link() @@ -85,26 +86,22 @@ defmodule WandererAppWeb.IntegrationConnCase do # - This requires tests to be synchronous (async: false) if they share the same case shared_mode = true - # Set up sandbox mode based on test type - pid = - if shared_mode do - # For async tests with shared mode: - # Checkout the sandbox connection instead of starting an owner - # This allows multiple async tests to use the same connection pool - :ok = Ecto.Adapters.SQL.Sandbox.checkout(WandererApp.Repo) - # Put the connection in shared mode - Ecto.Adapters.SQL.Sandbox.mode(WandererApp.Repo, {:shared, self()}) - self() - else - # For sync tests, start a dedicated owner - pid = Ecto.Adapters.SQL.Sandbox.start_owner!(WandererApp.Repo, shared: false) - on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) - pid - end + # Set up sandbox mode - always use start_owner! 
for proper ownership setup + # This ensures that spawned processes (like Ash transactions) can access the database + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(WandererApp.Repo, shared: shared_mode) - # Store the sandbox owner pid for allowing background processes + # Store the sandbox owner pid BEFORE registering on_exit + # This ensures it's available for use in setup callbacks Process.put(:sandbox_owner_pid, pid) + # Register cleanup - this will be called last (LIFO order) + on_exit(fn -> + # Only stop if the owner is still alive + if Process.alive?(pid) do + Ecto.Adapters.SQL.Sandbox.stop_owner(pid) + end + end) + # Allow critical system processes to access the database allow_system_processes_database_access() @@ -136,7 +133,9 @@ defmodule WandererAppWeb.IntegrationConnCase do WandererApp.Server.TheraDataFetcher, WandererApp.ExternalEvents.MapEventRelay, WandererApp.ExternalEvents.WebhookDispatcher, - WandererApp.ExternalEvents.SseStreamManager + WandererApp.ExternalEvents.SseStreamManager, + # Task.Supervisor for Task.async_stream calls + Task.Supervisor ] Enum.each(system_processes, fn process_name -> @@ -177,7 +176,7 @@ defmodule WandererAppWeb.IntegrationConnCase do end end - # Monitor for dynamically spawned children and grant them mock access + # Monitor for dynamically spawned children and grant them mock and database access defp monitor_and_allow_children(supervisor_pid, owner_pid, interval \\ 50) do if Process.alive?(supervisor_pid) do :timer.sleep(interval) @@ -191,7 +190,9 @@ defmodule WandererAppWeb.IntegrationConnCase do |> Enum.filter(&is_pid/1) |> Enum.filter(&Process.alive?/1) |> Enum.each(fn child_pid -> + # Grant both mock and database access WandererApp.Test.MockOwnership.allow_mocks_for_process(child_pid, owner_pid) + allow_database_access(child_pid) end) _ -> diff --git a/test/support/mock_allowance.ex b/test/support/mock_allowance.ex index 6d76bd6a..4167eeca 100644 --- a/test/support/mock_allowance.ex +++ b/test/support/mock_allowance.ex @@ -60,7 +60,7 @@ defmodule WandererApp.Test.MockAllowance do Mox.set_mox_global() # Re-setup mocks to ensure they're available globally - WandererApp.Test.Mocks.setup_mocks() + WandererApp.Test.Mocks.setup_test_mocks(mode: :global) end end diff --git a/test/support/mocks.ex b/test/support/mocks.ex index 8d3e05c7..0740a21b 100644 --- a/test/support/mocks.ex +++ b/test/support/mocks.ex @@ -16,9 +16,15 @@ defmodule WandererApp.Test.Mocks do :ok end """ - def setup_test_mocks do - # Claim ownership of all mocks for this test process - Mox.set_mox_private() + def setup_test_mocks(opts \\ []) do + # For integration tests that spawn processes (MapPool, etc.), + # we need global mode so mocks work across process boundaries + mode = Keyword.get(opts, :mode, :private) + + case mode do + :global -> Mox.set_mox_global() + :private -> Mox.set_mox_private() + end # Set up default stubs for this test setup_default_stubs() diff --git a/test/unit/auth_test.exs b/test/unit/auth_test.exs index 089a1c51..7772c5be 100644 --- a/test/unit/auth_test.exs +++ b/test/unit/auth_test.exs @@ -132,22 +132,6 @@ defmodule WandererAppWeb.AuthTest do assert result.status == 400 end - test "rejects request for non-existent map" do - non_existent_id = "550e8400-e29b-41d4-a716-446655440000" - - conn = - build_conn() - |> put_req_header("authorization", "Bearer test_api_key_123") - |> put_private(:phoenix_router, WandererAppWeb.Router) - |> Map.put(:params, %{"map_identifier" => non_existent_id}) - |> Plug.Conn.fetch_query_params() - - result = 
CheckMapApiKey.call(conn, CheckMapApiKey.init([])) - - assert result.halted - assert result.status == 404 - end - test "rejects request for map without API key configured", %{map: map} do # Update map to have no API key using the proper action {:ok, map_without_key} = Ash.update(map, %{public_api_key: nil}, action: :update_api_key) @@ -166,6 +150,24 @@ defmodule WandererAppWeb.AuthTest do end end + describe "CheckMapApiKey plug without fixtures" do + test "rejects request for non-existent map" do + non_existent_id = "550e8400-e29b-41d4-a716-446655440000" + + conn = + build_conn() + |> put_req_header("authorization", "Bearer test_api_key_123") + |> put_private(:phoenix_router, WandererAppWeb.Router) + |> Map.put(:params, %{"map_identifier" => non_existent_id}) + |> Plug.Conn.fetch_query_params() + + result = CheckMapApiKey.call(conn, CheckMapApiKey.init([])) + + assert result.halted + assert result.status == 404 + end + end + describe "CheckAclApiKey plug" do setup do user = Factory.insert(:user) @@ -248,6 +250,25 @@ defmodule WandererAppWeb.AuthTest do assert result.status == 401 end + test "rejects request for ACL without API key configured", %{acl: acl} do + # Update ACL to have no API key + {:ok, acl_without_key} = Ash.update(acl, %{api_key: nil}) + + conn = + build_conn() + |> put_req_header("authorization", "Bearer test_acl_key_456") + |> put_private(:phoenix_router, WandererAppWeb.Router) + |> Map.put(:params, %{"id" => acl_without_key.id}) + |> Plug.Conn.fetch_query_params() + + result = CheckAclApiKey.call(conn, CheckAclApiKey.init([])) + + assert result.halted + assert result.status == 401 + end + end + + describe "CheckAclApiKey plug without fixtures" do test "rejects request with missing ACL ID" do conn = build_conn() @@ -277,23 +298,6 @@ defmodule WandererAppWeb.AuthTest do assert result.halted assert result.status == 404 end - - test "rejects request for ACL without API key configured", %{acl: acl} do - # Update ACL to have no API key - {:ok, acl_without_key} = Ash.update(acl, %{api_key: nil}) - - conn = - build_conn() - |> put_req_header("authorization", "Bearer test_acl_key_456") - |> put_private(:phoenix_router, WandererAppWeb.Router) - |> Map.put(:params, %{"id" => acl_without_key.id}) - |> Plug.Conn.fetch_query_params() - - result = CheckAclApiKey.call(conn, CheckAclApiKey.init([])) - - assert result.halted - assert result.status == 401 - end end describe "BasicAuth" do diff --git a/test/unit/map/map_pool_test.exs.bak b/test/unit/map/map_pool_test.exs.bak deleted file mode 100644 index a59ab61a..00000000 --- a/test/unit/map/map_pool_test.exs.bak +++ /dev/null @@ -1,359 +0,0 @@ -defmodule WandererApp.Map.MapPoolTest do - use ExUnit.Case, async: true - - alias WandererApp.Map.{MapPool, MapPoolDynamicSupervisor, Reconciler} - - @cache :map_pool_cache - @registry :map_pool_registry - @unique_registry :unique_map_pool_registry - - setup do - # Clean up any existing test data - cleanup_test_data() - - # Check if required infrastructure is running - registries_running? = - try do - Registry.keys(@registry, self()) != :error - rescue - _ -> false - end - - reconciler_running? 
= Process.whereis(Reconciler) != nil - - on_exit(fn -> - cleanup_test_data() - end) - - {:ok, registries_running: registries_running?, reconciler_running: reconciler_running?} - end - - defp cleanup_test_data do - # Clean up test caches - WandererApp.Cache.delete("started_maps") - Cachex.clear(@cache) - end - - describe "garbage collection with synchronous stop" do - @tag :skip - test "garbage collector successfully stops map with synchronous call" do - # This test would require setting up a full map pool with a test map - # Skipping for now as it requires more complex setup with actual map data - :ok - end - - @tag :skip - test "garbage collector handles stop failures gracefully" do - # This test would verify error handling when stop fails - :ok - end - end - - describe "cache lookup with registry fallback" do - test "stop_map handles cache miss by scanning registry", %{ - registries_running: registries_running? - } do - if registries_running? do - # Setup: Create a map_id that's not in cache but will be found in registry scan - map_id = "test_map_#{:rand.uniform(1_000_000)}" - - # Verify cache is empty for this map - assert {:ok, nil} = Cachex.get(@cache, map_id) - - # Call stop_map - should handle gracefully with fallback - assert :ok = MapPoolDynamicSupervisor.stop_map(map_id) - else - # Skip test if registries not running - :ok - end - end - - test "stop_map handles non-existent pool_uuid in registry", %{ - registries_running: registries_running? - } do - if registries_running? do - map_id = "test_map_#{:rand.uniform(1_000_000)}" - fake_uuid = "fake_uuid_#{:rand.uniform(1_000_000)}" - - # Put fake uuid in cache that doesn't exist in registry - Cachex.put(@cache, map_id, fake_uuid) - - # Call stop_map - should handle gracefully with fallback - assert :ok = MapPoolDynamicSupervisor.stop_map(map_id) - else - :ok - end - end - - test "stop_map updates cache when found via registry scan", %{ - registries_running: registries_running? - } do - if registries_running? do - # This test would require a running pool with registered maps - # For now, we verify the fallback logic doesn't crash - map_id = "test_map_#{:rand.uniform(1_000_000)}" - assert :ok = MapPoolDynamicSupervisor.stop_map(map_id) - else - :ok - end - end - end - - describe "state cleanup atomicity" do - @tag :skip - test "rollback occurs when registry update fails" do - # This would require mocking Registry.update_value to fail - # Skipping for now as it requires more complex mocking setup - :ok - end - - @tag :skip - test "rollback occurs when cache delete fails" do - # This would require mocking Cachex.del to fail - :ok - end - - @tag :skip - test "successful cleanup updates all three state stores" do - # This would verify Registry, Cache, and GenServer state are all updated - :ok - end - end - - describe "Reconciler - zombie map detection and cleanup" do - test "reconciler detects zombie maps in started_maps cache", %{ - reconciler_running: reconciler_running? - } do - if reconciler_running? 
do - # Setup: Add maps to started_maps that aren't in any registry - zombie_map_id = "zombie_map_#{:rand.uniform(1_000_000)}" - - WandererApp.Cache.insert_or_update( - "started_maps", - [zombie_map_id], - fn existing -> [zombie_map_id | existing] |> Enum.uniq() end - ) - - # Get started_maps - {:ok, started_maps} = WandererApp.Cache.lookup("started_maps", []) - assert zombie_map_id in started_maps - - # Trigger reconciliation - send(Reconciler, :reconcile) - # Give it time to process - Process.sleep(200) - - # Verify zombie was cleaned up - {:ok, started_maps_after} = WandererApp.Cache.lookup("started_maps", []) - refute zombie_map_id in started_maps_after - else - :ok - end - end - - test "reconciler cleans up zombie map caches", %{reconciler_running: reconciler_running?} do - if reconciler_running? do - zombie_map_id = "zombie_map_#{:rand.uniform(1_000_000)}" - - # Setup zombie state - WandererApp.Cache.insert_or_update( - "started_maps", - [zombie_map_id], - fn existing -> [zombie_map_id | existing] |> Enum.uniq() end - ) - - WandererApp.Cache.insert("map_#{zombie_map_id}:started", true) - Cachex.put(@cache, zombie_map_id, "fake_uuid") - - # Trigger reconciliation - send(Reconciler, :reconcile) - Process.sleep(200) - - # Verify all caches cleaned - {:ok, started_maps} = WandererApp.Cache.lookup("started_maps", []) - refute zombie_map_id in started_maps - - {:ok, cache_entry} = Cachex.get(@cache, zombie_map_id) - assert cache_entry == nil - else - :ok - end - end - end - - describe "Reconciler - orphan map detection and fix" do - @tag :skip - test "reconciler detects orphan maps in registry" do - # This would require setting up a pool with maps in registry - # but not in started_maps cache - :ok - end - - @tag :skip - test "reconciler adds orphan maps to started_maps cache" do - # This would verify orphan maps get added to the cache - :ok - end - end - - describe "Reconciler - cache inconsistency detection and fix" do - test "reconciler detects map with missing cache entry", %{ - reconciler_running: reconciler_running? - } do - if reconciler_running? do - # This test verifies the reconciler can detect when a map - # is in the registry but has no cache entry - # Since we can't easily set up a full pool, we test the detection logic - - map_id = "test_map_#{:rand.uniform(1_000_000)}" - - # Ensure no cache entry - Cachex.del(@cache, map_id) - - # The reconciler would detect this if the map was in a registry - # For now, we just verify the logic doesn't crash - send(Reconciler, :reconcile) - Process.sleep(200) - - # No assertions needed - just verifying no crashes - end - end - - test "reconciler detects cache pointing to non-existent pool", %{ - reconciler_running: reconciler_running? - } do - if reconciler_running? do - map_id = "test_map_#{:rand.uniform(1_000_000)}" - fake_uuid = "fake_uuid_#{:rand.uniform(1_000_000)}" - - # Put fake uuid in cache - Cachex.put(@cache, map_id, fake_uuid) - - # Trigger reconciliation - send(Reconciler, :reconcile) - Process.sleep(200) - - # Cache entry should be removed since pool doesn't exist - {:ok, cache_entry} = Cachex.get(@cache, map_id) - assert cache_entry == nil - else - :ok - end - end - end - - describe "Reconciler - stats and telemetry" do - test "reconciler emits telemetry events", %{reconciler_running: reconciler_running?} do - if reconciler_running? 
do - # Setup telemetry handler - test_pid = self() - - :telemetry.attach( - "test-reconciliation", - [:wanderer_app, :map, :reconciliation], - fn _event, measurements, _metadata, _config -> - send(test_pid, {:telemetry, measurements}) - end, - nil - ) - - # Trigger reconciliation - send(Reconciler, :reconcile) - Process.sleep(200) - - # Should receive telemetry event - assert_receive {:telemetry, measurements}, 500 - - assert is_integer(measurements.total_started_maps) - assert is_integer(measurements.total_registry_maps) - assert is_integer(measurements.zombie_maps) - assert is_integer(measurements.orphan_maps) - assert is_integer(measurements.cache_inconsistencies) - - # Cleanup - :telemetry.detach("test-reconciliation") - else - :ok - end - end - end - - describe "Reconciler - manual trigger" do - test "trigger_reconciliation runs reconciliation immediately", %{ - reconciler_running: reconciler_running? - } do - if reconciler_running? do - zombie_map_id = "zombie_map_#{:rand.uniform(1_000_000)}" - - # Setup zombie state - WandererApp.Cache.insert_or_update( - "started_maps", - [zombie_map_id], - fn existing -> [zombie_map_id | existing] |> Enum.uniq() end - ) - - # Verify it exists - {:ok, started_maps_before} = WandererApp.Cache.lookup("started_maps", []) - assert zombie_map_id in started_maps_before - - # Trigger manual reconciliation - Reconciler.trigger_reconciliation() - Process.sleep(200) - - # Verify zombie was cleaned up - {:ok, started_maps_after} = WandererApp.Cache.lookup("started_maps", []) - refute zombie_map_id in started_maps_after - else - :ok - end - end - end - - describe "edge cases and error handling" do - test "stop_map with cache error returns ok", %{registries_running: registries_running?} do - if registries_running? do - map_id = "test_map_#{:rand.uniform(1_000_000)}" - - # Even if cache operations fail, should return :ok - assert :ok = MapPoolDynamicSupervisor.stop_map(map_id) - else - :ok - end - end - - test "reconciler handles empty registries gracefully", %{ - reconciler_running: reconciler_running? - } do - if reconciler_running? do - # Clear everything - cleanup_test_data() - - # Should not crash even with empty data - send(Reconciler, :reconcile) - Process.sleep(200) - - # No assertions - just verifying no crash - assert true - else - :ok - end - end - - test "reconciler handles nil values in caches", %{reconciler_running: reconciler_running?} do - if reconciler_running? do - map_id = "test_map_#{:rand.uniform(1_000_000)}" - - # Explicitly set nil - Cachex.put(@cache, map_id, nil) - - # Should handle gracefully - send(Reconciler, :reconcile) - Process.sleep(200) - - assert true - else - :ok - end - end - end -end diff --git a/test/unit/map/slug_uniqueness_test.exs.bak b/test/unit/map/slug_uniqueness_test.exs.bak deleted file mode 100644 index e2ea0e2d..00000000 --- a/test/unit/map/slug_uniqueness_test.exs.bak +++ /dev/null @@ -1,361 +0,0 @@ -defmodule WandererApp.Map.SlugUniquenessTest do - @moduledoc """ - Tests for map slug uniqueness constraints and handling. - - These tests verify that: - 1. Database unique constraint is enforced - 2. Application-level slug generation handles uniqueness - 3. Concurrent map creation doesn't create duplicates - 4. 
Error handling works correctly for slug conflicts - """ - use WandererApp.DataCase, async: true - - alias WandererApp.Api.Map - - require Logger - - describe "slug uniqueness constraint" do - setup do - # Create a test character (which includes a user) - character = create_test_user() - %{character: character} - end - - test "prevents duplicate slugs via database constraint", %{character: character} do - # Create first map with a specific slug - {:ok, map1} = - Map.new(%{ - name: "Test Map", - slug: "test-map", - owner_id: character.id, - description: "First map", - scope: "wormholes" - }) - - assert map1.slug == "test-map" - - # Attempt to create second map with same slug - # The updated logic now auto-increments the slug instead of failing - result = - Map.new(%{ - name: "Different Name", - slug: "test-map", - owner_id: character.id, - description: "Second map", - scope: "wormholes" - }) - - # Should succeed with auto-incremented slug - assert {:ok, map2} = result - assert map2.slug == "test-map-2" - end - - test "automatically increments slug when duplicate detected", %{character: character} do - # Create first map - {:ok, map1} = - Map.new(%{ - name: "Test Map", - slug: "test-map", - owner_id: character.id, - description: "First map", - scope: "wormholes" - }) - - assert map1.slug == "test-map" - - # Create second map with same name (should auto-increment slug) - {:ok, map2} = - Map.new(%{ - name: "Test Map", - slug: "test-map", - owner_id: character.id, - description: "Second map", - scope: "wormholes" - }) - - # Slug should be automatically incremented - assert map2.slug == "test-map-2" - - # Create third map with same name - {:ok, map3} = - Map.new(%{ - name: "Test Map", - slug: "test-map", - owner_id: character.id, - description: "Third map", - scope: "wormholes" - }) - - assert map3.slug == "test-map-3" - end - - test "handles many maps with similar names", %{character: character} do - # Create 10 maps with the same base slug - maps = - for i <- 1..10 do - {:ok, map} = - Map.new(%{ - name: "Popular Name", - slug: "popular-name", - owner_id: character.id, - description: "Map #{i}", - scope: "wormholes" - }) - - map - end - - # Verify all slugs are unique - slugs = Enum.map(maps, & &1.slug) - assert length(Enum.uniq(slugs)) == 10 - - # First should keep the base slug - assert List.first(maps).slug == "popular-name" - - # Others should be numbered - assert "popular-name-2" in slugs - assert "popular-name-10" in slugs - end - end - - describe "concurrent slug creation (race condition)" do - setup do - character = create_test_user() - %{character: character} - end - - @tag :slow - test "handles concurrent map creation with identical slugs", %{character: character} do - # Create 5 concurrent map creation requests with the same slug - tasks = - for i <- 1..5 do - Task.async(fn -> - Map.new(%{ - name: "Concurrent Test", - slug: "concurrent-test", - owner_id: character.id, - description: "Concurrent map #{i}", - scope: "wormholes" - }) - end) - end - - # Wait for all tasks to complete - results = Task.await_many(tasks, 10_000) - - # All should either succeed or fail gracefully (no crashes) - assert length(results) == 5 - - # Get successful results - successful = Enum.filter(results, &match?({:ok, _}, &1)) - failed = Enum.filter(results, &match?({:error, _}, &1)) - - # At least some should succeed - assert length(successful) > 0 - - # Extract maps from successful results - maps = Enum.map(successful, fn {:ok, map} -> map end) - - # Verify all successful maps have unique slugs - slugs = 
Enum.map(maps, & &1.slug) - - assert length(Enum.uniq(slugs)) == length(slugs), - "All successful maps should have unique slugs" - - # Log results for visibility - Logger.info("Concurrent test: #{length(successful)} succeeded, #{length(failed)} failed") - Logger.info("Unique slugs created: #{inspect(slugs)}") - end - - @tag :slow - test "concurrent creation with different names creates different base slugs", %{character: character} do - # Create concurrent requests with different names (should all succeed) - tasks = - for i <- 1..5 do - Task.async(fn -> - Map.new(%{ - name: "Concurrent Map #{i}", - slug: "concurrent-map-#{i}", - owner_id: character.id, - description: "Map #{i}", - scope: "wormholes" - }) - end) - end - - results = Task.await_many(tasks, 10_000) - - # All should succeed - assert Enum.all?(results, &match?({:ok, _}, &1)) - - # All should have different slugs - slugs = Enum.map(results, fn {:ok, map} -> map.slug end) - assert length(Enum.uniq(slugs)) == 5 - end - end - - describe "slug generation edge cases" do - setup do - character = create_test_user() - %{character: character} - end - - test "handles very long slugs", %{character: character} do - # Create map with name within limits but slug that's very long - # Note: name max is 20 chars, slug max is 40 chars - long_slug = String.duplicate("a", 50) - - # Attempting to create a map with a slug that's too long should fail validation - result = - Map.new(%{ - name: "Long Slug Test", - slug: long_slug, - owner_id: character.id, - description: "Long slug test", - scope: "wormholes" - }) - - # Should fail because slug exceeds max length - assert {:error, _error} = result - - # But creating with a slug exactly at max length should work - max_length_slug = String.duplicate("a", 40) - - {:ok, map} = - Map.new(%{ - name: "Long Slug Test", - slug: max_length_slug, - owner_id: character.id, - description: "Long slug test", - scope: "wormholes" - }) - - assert String.length(map.slug) == 40 - end - - test "handles special characters in slugs", %{character: character} do - # Test that special characters are properly slugified - {:ok, map} = - Map.new(%{ - name: "Test: Map & Name!", - slug: "test-map-name", - owner_id: character.id, - description: "Special chars test", - scope: "wormholes" - }) - - # Slug should only contain allowed characters - assert map.slug =~ ~r/^[a-z0-9-]+$/ - end - end - - describe "slug update operations" do - setup do - character = create_test_user() - - {:ok, map} = - Map.new(%{ - name: "Original Map", - slug: "original-map", - owner_id: character.id, - description: "Original", - scope: "wormholes" - }) - - %{character: character, map: map} - end - - test "updating map with same slug succeeds", %{map: map} do - # Update other fields, keep same slug - result = - Map.update(map, %{ - description: "Updated description", - slug: "original-map" - }) - - assert {:ok, updated_map} = result - assert updated_map.slug == "original-map" - assert updated_map.description == "Updated description" - end - - test "updating to conflicting slug is handled", %{character: character, map: map} do - # Create another map - {:ok, _other_map} = - Map.new(%{ - name: "Other Map", - slug: "other-map", - owner_id: character.id, - description: "Other", - scope: "wormholes" - }) - - # Try to update first map to use other map's slug - result = - Map.update(map, %{ - slug: "other-map" - }) - - # Should either fail or auto-increment - case result do - {:ok, updated_map} -> - # If successful, slug should be different - assert updated_map.slug != 
"other-map" - assert updated_map.slug =~ ~r/^other-map-\d+$/ - - {:error, _} -> - # Or it can fail with validation error - :ok - end - end - end - - describe "get_map_by_slug with duplicates" do - setup do - character = create_test_user() - %{character: character} - end - - test "get_map_by_slug! raises on duplicates if they exist" do - # Note: This test documents the behavior when duplicates somehow exist - # In production, this should be prevented by our fixes - # If duplicates exist (data integrity issue), the query should fail - - # This is a documentation test - we can't easily create duplicates - # due to the database constraint, but we document expected behavior - assert true - end - end - - # Helper functions - - defp create_test_user do - # Create a test user with necessary attributes - user = - case Ash.create(WandererApp.Api.User, %{ - name: "Test User #{:rand.uniform(10_000)}", - hash: "test_hash_#{:rand.uniform(100_000_000)}" - }) do - {:ok, user} -> user - {:error, reason} -> raise "Failed to create user: #{inspect(reason)}" - end - - # Create a character for the user (maps need character as owner) - unique_id = System.unique_integer([:positive]) - - character = - case Ash.create( - WandererApp.Api.Character, - %{ - eve_id: "#{2_000_000_000 + unique_id}", - name: "Test Character #{unique_id}", - user_id: user.id - }, - action: :link - ) do - {:ok, character} -> character - {:error, reason} -> raise "Failed to create character: #{inspect(reason)}" - end - - character - end -end diff --git a/test/unit/map_duplication_service_test.exs b/test/unit/map_duplication_service_test.exs index bf47a9ee..b0ab5f37 100644 --- a/test/unit/map_duplication_service_test.exs +++ b/test/unit/map_duplication_service_test.exs @@ -82,7 +82,6 @@ defmodule WandererApp.MapDuplicationServiceTest do assert {:error, {:not_found, _message}} = result end - @tag :skip test "preserves original map unchanged", %{owner: owner, source_map: source_map} do original_name = source_map.name original_description = source_map.description @@ -114,7 +113,7 @@ defmodule WandererApp.MapDuplicationServiceTest do {:ok, duplicate1} = Duplication.duplicate_map(source_map.id, target_map1, []) - # Create second duplicate + # Create second duplicate target_map2 = insert(:map, %{ name: "Unique Copy 2",