Compare commits

...

182 Commits

Author SHA1 Message Date
CI
e1a946bb1d chore: release version v1.84.36 2025-11-19 10:33:10 +00:00
Dmitry Popov
543ec7f071 fix: fixed duplicated map slugs 2025-11-19 11:32:35 +01:00
CI
bf40d2cb8d chore: [skip ci] 2025-11-19 09:44:24 +00:00
CI
48ac40ea55 chore: release version v1.84.35 2025-11-19 09:44:24 +00:00
Dmitry Popov
5a3f3c40fe Merge pull request #552 from guarzo/guarzo/structurefix
fix: structure search / paste issues
2025-11-19 13:43:52 +04:00
guarzo
d5bac311ff Merge branch 'main' into guarzo/structurefix 2025-11-18 22:24:30 -05:00
Guarzo
34a7c854ed fix: structure search / paste issues 2025-11-18 22:19:04 -05:00
CI
ebb6090be9 chore: [skip ci] 2025-11-18 11:47:15 +00:00
CI
7a4d31db60 chore: release version v1.84.34 2025-11-18 11:47:15 +00:00
Dmitry Popov
2acf9ed5dc Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-18 12:46:45 +01:00
Dmitry Popov
46df025200 fix(core): fixed character tracking issues 2025-11-18 12:46:42 +01:00
CI
43a363b5ab chore: [skip ci] 2025-11-18 11:00:34 +00:00
CI
03688387d8 chore: release version v1.84.33 2025-11-18 11:00:34 +00:00
Dmitry Popov
5060852918 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-18 12:00:04 +01:00
Dmitry Popov
57381b9782 fix(core): fixed character tracking issues 2025-11-18 12:00:01 +01:00
CI
6014c60e13 chore: [skip ci] 2025-11-18 10:08:04 +00:00
CI
1b711d7b4b chore: release version v1.84.32 2025-11-18 10:08:04 +00:00
Dmitry Popov
f761ba9746 fix(core): fixed character tracking issues 2025-11-18 11:04:32 +01:00
CI
20a795c5b5 chore: [skip ci] 2025-11-17 13:41:22 +00:00
CI
0c80894c65 chore: release version v1.84.31 2025-11-17 13:41:22 +00:00
Dmitry Popov
21844f0550 fix(core): fixed connactions validation logic 2025-11-17 14:40:46 +01:00
CI
f7716ca45a chore: [skip ci] 2025-11-17 12:38:04 +00:00
CI
de74714c77 chore: release version v1.84.30 2025-11-17 12:38:04 +00:00
Dmitry Popov
4dfa83bd30 chore: fixed character updates issue 2025-11-17 13:37:30 +01:00
CI
cb4dba8dc2 chore: [skip ci] 2025-11-17 12:09:39 +00:00
CI
1d75b8f063 chore: release version v1.84.29 2025-11-17 12:09:39 +00:00
Dmitry Popov
2a42c4e6df Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-17 13:09:08 +01:00
Dmitry Popov
0ee6160bcd chore: fixed MapEventRelay logs 2025-11-17 13:09:05 +01:00
CI
5826d2492b chore: [skip ci] 2025-11-17 11:53:30 +00:00
CI
a643e20247 chore: release version v1.84.28 2025-11-17 11:53:30 +00:00
Dmitry Popov
66dc680281 fix(core): fixed ACL updates 2025-11-17 12:52:59 +01:00
CI
46f46c745e chore: [skip ci] 2025-11-17 09:16:32 +00:00
CI
00bf620e35 chore: release version v1.84.27 2025-11-17 09:16:32 +00:00
Dmitry Popov
46eef60d86 chore: fixed warnings 2025-11-17 10:15:57 +01:00
Dmitry Popov
fe836442ab fix(core): supported characters_updates for external events 2025-11-17 00:08:08 +01:00
Dmitry Popov
9514806dbb fix(core): improved character tracking 2025-11-16 23:45:39 +01:00
Dmitry Popov
4e6423ebc8 fix(core): improved character tracking 2025-11-16 18:28:58 +01:00
Dmitry Popov
a97e598299 fix(core): improved character location tracking 2025-11-16 16:39:39 +01:00
CI
9c26b50aac chore: [skip ci] 2025-11-16 01:14:41 +00:00
CI
3f2ddf5cc4 chore: release version v1.84.26 2025-11-16 01:14:41 +00:00
Dmitry Popov
233b2bd7a4 fix(core): disable character tracker pausing 2025-11-16 02:14:05 +01:00
CI
0d35268efc chore: [skip ci] 2025-11-16 01:01:35 +00:00
CI
d169220eb2 chore: release version v1.84.25 2025-11-16 01:01:35 +00:00
Dmitry Popov
182d5ec9fb fix(core): used upsert for adding map systems 2025-11-16 02:00:59 +01:00
CI
32958253b7 chore: [skip ci] 2025-11-15 22:50:08 +00:00
CI
c011d56ce7 chore: release version v1.84.24 2025-11-15 22:50:08 +00:00
Dmitry Popov
73d1921d42 Merge pull request #549 from wanderer-industries/redesign-and-fixes
fix(Map): New design and prepared main pages for new patch
2025-11-16 02:49:36 +04:00
Dmitry Popov
7bb810e1e6 chore: update bg image url 2025-11-15 23:35:59 +01:00
CI
c90ac7b1e3 chore: [skip ci] 2025-11-15 22:17:28 +00:00
CI
005e0c2bc6 chore: release version v1.84.23 2025-11-15 22:17:28 +00:00
Dmitry Popov
808acb540e fix(core): fixed map pings cancel errors 2025-11-15 23:16:58 +01:00
DanSylvest
06626f910b fix(Map): Fixed problem related with error if settings was removed and mapper crashed. Fixed settings reset. 2025-11-15 21:30:45 +03:00
CI
812582d955 chore: [skip ci] 2025-11-15 11:38:00 +00:00
CI
f3077c0bf1 chore: release version v1.84.22 2025-11-15 11:38:00 +00:00
Dmitry Popov
32c70cbbad Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-15 12:37:31 +01:00
Dmitry Popov
8934935e10 fix(core): fixed map initialization 2025-11-15 12:37:27 +01:00
CI
20c8a53712 chore: [skip ci] 2025-11-15 08:48:30 +00:00
CI
b22970fef3 chore: release version v1.84.21 2025-11-15 08:48:30 +00:00
Dmitry Popov
cf72394ef9 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-15 09:47:53 +01:00
Dmitry Popov
e6dbba7283 fix(core): fixed map characters adding 2025-11-15 09:47:48 +01:00
CI
843b3b86b2 chore: [skip ci] 2025-11-15 07:29:25 +00:00
CI
bd865b9f64 chore: release version v1.84.20 2025-11-15 07:29:25 +00:00
Dmitry Popov
ae91cd2f92 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-15 08:25:59 +01:00
Dmitry Popov
0be7a5f9d0 fix(core): fixed map start issues 2025-11-15 08:25:55 +01:00
CI
e15bfa426a chore: [skip ci] 2025-11-14 19:28:51 +00:00
CI
4198e4b07a chore: release version v1.84.19 2025-11-14 19:28:51 +00:00
Dmitry Popov
03ee08ff67 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-14 20:28:16 +01:00
Dmitry Popov
ac4dd4c28b fix(core): fixed map start issues 2025-11-14 20:28:12 +01:00
CI
308e81a464 chore: [skip ci] 2025-11-14 18:36:20 +00:00
CI
6f4240d931 chore: release version v1.84.18 2025-11-14 18:36:20 +00:00
Dmitry Popov
847b45a431 fix(core): added gracefull map poll recovery from saved state. added map slug unique checks 2025-11-14 19:35:45 +01:00
CI
5ec97d74ca chore: [skip ci] 2025-11-14 13:43:40 +00:00
CI
74359a5542 chore: release version v1.84.17 2025-11-14 13:43:40 +00:00
Dmitry Popov
0020f46dd8 fix(core): fixed activity tracking issues 2025-11-14 14:42:44 +01:00
CI
a6751b45c6 chore: [skip ci] 2025-11-13 16:20:24 +00:00
CI
f48aeb5cec chore: release version v1.84.16 2025-11-13 16:20:24 +00:00
Dmitry Popov
a5f25646c9 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-13 17:19:47 +01:00
Dmitry Popov
23cf1fd96f fix(core): removed maps auto-start logic 2025-11-13 17:19:44 +01:00
CI
6f15521069 chore: [skip ci] 2025-11-13 14:49:32 +00:00
CI
9d41e57c06 chore: release version v1.84.15 2025-11-13 14:49:32 +00:00
Dmitry Popov
ea9a22df09 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-13 15:49:01 +01:00
Dmitry Popov
0d4fd6f214 fix(core): fixed maps start/stop logic, added server downtime period support 2025-11-13 15:48:56 +01:00
CI
87a6c20545 chore: [skip ci] 2025-11-13 14:46:26 +00:00
CI
c375f4e4ce chore: release version v1.84.14 2025-11-13 14:46:26 +00:00
Dmitry Popov
843a6d7320 Merge pull request #543 from wanderer-industries/fix-error-on-remove-settings
fix(Map): Fixed problem related with error if settings was removed an…
2025-11-13 18:43:13 +04:00
DanSylvest
98c54a3413 fix(Map): Fixed problem related with error if settings was removed and mapper crashed. Fixed settings reset. 2025-11-13 12:53:40 +03:00
CI
0439110938 chore: [skip ci] 2025-11-13 07:52:33 +00:00
CI
8ce1e5fa3e chore: release version v1.84.13 2025-11-13 07:52:33 +00:00
Dmitry Popov
ebaf6bcdc6 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-13 08:52:00 +01:00
Dmitry Popov
40d947bebc chore: updated RELEASE_NODE for server defaults 2025-11-13 08:51:56 +01:00
CI
61d1c3848f chore: [skip ci] 2025-11-13 07:39:29 +00:00
CI
e152ce179f chore: release version v1.84.12 2025-11-13 07:39:29 +00:00
Dmitry Popov
7bbe387183 chore: reduce garbage collection interval 2025-11-13 08:38:52 +01:00
CI
b1555ff03c chore: [skip ci] 2025-11-12 18:53:48 +00:00
CI
e624499244 chore: release version v1.84.11 2025-11-12 18:53:48 +00:00
Dmitry Popov
6a1976dec6 Merge pull request #541 from guarzo/guarzo/apifun2
fix: api and doc updates
2025-11-12 22:53:17 +04:00
Guarzo
3db24c4344 fix: api and doc updates 2025-11-12 18:39:21 +00:00
CI
883c09f255 chore: [skip ci] 2025-11-12 17:28:54 +00:00
CI
ff24d80038 chore: release version v1.84.10 2025-11-12 17:28:54 +00:00
Dmitry Popov
63cbc9c0b9 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-12 18:28:20 +01:00
Dmitry Popov
8056972a27 fix(core): Fixed adding system on character dock 2025-11-12 18:28:16 +01:00
CI
1759d46740 chore: [skip ci] 2025-11-12 13:28:14 +00:00
CI
e4b7d2e45b chore: release version v1.84.9 2025-11-12 13:28:14 +00:00
Dmitry Popov
41573cbee3 Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-12 14:27:43 +01:00
Dmitry Popov
24ffc20bb8 chore: added ccp attribution to footer 2025-11-12 14:27:40 +01:00
CI
e077849b66 chore: [skip ci] 2025-11-12 12:42:09 +00:00
CI
375a9ef65b chore: release version v1.84.8 2025-11-12 12:42:08 +00:00
Dmitry Popov
9bf90ab752 fix(core): added cleanup jobs for old system signatures & chain passages 2025-11-12 13:41:33 +01:00
CI
90c3481151 chore: [skip ci] 2025-11-12 10:57:58 +00:00
CI
e36b08a7e5 chore: release version v1.84.7 2025-11-12 10:57:58 +00:00
Dmitry Popov
e1f79170c3 Merge pull request #540 from guarzo/guarzo/apifun
fix: api and search fixes
2025-11-12 14:54:33 +04:00
Guarzo
68b5455e91 bug fix 2025-11-12 07:25:49 +00:00
Guarzo
f28e75c7f4 pr updates 2025-11-12 07:16:21 +00:00
Guarzo
6091adb28e fix: api and structure search fixes 2025-11-12 07:07:39 +00:00
CI
d4657b335f chore: [skip ci] 2025-11-12 00:13:07 +00:00
CI
7fee850902 chore: release version v1.84.6 2025-11-12 00:13:07 +00:00
Dmitry Popov
648c168a66 fix(core): Added map slug uniqness checking while using API 2025-11-12 01:12:13 +01:00
CI
f5c4b2c407 chore: [skip ci] 2025-11-11 12:52:39 +00:00
CI
b592223d52 chore: release version v1.84.5 2025-11-11 12:52:39 +00:00
Dmitry Popov
5cf118c6ee Merge branch 'main' of github.com:wanderer-industries/wanderer 2025-11-11 13:52:11 +01:00
Dmitry Popov
b25013c652 fix(core): Added tracking for map & character event handling errors 2025-11-11 13:52:07 +01:00
CI
cf43861b11 chore: [skip ci] 2025-11-11 12:27:54 +00:00
CI
b5fe8f8878 chore: release version v1.84.4 2025-11-11 12:27:54 +00:00
Dmitry Popov
5e5068c7de fix(core): fixed issue with updating system signatures 2025-11-11 13:27:17 +01:00
CI
624b51edfb chore: [skip ci] 2025-11-11 09:52:29 +00:00
CI
a72f8e60c4 chore: release version v1.84.3 2025-11-11 09:52:29 +00:00
Dmitry Popov
dec8ae50c9 Merge branch 'develop' 2025-11-11 10:51:55 +01:00
Dmitry Popov
0332d36a8e fix(core): fixed linked signature time status update
Some checks failed
Build Test / 🚀 Deploy to test env (fly.io) (push) Has been cancelled
Build Test / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/amd64) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/arm64) (push) Has been cancelled
Build / merge (push) Has been cancelled
Build / 🏷 Create Release (push) Has been cancelled
🧪 Test Suite / Test Suite (push) Has been cancelled
2025-11-11 10:51:43 +01:00
CI
8444c7f82d chore: [skip ci] 2025-11-10 16:57:53 +00:00
CI
ec3fc7447e chore: release version v1.84.2 2025-11-10 16:57:53 +00:00
Dmitry Popov
20ec2800c9 Merge pull request #538 from wanderer-industries/develop
Develop
2025-11-10 20:56:53 +04:00
Dmitry Popov
6fbf43e860 fix(api): fixed api for get/update map systems
Some checks failed
Build Test / 🚀 Deploy to test env (fly.io) (push) Has been cancelled
Build Test / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/amd64) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/arm64) (push) Has been cancelled
Build / merge (push) Has been cancelled
Build / 🏷 Create Release (push) Has been cancelled
🧪 Test Suite / Test Suite (push) Has been cancelled
2025-11-10 17:23:44 +01:00
Dmitry Popov
697da38020 Merge pull request #537 from guarzo/guarzo/apisystemperf
Some checks failed
Build Test / 🚀 Deploy to test env (fly.io) (push) Has been cancelled
Build Test / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
🧪 Test Suite / Test Suite (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/amd64) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/arm64) (push) Has been cancelled
Build / merge (push) Has been cancelled
Build / 🏷 Create Release (push) Has been cancelled
fix: add indexes for map/system
2025-11-09 01:48:01 +04:00
Guarzo
4bc65b43d2 fix: add index for map/systems api 2025-11-08 14:30:19 +00:00
Dmitry Popov
910ec97fd1 chore: refactored map server processes
Some checks failed
Build Test / 🚀 Deploy to test env (fly.io) (push) Has been cancelled
Build Test / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
🧪 Test Suite / Test Suite (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/amd64) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/arm64) (push) Has been cancelled
Build / merge (push) Has been cancelled
Build / 🏷 Create Release (push) Has been cancelled
2025-11-06 09:23:19 +01:00
Dmitry Popov
40ed58ee8c Merge pull request #536 from wanderer-industries/refactor-map-servers
Some checks failed
Build Test / 🚀 Deploy to test env (fly.io) (push) Has been cancelled
Build Test / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build (1.17, 18.x, 27) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/amd64) (push) Has been cancelled
Build / 🛠 Build Docker Images (linux/arm64) (push) Has been cancelled
Build / merge (push) Has been cancelled
Build / 🏷 Create Release (push) Has been cancelled
🧪 Test Suite / Test Suite (push) Has been cancelled
Refactor map servers
2025-11-06 03:03:57 +04:00
Dmitry Popov
c18d241c77 Merge branch 'develop' into refactor-map-servers 2025-11-06 00:01:32 +01:00
Dmitry Popov
8b42908a5c chore: refactored map server processes 2025-11-06 00:01:04 +01:00
Dmitry Popov
6d32505a59 chore: added map cached rtree implementation 2025-11-04 23:40:37 +01:00
Dmitry Popov
fe8a34c77d chore: refactored map state usage 2025-11-04 22:40:04 +01:00
CI
d12cafcca8 chore: [skip ci] 2025-11-01 20:01:52 +00:00
CI
38a9c76ff0 chore: release version v1.84.1 2025-11-01 20:01:52 +00:00
Dmitry Popov
d6c30b4a53 fix(Core): Fixed connection time status update issue 2025-11-01 21:01:18 +01:00
CI
53a81daaf5 chore: [skip ci] 2025-10-29 14:30:52 +00:00
CI
92081c99e3 chore: release version v1.84.0 2025-10-29 14:30:52 +00:00
Dmitry Popov
d78020d2f5 Merge pull request #535 from wanderer-industries/esi-rate-limits
feat(Core): ESI API rate limits support
2025-10-29 18:30:18 +04:00
Dmitry Popov
fb1a9b440d feat(Core): ESI API rate limits support
fixes #534
2025-10-29 15:29:12 +01:00
CI
0141ac46e3 chore: [skip ci] 2025-10-29 09:24:54 +00:00
CI
d2bf6a8f86 chore: release version v1.83.4 2025-10-29 09:24:54 +00:00
Dmitry Popov
1844e4c757 fix(Core): Fixed page reloads 2025-10-29 10:23:54 +01:00
CI
d407efe805 chore: [skip ci] 2025-10-27 23:52:49 +00:00
CI
021e04d87a chore: release version v1.83.3 2025-10-27 23:52:49 +00:00
Dmitry Popov
7844c9db34 fix(Core): Fixed old map API for systems & added small QOL improvements 2025-10-28 00:52:04 +01:00
CI
355beb8394 chore: [skip ci] 2025-10-22 16:09:15 +00:00
CI
d82eeba792 chore: release version v1.83.2 2025-10-22 16:09:15 +00:00
Dmitry Popov
0396b05e58 fix(Connections): Set new connection time status based on to/from system class 2025-10-22 18:08:38 +02:00
CI
9494a9eb37 chore: [skip ci] 2025-10-21 14:13:39 +00:00
CI
8238f84ac7 chore: release version v1.83.1 2025-10-21 14:13:39 +00:00
Dmitry Popov
1cf19b2a50 fix(Kills): Fixed zkb links (added following '/'). 2025-10-21 16:13:08 +02:00
CI
e8543fd2f8 chore: [skip ci] 2025-10-21 07:45:45 +00:00
CI
c7f360e1fa chore: release version v1.83.0 2025-10-21 07:45:45 +00:00
Dmitry Popov
a2b83f7f0c Merge pull request #531 from wanderer-industries/copy-past-roles
Copy past roles
2025-10-21 11:45:13 +04:00
CI
ae5689a403 chore: [skip ci] 2025-10-21 06:52:44 +00:00
CI
c46af1d286 chore: release version v1.82.3 2025-10-21 06:52:44 +00:00
Aleksei Chichenkov
d17ba2168c Merge pull request #533 from wanderer-industries/fix-db
fix(Map): Fix system static info - add source region for U319 from Null-sec
2025-10-21 09:52:17 +03:00
DanSylvest
80c14716eb fix(Map): Fix system static info - add source region for U319 from Null-sec 2025-10-21 09:50:10 +03:00
CI
8541fcd29b chore: [skip ci] 2025-10-21 06:41:33 +00:00
CI
65d6acd7fb chore: release version v1.82.2 2025-10-21 06:41:33 +00:00
Aleksei Chichenkov
8b5f83d6b2 Merge pull request #532 from wanderer-industries/fix-db
fix(Map): Fix system static info - for J012635 add D382; for J015092 …
2025-10-21 09:41:08 +03:00
DanSylvest
5e18891f4b fix(Map): Fix system static info - for J012635 add D382; for J015092 - changed from J244, Z060 to N110, J244; for J000487 removed C008 2025-10-21 09:38:47 +03:00
DanSylvest
74e0b85748 fix(Map): Copy-Paste restriction: support from FE side - fixed problem with incorrect disabling copy and paste buttons 2025-10-21 09:20:41 +03:00
DanSylvest
81d3495b65 fix(Map): Copy-Paste restriction: support from FE side - removed unnecessary constant 2025-10-20 12:51:20 +03:00
CI
d1959ca09f chore: [skip ci] 2025-10-20 09:33:16 +00:00
CI
ec7a5ecf10 chore: release version v1.82.1 2025-10-20 09:33:16 +00:00
DanSylvest
70b9ec99ba Merge remote-tracking branch 'origin/copy-past-roles' into copy-past-roles 2025-10-20 12:32:41 +03:00
Dmitry Popov
7147d79166 Merge branch 'main' into copy-past-roles 2025-10-20 11:33:45 +02:00
Dmitry Popov
1dad9316bd fix(Core): Fixed 'viewer' map access & characters tracking 2025-10-20 11:32:32 +02:00
DanSylvest
872f7dcf48 fix(Map): Copy-Paste restriction: support from FE side 2025-10-20 12:32:07 +03:00
Dmitry Popov
02b450325e fix(Core): Added Eve data downloaded files cleanup logic 2025-10-19 12:37:31 +02:00
Dmitry Popov
136bc4cbb9 feat(Core): Added map roles settings for copy/paste 2025-10-19 12:03:16 +02:00
Dmitry Popov
dab49df9aa Merge branch 'main' into copy-past-roles 2025-10-16 16:01:41 +02:00
Dmitry Popov
6286087f3e feat(Core): Added map roles settings for copy/paste 2025-10-16 16:01:12 +02:00
193 changed files with 11971 additions and 134911 deletions

View File

@@ -1,5 +1,7 @@
export WEB_APP_URL="http://localhost:8000"
export RELEASE_COOKIE="PDpbnyo6mEI_0T4ZsHH_ESmi1vT1toQ8PTc0vbfg5FIT4Ih-Lh98mw=="
# Erlang node name for distributed Erlang (optional - defaults to wanderer@hostname)
# export RELEASE_NODE="wanderer@localhost"
export EVE_CLIENT_ID="<EVE_CLIENT_ID>"
export EVE_CLIENT_SECRET="<EVE_CLIENT_SECRET>"
export EVE_CLIENT_WITH_WALLET_ID="<EVE_CLIENT_WITH_WALLET_ID>"

View File

@@ -2,6 +2,411 @@
<!-- changelog -->
## [v1.84.36](https://github.com/wanderer-industries/wanderer/compare/v1.84.35...v1.84.36) (2025-11-19)
### Bug Fixes:
* fixed duplicated map slugs
## [v1.84.35](https://github.com/wanderer-industries/wanderer/compare/v1.84.34...v1.84.35) (2025-11-19)
### Bug Fixes:
* structure search / paste issues
## [v1.84.34](https://github.com/wanderer-industries/wanderer/compare/v1.84.33...v1.84.34) (2025-11-18)
### Bug Fixes:
* core: fixed character tracking issues
## [v1.84.33](https://github.com/wanderer-industries/wanderer/compare/v1.84.32...v1.84.33) (2025-11-18)
### Bug Fixes:
* core: fixed character tracking issues
## [v1.84.32](https://github.com/wanderer-industries/wanderer/compare/v1.84.31...v1.84.32) (2025-11-18)
### Bug Fixes:
* core: fixed character tracking issues
## [v1.84.31](https://github.com/wanderer-industries/wanderer/compare/v1.84.30...v1.84.31) (2025-11-17)
### Bug Fixes:
* core: fixed connections validation logic
## [v1.84.30](https://github.com/wanderer-industries/wanderer/compare/v1.84.29...v1.84.30) (2025-11-17)
## [v1.84.29](https://github.com/wanderer-industries/wanderer/compare/v1.84.28...v1.84.29) (2025-11-17)
## [v1.84.28](https://github.com/wanderer-industries/wanderer/compare/v1.84.27...v1.84.28) (2025-11-17)
### Bug Fixes:
* core: fixed ACL updates
## [v1.84.27](https://github.com/wanderer-industries/wanderer/compare/v1.84.26...v1.84.27) (2025-11-17)
### Bug Fixes:
* core: supported characters_updates for external events
* core: improved character tracking
* core: improved character tracking
* core: improved character location tracking
## [v1.84.26](https://github.com/wanderer-industries/wanderer/compare/v1.84.25...v1.84.26) (2025-11-16)
### Bug Fixes:
* core: disable character tracker pausing
## [v1.84.25](https://github.com/wanderer-industries/wanderer/compare/v1.84.24...v1.84.25) (2025-11-16)
### Bug Fixes:
* core: used upsert for adding map systems
## [v1.84.24](https://github.com/wanderer-industries/wanderer/compare/v1.84.23...v1.84.24) (2025-11-15)
### Bug Fixes:
* Map: Fixed problem related with error if settings was removed and mapper crashed. Fixed settings reset.
## [v1.84.23](https://github.com/wanderer-industries/wanderer/compare/v1.84.22...v1.84.23) (2025-11-15)
### Bug Fixes:
* core: fixed map pings cancel errors
## [v1.84.22](https://github.com/wanderer-industries/wanderer/compare/v1.84.21...v1.84.22) (2025-11-15)
### Bug Fixes:
* core: fixed map initialization
## [v1.84.21](https://github.com/wanderer-industries/wanderer/compare/v1.84.20...v1.84.21) (2025-11-15)
### Bug Fixes:
* core: fixed map characters adding
## [v1.84.20](https://github.com/wanderer-industries/wanderer/compare/v1.84.19...v1.84.20) (2025-11-15)
### Bug Fixes:
* core: fixed map start issues
## [v1.84.19](https://github.com/wanderer-industries/wanderer/compare/v1.84.18...v1.84.19) (2025-11-14)
### Bug Fixes:
* core: fixed map start issues
## [v1.84.18](https://github.com/wanderer-industries/wanderer/compare/v1.84.17...v1.84.18) (2025-11-14)
### Bug Fixes:
* core: added graceful map poll recovery from saved state. added map slug unique checks
## [v1.84.17](https://github.com/wanderer-industries/wanderer/compare/v1.84.16...v1.84.17) (2025-11-14)
### Bug Fixes:
* core: fixed activity tracking issues
## [v1.84.16](https://github.com/wanderer-industries/wanderer/compare/v1.84.15...v1.84.16) (2025-11-13)
### Bug Fixes:
* core: removed maps auto-start logic
## [v1.84.15](https://github.com/wanderer-industries/wanderer/compare/v1.84.14...v1.84.15) (2025-11-13)
### Bug Fixes:
* core: fixed maps start/stop logic, added server downtime period support
## [v1.84.14](https://github.com/wanderer-industries/wanderer/compare/v1.84.13...v1.84.14) (2025-11-13)
### Bug Fixes:
* Map: Fixed problem related with error if settings was removed and mapper crashed. Fixed settings reset.
## [v1.84.13](https://github.com/wanderer-industries/wanderer/compare/v1.84.12...v1.84.13) (2025-11-13)
## [v1.84.12](https://github.com/wanderer-industries/wanderer/compare/v1.84.11...v1.84.12) (2025-11-13)
## [v1.84.11](https://github.com/wanderer-industries/wanderer/compare/v1.84.10...v1.84.11) (2025-11-12)
### Bug Fixes:
* api and doc updates
## [v1.84.10](https://github.com/wanderer-industries/wanderer/compare/v1.84.9...v1.84.10) (2025-11-12)
### Bug Fixes:
* core: Fixed adding system on character dock
## [v1.84.9](https://github.com/wanderer-industries/wanderer/compare/v1.84.8...v1.84.9) (2025-11-12)
## [v1.84.8](https://github.com/wanderer-industries/wanderer/compare/v1.84.7...v1.84.8) (2025-11-12)
### Bug Fixes:
* core: added cleanup jobs for old system signatures & chain passages
## [v1.84.7](https://github.com/wanderer-industries/wanderer/compare/v1.84.6...v1.84.7) (2025-11-12)
### Bug Fixes:
* api and structure search fixes
## [v1.84.6](https://github.com/wanderer-industries/wanderer/compare/v1.84.5...v1.84.6) (2025-11-12)
### Bug Fixes:
* core: Added map slug uniqueness checking while using API
## [v1.84.5](https://github.com/wanderer-industries/wanderer/compare/v1.84.4...v1.84.5) (2025-11-11)
### Bug Fixes:
* core: Added tracking for map & character event handling errors
## [v1.84.4](https://github.com/wanderer-industries/wanderer/compare/v1.84.3...v1.84.4) (2025-11-11)
### Bug Fixes:
* core: fixed issue with updating system signatures
## [v1.84.3](https://github.com/wanderer-industries/wanderer/compare/v1.84.2...v1.84.3) (2025-11-11)
### Bug Fixes:
* core: fixed linked signature time status update
## [v1.84.2](https://github.com/wanderer-industries/wanderer/compare/v1.84.1...v1.84.2) (2025-11-10)
### Bug Fixes:
* api: fixed api for get/update map systems
* add index for map/systems api
## [v1.84.1](https://github.com/wanderer-industries/wanderer/compare/v1.84.0...v1.84.1) (2025-11-01)
### Bug Fixes:
* Core: Fixed connection time status update issue
## [v1.84.0](https://github.com/wanderer-industries/wanderer/compare/v1.83.4...v1.84.0) (2025-10-29)
### Features:
* Core: ESI API rate limits support
## [v1.83.4](https://github.com/wanderer-industries/wanderer/compare/v1.83.3...v1.83.4) (2025-10-29)
### Bug Fixes:
* Core: Fixed page reloads
## [v1.83.3](https://github.com/wanderer-industries/wanderer/compare/v1.83.2...v1.83.3) (2025-10-27)
### Bug Fixes:
* Core: Fixed old map API for systems & added small QOL improvements
## [v1.83.2](https://github.com/wanderer-industries/wanderer/compare/v1.83.1...v1.83.2) (2025-10-22)
### Bug Fixes:
* Connections: Set new connection time status based on to/from system class
## [v1.83.1](https://github.com/wanderer-industries/wanderer/compare/v1.83.0...v1.83.1) (2025-10-21)
### Bug Fixes:
* Kills: Fixed zkb links (added following '/').
## [v1.83.0](https://github.com/wanderer-industries/wanderer/compare/v1.82.3...v1.83.0) (2025-10-21)
### Features:
* Core: Added map roles settings for copy/paste
* Core: Added map roles settings for copy/paste
### Bug Fixes:
* Map: Copy-Paste restriction: support from FE side - fixed problem with incorrect disabling copy and paste buttons
* Map: Copy-Paste restriction: support from FE side - removed unnecessary constant
* Map: Copy-Paste restriction: support from FE side
* Core: Added Eve data downloaded files cleanup logic
## [v1.82.3](https://github.com/wanderer-industries/wanderer/compare/v1.82.2...v1.82.3) (2025-10-21)
### Bug Fixes:
* Map: Fix system static info - add source region for U319 from Null-sec
## [v1.82.2](https://github.com/wanderer-industries/wanderer/compare/v1.82.1...v1.82.2) (2025-10-21)
### Bug Fixes:
* Map: Fix system static info - for J012635 add D382; for J015092 - changed from J244, Z060 to N110, J244; for J000487 removed C008
## [v1.82.1](https://github.com/wanderer-industries/wanderer/compare/v1.82.0...v1.82.1) (2025-10-20)
### Bug Fixes:
* Core: Fixed 'viewer' map access & characters tracking
## [v1.82.0](https://github.com/wanderer-industries/wanderer/compare/v1.81.15...v1.82.0) (2025-10-15)

View File

@@ -30,7 +30,7 @@ format f:
mix format
test t:
mix test
MIX_ENV=test mix test
coverage cover co:
mix test --cover
@@ -45,4 +45,3 @@ versions v:
@cat .tool-versions
@cat Aptfile
@echo

View File

@@ -73,7 +73,9 @@ body > div:first-of-type {
}
.maps_bg {
background-image: url('../images/maps_bg.webp');
/* OLD image */
/* background-image: url('../images/maps_bg.webp'); */
background-image: url('https://wanderer-industries.github.io/wanderer-assets/images/eve-screen-catalyst-expansion-bg.jpg');
background-size: cover;
background-position: center;
width: 100%;

View File

@@ -51,20 +51,8 @@ export const Characters = ({ data }: CharactersProps) => {
['border-lime-600/70']: character.online,
},
)}
title={character.tracking_paused ? `${character.name} - Tracking Paused (click to resume)` : character.name}
title={character.name}
>
{character.tracking_paused && (
<>
<span
className={clsx(
'absolute flex flex-col p-[2px] top-[0px] left-[0px] w-[35px] h-[35px]',
'text-yellow-500 text-[9px] z-10 bg-gray-800/40',
'pi',
PrimeIcons.PAUSE,
)}
/>
</>
)}
{mainCharacterEveId === character.eve_id && (
<span
className={clsx(

View File

@@ -118,7 +118,11 @@ export const useContextMenuSystemItems = ({
});
if (isShowPingBtn) {
return <WdMenuItem icon={iconClasses}>{!hasPing ? 'Ping: RALLY' : 'Cancel: RALLY'}</WdMenuItem>;
return (
<WdMenuItem icon={iconClasses} className="!ml-[-2px]">
{!hasPing ? 'Ping: RALLY' : 'Cancel: RALLY'}
</WdMenuItem>
);
}
return (
@@ -126,7 +130,7 @@ export const useContextMenuSystemItems = ({
infoTitle="Locked. Ping can be set only for one system."
infoClass="pi-lock text-stone-500 mr-[12px]"
>
<WdMenuItem disabled icon={iconClasses}>
<WdMenuItem disabled icon={iconClasses} className="!ml-[-2px]">
{!hasPing ? 'Ping: RALLY' : 'Cancel: RALLY'}
</WdMenuItem>
</MenuItemWithInfo>

View File

@@ -2,6 +2,10 @@ import React, { RefObject, useMemo } from 'react';
import { ContextMenu } from 'primereact/contextmenu';
import { PrimeIcons } from 'primereact/api';
import { MenuItem } from 'primereact/menuitem';
import { checkPermissions } from '@/hooks/Mapper/components/map/helpers';
import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';
import { MenuItemWithInfo, WdMenuItem } from '@/hooks/Mapper/components/ui-kit';
import clsx from 'clsx';
export interface ContextMenuSystemMultipleProps {
contextMenuRef: RefObject<ContextMenu>;
@@ -14,20 +18,44 @@ export const ContextMenuSystemMultiple: React.FC<ContextMenuSystemMultipleProps>
onDeleteSystems,
onCopySystems,
}) => {
const {
data: { options, userPermissions },
} = useMapRootState();
const items: MenuItem[] = useMemo(() => {
const allowCopy = checkPermissions(userPermissions, options.allowed_copy_for);
return [
{
label: 'Delete',
icon: clsx(PrimeIcons.TRASH, 'text-red-400'),
command: onDeleteSystems,
},
{ separator: true },
{
label: 'Copy',
icon: PrimeIcons.COPY,
command: onCopySystems,
},
{
label: 'Delete',
icon: PrimeIcons.TRASH,
command: onDeleteSystems,
disabled: !allowCopy,
template: () => {
if (allowCopy) {
return <WdMenuItem icon="pi pi-copy">Copy</WdMenuItem>;
}
return (
<MenuItemWithInfo
infoTitle="Action is blocked because you dont have permission to Copy."
infoClass={clsx(PrimeIcons.QUESTION_CIRCLE, 'text-stone-500 mr-[12px]')}
tooltipWrapperClassName="flex"
>
<WdMenuItem disabled icon="pi pi-copy">
Copy
</WdMenuItem>
</MenuItemWithInfo>
);
},
},
];
}, [onCopySystems, onDeleteSystems]);
}, [onCopySystems, onDeleteSystems, options, userPermissions]);
return (
<>

View File

@@ -1,10 +1,10 @@
import { useCallback, useRef } from 'react';
import { LayoutEventBlocker, TooltipPosition, WdImageSize, WdImgButton } from '@/hooks/Mapper/components/ui-kit';
import { ANOIK_ICON, DOTLAN_ICON, ZKB_ICON } from '@/hooks/Mapper/icons';
import { useCallback, useRef } from 'react';
import classes from './FastSystemActions.module.scss';
import clsx from 'clsx';
import { PrimeIcons } from 'primereact/api';
import classes from './FastSystemActions.module.scss';
export interface FastSystemActionsProps {
systemId: string;
@@ -27,7 +27,7 @@ export const FastSystemActions = ({
ref.current = { systemId, systemName, regionName, isWH };
const handleOpenZKB = useCallback(
() => window.open(`https://zkillboard.com/system/${ref.current.systemId}`, '_blank'),
() => window.open(`https://zkillboard.com/system/${ref.current.systemId}/`, '_blank'),
[],
);

View File

@@ -3,6 +3,10 @@ import { ContextMenu } from 'primereact/contextmenu';
import { PrimeIcons } from 'primereact/api';
import { MenuItem } from 'primereact/menuitem';
import { PasteSystemsAndConnections } from '@/hooks/Mapper/components/map/components';
import { useMapState } from '@/hooks/Mapper/components/map/MapProvider.tsx';
import { checkPermissions } from '@/hooks/Mapper/components/map/helpers';
import { MenuItemWithInfo, WdMenuItem } from '@/hooks/Mapper/components/ui-kit';
import clsx from 'clsx';
export interface ContextMenuRootProps {
contextMenuRef: RefObject<ContextMenu>;
@@ -17,7 +21,13 @@ export const ContextMenuRoot: React.FC<ContextMenuRootProps> = ({
onPasteSystemsAnsConnections,
pasteSystemsAndConnections,
}) => {
const {
data: { options, userPermissions },
} = useMapState();
const items: MenuItem[] = useMemo(() => {
const allowPaste = checkPermissions(userPermissions, options.allowed_paste_for);
return [
{
label: 'Add System',
@@ -27,14 +37,35 @@ export const ContextMenuRoot: React.FC<ContextMenuRootProps> = ({
...(pasteSystemsAndConnections != null
? [
{
label: 'Paste',
icon: 'pi pi-clipboard',
disabled: !allowPaste,
command: onPasteSystemsAnsConnections,
template: () => {
if (allowPaste) {
return (
<WdMenuItem icon="pi pi-clipboard">
Paste
</WdMenuItem>
);
}
return (
<MenuItemWithInfo
infoTitle="Action is blocked because you dont have permission to Paste."
infoClass={clsx(PrimeIcons.QUESTION_CIRCLE, 'text-stone-500 mr-[12px]')}
tooltipWrapperClassName="flex"
>
<WdMenuItem disabled icon="pi pi-clipboard">
Paste
</WdMenuItem>
</MenuItemWithInfo>
);
},
},
]
: []),
];
}, [onAddSystem, onPasteSystemsAnsConnections, pasteSystemsAndConnections]);
}, [userPermissions, options, onAddSystem, pasteSystemsAndConnections, onPasteSystemsAnsConnections]);
return (
<>

View File

@@ -1,6 +1,6 @@
@use "sass:color";
@use '@/hooks/Mapper/components/map/styles/eve-common-variables';
@import '@/hooks/Mapper/components/map/styles/solar-system-node';
@use '@/hooks/Mapper/components/map/styles/solar-system-node' as v;
@keyframes move-stripes {
from {
@@ -26,8 +26,8 @@
background-color: var(--rf-node-bg-color, #202020) !important;
color: var(--rf-text-color, #ffffff);
box-shadow: 0 0 5px rgba($dark-bg, 0.5);
border: 1px solid color.adjust($pastel-blue, $lightness: -10%);
box-shadow: 0 0 5px rgba(v.$dark-bg, 0.5);
border: 1px solid color.adjust(v.$pastel-blue, $lightness: -10%);
border-radius: 5px;
position: relative;
z-index: 3;
@@ -99,7 +99,7 @@
}
&.selected {
border-color: $pastel-pink;
border-color: v.$pastel-pink;
box-shadow: 0 0 10px #9a1af1c2;
}
@@ -113,11 +113,11 @@
bottom: 0;
z-index: -1;
border-color: $neon-color-1;
border-color: v.$neon-color-1;
background: repeating-linear-gradient(
45deg,
$neon-color-3 0px,
$neon-color-3 8px,
v.$neon-color-3 0px,
v.$neon-color-3 8px,
transparent 8px,
transparent 21px
);
@@ -146,7 +146,7 @@
border: 1px solid var(--eve-solar-system-status-color-lookingFor-dark15);
background-image: linear-gradient(275deg, #45ff8f2f, #457fff2f);
&.selected {
border-color: $pastel-pink;
border-color: v.$pastel-pink;
}
}
@@ -347,13 +347,13 @@
.Handle {
min-width: initial;
min-height: initial;
border: 1px solid $pastel-blue;
border: 1px solid v.$pastel-blue;
width: 5px;
height: 5px;
pointer-events: auto;
&.selected {
border-color: $pastel-pink;
border-color: v.$pastel-pink;
}
&.HandleTop {

View File

@@ -0,0 +1,5 @@
import { UserPermission, UserPermissions } from '@/hooks/Mapper/types';
export const checkPermissions = (permissions: Partial<UserPermissions>, targetPermission: UserPermission) => {
return targetPermission != null && permissions[targetPermission];
};

View File

@@ -4,3 +4,4 @@ export * from './getSystemClassStyles';
export * from './getShapeClass';
export * from './getBackgroundClass';
export * from './prepareUnsplashedChunks';
export * from './checkPermissions';

View File

@@ -14,8 +14,27 @@ export const useCommandsCharacters = () => {
const ref = useRef({ update });
ref.current = { update };
const charactersUpdated = useCallback((characters: CommandCharactersUpdated) => {
ref.current.update(() => ({ characters: characters.slice() }));
const charactersUpdated = useCallback((updatedCharacters: CommandCharactersUpdated) => {
ref.current.update(state => {
const existing = state.characters ?? [];
// Put updatedCharacters into a map keyed by ID
const updatedMap = new Map(updatedCharacters.map(c => [c.eve_id, c]));
// 1. Update existing characters when possible
const merged = existing.map(character => {
const updated = updatedMap.get(character.eve_id);
if (updated) {
updatedMap.delete(character.eve_id); // Mark as processed
return { ...character, ...updated };
}
return character;
});
// 2. Any remaining items in updatedMap are NEW characters → add them
const newCharacters = Array.from(updatedMap.values());
return { characters: [...merged, ...newCharacters] };
});
}, []);
const characterAdded = useCallback((value: CommandCharacterAdded) => {

View File

@@ -38,6 +38,8 @@ export const useMapInit = () => {
user_characters,
present_characters,
hubs,
options,
user_permissions,
}: CommandInit) => {
const { update } = ref.current;
@@ -63,6 +65,14 @@ export const useMapInit = () => {
updateData.hubs = hubs;
}
if (options) {
updateData.options = options;
}
if (options) {
updateData.userPermissions = user_permissions;
}
if (systems) {
updateData.systems = systems;
}

View File

@@ -49,87 +49,91 @@ export const useMapHandlers = (ref: ForwardedRef<MapHandlers>, onSelectionChange
const { charactersUpdated, presentCharacters, characterAdded, characterRemoved, characterUpdated } =
useCommandsCharacters();
useImperativeHandle(ref, () => {
return {
command(type, data) {
switch (type) {
case Commands.init:
mapInit(data as CommandInit);
break;
case Commands.addSystems:
setTimeout(() => mapAddSystems(data as CommandAddSystems), 100);
break;
case Commands.updateSystems:
mapUpdateSystems(data as CommandUpdateSystems);
break;
case Commands.removeSystems:
setTimeout(() => removeSystems(data as CommandRemoveSystems), 100);
break;
case Commands.addConnections:
setTimeout(() => addConnections(data as CommandAddConnections), 100);
break;
case Commands.removeConnections:
setTimeout(() => removeConnections(data as CommandRemoveConnections), 100);
break;
case Commands.charactersUpdated:
charactersUpdated(data as CommandCharactersUpdated);
break;
case Commands.characterAdded:
characterAdded(data as CommandCharacterAdded);
break;
case Commands.characterRemoved:
characterRemoved(data as CommandCharacterRemoved);
break;
case Commands.characterUpdated:
characterUpdated(data as CommandCharacterUpdated);
break;
case Commands.presentCharacters:
presentCharacters(data as CommandPresentCharacters);
break;
case Commands.updateConnection:
updateConnection(data as CommandUpdateConnection);
break;
case Commands.mapUpdated:
mapUpdated(data as CommandMapUpdated);
break;
case Commands.killsUpdated:
killsUpdated(data as CommandKillsUpdated);
break;
useImperativeHandle(
ref,
() => {
return {
command(type, data) {
switch (type) {
case Commands.init:
mapInit(data as CommandInit);
break;
case Commands.addSystems:
setTimeout(() => mapAddSystems(data as CommandAddSystems), 100);
break;
case Commands.updateSystems:
mapUpdateSystems(data as CommandUpdateSystems);
break;
case Commands.removeSystems:
setTimeout(() => removeSystems(data as CommandRemoveSystems), 100);
break;
case Commands.addConnections:
setTimeout(() => addConnections(data as CommandAddConnections), 100);
break;
case Commands.removeConnections:
setTimeout(() => removeConnections(data as CommandRemoveConnections), 100);
break;
case Commands.charactersUpdated:
charactersUpdated(data as CommandCharactersUpdated);
break;
case Commands.characterAdded:
characterAdded(data as CommandCharacterAdded);
break;
case Commands.characterRemoved:
characterRemoved(data as CommandCharacterRemoved);
break;
case Commands.characterUpdated:
characterUpdated(data as CommandCharacterUpdated);
break;
case Commands.presentCharacters:
presentCharacters(data as CommandPresentCharacters);
break;
case Commands.updateConnection:
updateConnection(data as CommandUpdateConnection);
break;
case Commands.mapUpdated:
mapUpdated(data as CommandMapUpdated);
break;
case Commands.killsUpdated:
killsUpdated(data as CommandKillsUpdated);
break;
case Commands.centerSystem:
setTimeout(() => {
const systemId = `${data}`;
centerSystem(systemId as CommandSelectSystem);
}, 100);
break;
case Commands.centerSystem:
setTimeout(() => {
const systemId = `${data}`;
centerSystem(systemId as CommandSelectSystem);
}, 100);
break;
case Commands.selectSystem:
selectSystems({ systems: [data as string], delay: 500 });
break;
case Commands.selectSystem:
selectSystems({ systems: [data as string], delay: 500 });
break;
case Commands.selectSystems:
selectSystems(data as CommandSelectSystems);
break;
case Commands.selectSystems:
selectSystems(data as CommandSelectSystems);
break;
case Commands.pingAdded:
case Commands.pingCancelled:
case Commands.routes:
case Commands.signaturesUpdated:
case Commands.linkSignatureToSystem:
case Commands.detailedKillsUpdated:
case Commands.characterActivityData:
case Commands.trackingCharactersData:
case Commands.updateActivity:
case Commands.updateTracking:
case Commands.userSettingsUpdated:
// do nothing
break;
case Commands.pingAdded:
case Commands.pingCancelled:
case Commands.routes:
case Commands.signaturesUpdated:
case Commands.linkSignatureToSystem:
case Commands.detailedKillsUpdated:
case Commands.characterActivityData:
case Commands.trackingCharactersData:
case Commands.updateActivity:
case Commands.updateTracking:
case Commands.userSettingsUpdated:
// do nothing
break;
default:
console.warn(`Map handlers: Unknown command: ${type}`, data);
break;
}
},
};
}, []);
default:
console.warn(`Map handlers: Unknown command: ${type}`, data);
break;
}
},
};
},
[],
);
};

View File

@@ -4,10 +4,13 @@ import { DEFAULT_WIDGETS } from '@/hooks/Mapper/components/mapInterface/constant
import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';
export const MapInterface = () => {
// const [items, setItems] = useState<WindowProps[]>(restoreWindowsFromLS);
const { windowsSettings, updateWidgetSettings } = useMapRootState();
const items = useMemo(() => {
if (Object.keys(windowsSettings).length === 0) {
return [];
}
return windowsSettings.windows
.map(x => {
const content = DEFAULT_WIDGETS.find(y => y.id === x.id)?.content;

View File

@@ -1,12 +1,12 @@
import { Widget } from '@/hooks/Mapper/components/mapInterface/components';
import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';
import { SystemSettingsDialog } from '@/hooks/Mapper/components/mapInterface/components/SystemSettingsDialog/SystemSettingsDialog.tsx';
import { LayoutEventBlocker, SystemView, TooltipPosition, WdImgButton } from '@/hooks/Mapper/components/ui-kit';
import { SystemInfoContent } from './SystemInfoContent';
import { ANOIK_ICON, DOTLAN_ICON, ZKB_ICON } from '@/hooks/Mapper/icons';
import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';
import { getSystemStaticInfo } from '@/hooks/Mapper/mapRootProvider/hooks/useLoadSystemStatic';
import { PrimeIcons } from 'primereact/api';
import { useCallback, useState } from 'react';
import { SystemSettingsDialog } from '@/hooks/Mapper/components/mapInterface/components/SystemSettingsDialog/SystemSettingsDialog.tsx';
import { ANOIK_ICON, DOTLAN_ICON, ZKB_ICON } from '@/hooks/Mapper/icons';
import { getSystemStaticInfo } from '@/hooks/Mapper/mapRootProvider/hooks/useLoadSystemStatic';
import { SystemInfoContent } from './SystemInfoContent';
export const SystemInfo = () => {
const [visible, setVisible] = useState(false);
@@ -48,7 +48,7 @@ export const SystemInfo = () => {
</div>
<LayoutEventBlocker className="flex gap-1 items-center">
<a href={`https://zkillboard.com/system/${systemId}`} rel="noreferrer" target="_blank">
<a href={`https://zkillboard.com/system/${systemId}/`} rel="noreferrer" target="_blank">
<img src={ZKB_ICON} width="14" height="14" className="external-icon" />
</a>
<a href={`http://anoik.is/systems/${solarSystemName}`} rel="noreferrer" target="_blank">

View File

@@ -30,10 +30,14 @@ export const SystemStructures: React.FC = () => {
const processClipboard = useCallback(
(text: string) => {
if (!systemId) {
console.warn('Cannot update structures: no system selected');
return;
}
const updated = processSnippetText(text, structures);
handleUpdateStructures(updated);
},
[structures, handleUpdateStructures],
[systemId, structures, handleUpdateStructures],
);
const handlePaste = useCallback(

View File

@@ -30,9 +30,6 @@ export const SystemStructuresDialog: React.FC<StructuresEditDialogProps> = ({
const { outCommand } = useMapRootState();
const [prevQuery, setPrevQuery] = useState('');
const [prevResults, setPrevResults] = useState<{ label: string; value: string }[]>([]);
useEffect(() => {
if (structure) {
setEditData(structure);
@@ -46,34 +43,24 @@ export const SystemStructuresDialog: React.FC<StructuresEditDialogProps> = ({
// Searching corporation owners via auto-complete
const searchOwners = useCallback(
async (e: { query: string }) => {
const newQuery = e.query.trim();
if (!newQuery) {
const query = e.query.trim();
if (!query) {
setOwnerSuggestions([]);
return;
}
// If user typed more text but we have partial match in prevResults
if (newQuery.startsWith(prevQuery) && prevResults.length > 0) {
const filtered = prevResults.filter(item => item.label.toLowerCase().includes(newQuery.toLowerCase()));
setOwnerSuggestions(filtered);
return;
}
try {
// TODO fix it
const { results = [] } = await outCommand({
type: OutCommand.getCorporationNames,
data: { search: newQuery },
data: { search: query },
});
setOwnerSuggestions(results);
setPrevQuery(newQuery);
setPrevResults(results);
} catch (err) {
console.error('Failed to fetch owners:', err);
setOwnerSuggestions([]);
}
},
[prevQuery, prevResults, outCommand],
[outCommand],
);
const handleChange = (field: keyof StructureItem, val: string | Date) => {
@@ -122,7 +109,6 @@ export const SystemStructuresDialog: React.FC<StructuresEditDialogProps> = ({
// fetch corporation ticker if we have an ownerId
if (editData.ownerId) {
try {
// TODO fix it
const { ticker } = await outCommand({
type: OutCommand.getCorporationTicker,
data: { corp_id: editData.ownerId },

View File

@@ -56,6 +56,11 @@ export function useSystemStructures({ systemId, outCommand }: UseSystemStructure
const handleUpdateStructures = useCallback(
async (newList: StructureItem[]) => {
if (!systemId) {
console.warn('Cannot update structures: systemId is undefined');
return;
}
const { added, updated, removed } = getActualStructures(structures, newList);
const sanitizedAdded = added.map(sanitizeIds);

View File

@@ -10,9 +10,14 @@ import { useCallback } from 'react';
import { TooltipPosition, WdButton, WdTooltipWrapper } from '@/hooks/Mapper/components/ui-kit';
import { ConfirmPopup } from 'primereact/confirmpopup';
import { useConfirmPopup } from '@/hooks/Mapper/hooks';
import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';
export const CommonSettings = () => {
const { renderSettingItem } = useMapSettings();
const {
storedSettings: { resetSettings },
} = useMapRootState();
const { cfShow, cfHide, cfVisible, cfRef } = useConfirmPopup();
const renderSettingsList = useCallback(
@@ -22,7 +27,7 @@ export const CommonSettings = () => {
[renderSettingItem],
);
const handleResetSettings = () => {};
const handleResetSettings = useCallback(() => resetSettings(), [resetSettings]);
return (
<div className="flex flex-col h-full gap-1">

View File

@@ -1,6 +1,6 @@
@use "sass:color";
@use '@/hooks/Mapper/components/map/styles/eve-common-variables';
@import '@/hooks/Mapper/components/map/styles/solar-system-node';
@use '@/hooks/Mapper/components/map/styles/solar-system-node' as v;
:root {
--rf-has-user-characters: #ffc75d;
@@ -108,7 +108,7 @@
}
&.selected {
border-color: $pastel-pink;
border-color: v.$pastel-pink;
box-shadow: 0 0 10px #9a1af1c2;
}
@@ -122,11 +122,11 @@
bottom: 0;
z-index: -1;
border-color: $neon-color-1;
border-color: v.$neon-color-1;
background: repeating-linear-gradient(
45deg,
$neon-color-3 0px,
$neon-color-3 8px,
v.$neon-color-3 0px,
v.$neon-color-3 8px,
transparent 8px,
transparent 21px
);
@@ -152,7 +152,7 @@
&.eve-system-status-lookingFor {
background-image: linear-gradient(275deg, #45ff8f2f, #457fff2f);
&.selected {
border-color: $pastel-pink;
border-color: v.$pastel-pink;
}
}

View File

@@ -4,8 +4,17 @@ import { WdTooltipWrapper } from '@/hooks/Mapper/components/ui-kit/WdTooltipWrap
import { TooltipPosition } from '@/hooks/Mapper/components/ui-kit/WdTooltip';
import clsx from 'clsx';
type MenuItemWithInfoProps = { infoTitle: ReactNode; infoClass?: string } & WithChildren;
export const MenuItemWithInfo = ({ children, infoClass, infoTitle }: MenuItemWithInfoProps) => {
type MenuItemWithInfoProps = {
infoTitle: ReactNode;
infoClass?: string;
tooltipWrapperClassName?: string;
} & WithChildren;
export const MenuItemWithInfo = ({
children,
infoClass,
infoTitle,
tooltipWrapperClassName,
}: MenuItemWithInfoProps) => {
return (
<div className="flex justify-between w-full h-full items-center">
{children}
@@ -13,6 +22,7 @@ export const MenuItemWithInfo = ({ children, infoClass, infoTitle }: MenuItemWit
content={infoTitle}
position={TooltipPosition.top}
className="!opacity-100 !pointer-events-auto"
wrapperClassName={tooltipWrapperClassName}
>
<div className={clsx('pi text-orange-400', infoClass)} />
</WdTooltipWrapper>

View File

@@ -1,13 +1,18 @@
import { WithChildren } from '@/hooks/Mapper/types/common.ts';
import { WithChildren, WithClassName } from '@/hooks/Mapper/types/common.ts';
import clsx from 'clsx';
type WdMenuItemProps = { icon?: string; disabled?: boolean } & WithChildren;
export const WdMenuItem = ({ children, icon, disabled }: WdMenuItemProps) => {
type WdMenuItemProps = { icon?: string; disabled?: boolean } & WithChildren & WithClassName;
export const WdMenuItem = ({ children, icon, disabled, className }: WdMenuItemProps) => {
return (
<a
className={clsx('flex gap-[6px] w-full h-full items-center px-[12px] !py-0 ml-[-2px]', 'p-menuitem-link', {
'p-disabled': disabled,
})}
className={clsx(
'flex gap-[6px] w-full h-full items-center px-[12px] !py-0',
'p-menuitem-link',
{
'p-disabled': disabled,
},
className,
)}
>
{icon && <div className={clsx('min-w-[20px]', icon)}></div>}
<div className="w-full">{children}</div>

View File

@@ -10,6 +10,7 @@ export type WdTooltipWrapperProps = {
interactive?: boolean;
smallPaddings?: boolean;
tooltipClassName?: string;
wrapperClassName?: string;
} & Omit<HTMLProps<HTMLDivElement>, 'content' | 'size'> &
Omit<TooltipProps, 'content'>;
@@ -26,6 +27,7 @@ export const WdTooltipWrapper = forwardRef<WdTooltipHandlers, WdTooltipWrapperPr
smallPaddings,
size,
tooltipClassName,
wrapperClassName,
...props
},
forwardedRef,
@@ -36,7 +38,7 @@ export const WdTooltipWrapper = forwardRef<WdTooltipHandlers, WdTooltipWrapperPr
return (
<div className={clsx(classes.WdTooltipWrapperRoot, className)} {...props}>
{targetSelector ? <>{children}</> : <div className={autoClass}>{children}</div>}
{targetSelector ? <>{children}</> : <div className={clsx(autoClass, wrapperClassName)}>{children}</div>}
<WdTooltip
ref={forwardedRef}

View File

@@ -1,12 +1,5 @@
import { PingsPlacement } from '@/hooks/Mapper/mapRootProvider/types.ts';
export enum SESSION_KEY {
viewPort = 'viewPort',
windows = 'windows',
windowsVisible = 'windowsVisible',
routes = 'routes',
}
export const SYSTEM_FOCUSED_LIFETIME = 10000;
export const GRADIENT_MENU_ACTIVE_CLASSES = 'bg-gradient-to-br from-transparent/10 to-fuchsia-300/10';

View File

@@ -6,9 +6,11 @@ import {
MapUnionTypes,
OutCommandHandler,
SolarSystemConnection,
StringBoolean,
TrackingCharacter,
UseCharactersCacheData,
UseCommentsData,
UserPermission,
} from '@/hooks/Mapper/types';
import { useCharactersCache, useComments, useMapRootHandlers } from '@/hooks/Mapper/mapRootProvider/hooks';
import { WithChildren } from '@/hooks/Mapper/types/common.ts';
@@ -80,7 +82,16 @@ const INITIAL_DATA: MapRootData = {
selectedSystems: [],
selectedConnections: [],
userPermissions: {},
options: {},
options: {
allowed_copy_for: UserPermission.VIEW_SYSTEM,
allowed_paste_for: UserPermission.VIEW_SYSTEM,
layout: '',
restrict_offline_showing: 'false',
show_linked_signature_id: 'false',
show_linked_signature_id_temp_name: 'false',
show_temp_system_name: 'false',
store_custom_labels: 'false',
},
isSubscriptionActive: false,
linkSignatureToSystem: null,
mainCharacterEveId: null,
@@ -135,7 +146,7 @@ export interface MapRootContextProps {
hasOldSettings: boolean;
getSettingsForExport(): string | undefined;
applySettings(settings: MapUserSettings): boolean;
resetSettings(settings: MapUserSettings): void;
resetSettings(): void;
checkOldSettings(): void;
};
}

View File

@@ -1,5 +1,4 @@
import { useMapRootState } from '@/hooks/Mapper/mapRootProvider';
import { useCallback, useRef } from 'react';
import {
CommandCharacterAdded,
CommandCharacterRemoved,
@@ -7,6 +6,7 @@ import {
CommandCharacterUpdated,
CommandPresentCharacters,
} from '@/hooks/Mapper/types';
import { useCallback, useRef } from 'react';
export const useCommandsCharacters = () => {
const { update } = useMapRootState();
@@ -14,8 +14,27 @@ export const useCommandsCharacters = () => {
const ref = useRef({ update });
ref.current = { update };
const charactersUpdated = useCallback((characters: CommandCharactersUpdated) => {
ref.current.update(() => ({ characters: characters.slice() }));
const charactersUpdated = useCallback((updatedCharacters: CommandCharactersUpdated) => {
ref.current.update(state => {
const existing = state.characters ?? [];
// Put updatedCharacters into a map keyed by ID
const updatedMap = new Map(updatedCharacters.map(c => [c.eve_id, c]));
// 1. Update existing characters when possible
const merged = existing.map(character => {
const updated = updatedMap.get(character.eve_id);
if (updated) {
updatedMap.delete(character.eve_id); // Mark as processed
return { ...character, ...updated };
}
return character;
});
// 2. Any remaining items in updatedMap are NEW characters → add them
const newCharacters = Array.from(updatedMap.values());
return { characters: [...merged, ...newCharacters] };
});
}, []);
const characterAdded = useCallback((value: CommandCharacterAdded) => {

View File

@@ -148,10 +148,6 @@ export const useMapUserSettings = ({ map_slug }: MapRootData, outCommand: OutCom
setHasOldSettings(!!(widgetsOld || interfaceSettings || widgetRoutes || widgetLocal || widgetKills || onTheMapOld));
}, []);
useEffect(() => {
checkOldSettings();
}, [checkOldSettings]);
const getSettingsForExport = useCallback(() => {
const { map_slug } = ref.current;
@@ -166,6 +162,24 @@ export const useMapUserSettings = ({ map_slug }: MapRootData, outCommand: OutCom
applySettings(createDefaultStoredSettings());
}, [applySettings]);
useEffect(() => {
checkOldSettings();
}, [checkOldSettings]);
// IN Case if in runtime someone clear settings
useEffect(() => {
if (Object.keys(windowsSettings).length !== 0) {
return;
}
if (!isReady) {
return;
}
resetSettings();
location.reload();
}, [isReady, resetSettings, windowsSettings]);
return {
isReady,
hasOldSettings,

View File

@@ -33,7 +33,6 @@ export type CharacterTypeRaw = {
corporation_id: number;
corporation_name: string;
corporation_ticker: string;
tracking_paused: boolean;
};
export interface TrackingCharacter {

View File

@@ -9,3 +9,4 @@ export * from './connectionPassages';
export * from './permissions';
export * from './comment';
export * from './ping';
export * from './options';

View File

@@ -1,4 +1,4 @@
import { CommentType, PingData, SystemSignature, UserPermissions } from '@/hooks/Mapper/types';
import { CommentType, MapOptions, PingData, SystemSignature, UserPermissions } from '@/hooks/Mapper/types';
import { ActivitySummary, CharacterTypeRaw, TrackingCharacter } from '@/hooks/Mapper/types/character.ts';
import { SolarSystemConnection } from '@/hooks/Mapper/types/connection.ts';
import { DetailedKill, Kill } from '@/hooks/Mapper/types/kills.ts';
@@ -94,7 +94,7 @@ export type CommandInit = {
hubs: string[];
user_hubs: string[];
routes: RoutesList;
options: Record<string, string | boolean>;
options: MapOptions;
reset?: boolean;
is_subscription_active?: boolean;
main_character_eve_id?: string | null;

View File

@@ -4,7 +4,7 @@ import { CharacterTypeRaw } from '@/hooks/Mapper/types/character.ts';
import { SolarSystemRawType } from '@/hooks/Mapper/types/system.ts';
import { RoutesList } from '@/hooks/Mapper/types/routes.ts';
import { SolarSystemConnection } from '@/hooks/Mapper/types/connection.ts';
import { PingData, UserPermissions } from '@/hooks/Mapper/types';
import { MapOptions, PingData, UserPermissions } from '@/hooks/Mapper/types';
import { SystemSignature } from '@/hooks/Mapper/types/signatures';
export type MapUnionTypes = {
@@ -23,7 +23,7 @@ export type MapUnionTypes = {
kills: Record<number, number>;
connections: SolarSystemConnection[];
userPermissions: Partial<UserPermissions>;
options: Record<string, string | boolean>;
options: MapOptions;
isSubscriptionActive: boolean;
mainCharacterEveId: string | null;

View File

@@ -0,0 +1,14 @@
import { UserPermission } from '@/hooks/Mapper/types/permissions.ts';
export type StringBoolean = 'true' | 'false';
export type MapOptions = {
allowed_copy_for: UserPermission;
allowed_paste_for: UserPermission;
layout: string;
restrict_offline_showing: StringBoolean;
show_linked_signature_id: StringBoolean;
show_linked_signature_id_temp_name: StringBoolean;
show_temp_system_name: StringBoolean;
store_custom_labels: StringBoolean;
};

View File

@@ -12,11 +12,11 @@ const animateBg = function (bgCanvas) {
*/
const randomInRange = (max, min) => Math.floor(Math.random() * (max - min + 1)) + min;
const BASE_SIZE = 1;
const VELOCITY_INC = 1.01;
const VELOCITY_INC = 1.002;
const VELOCITY_INIT_INC = 0.525;
const JUMP_VELOCITY_INC = 0.55;
const JUMP_SIZE_INC = 1.15;
const SIZE_INC = 1.01;
const SIZE_INC = 1.002;
const RAD = Math.PI / 180;
const WARP_COLORS = [
[197, 239, 247],

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

View File

@@ -25,13 +25,9 @@ config :wanderer_app,
ecto_repos: [WandererApp.Repo],
ash_domains: [WandererApp.Api],
generators: [timestamp_type: :utc_datetime],
ddrt: DDRT,
ddrt: WandererApp.Map.CacheRTree,
logger: Logger,
pubsub_client: Phoenix.PubSub,
wanderer_kills_base_url:
System.get_env("WANDERER_KILLS_BASE_URL", "ws://host.docker.internal:4004"),
wanderer_kills_service_enabled:
System.get_env("WANDERER_KILLS_SERVICE_ENABLED", "false") == "true"
pubsub_client: Phoenix.PubSub
config :wanderer_app, WandererAppWeb.Endpoint,
adapter: Bandit.PhoenixAdapter,

View File

@@ -4,7 +4,7 @@ import Config
config :wanderer_app, WandererApp.Repo,
username: "postgres",
password: "postgres",
hostname: System.get_env("DB_HOST", "localhost"),
hostname: "localhost",
database: "wanderer_dev",
stacktrace: true,
show_sensitive_data_on_connection_error: true,

View File

@@ -258,7 +258,9 @@ config :wanderer_app, WandererApp.Scheduler,
timezone: :utc,
jobs:
[
{"@daily", {WandererApp.Map.Audit, :archive, []}}
{"@daily", {WandererApp.Map.Audit, :archive, []}},
{"@daily", {WandererApp.Map.GarbageCollector, :cleanup_chain_passages, []}},
{"@daily", {WandererApp.Map.GarbageCollector, :cleanup_system_signatures, []}}
] ++ sheduler_jobs,
timeout: :infinity

View File

@@ -1,7 +1,25 @@
defmodule WandererApp.Api.Changes.SlugifyName do
@moduledoc """
Ensures map slugs are unique by:
1. Slugifying the provided slug/name
2. Checking for existing slugs (optimization)
3. Finding next available slug with numeric suffix if needed
4. Relying on database unique constraint as final arbiter
Race Condition Mitigation:
- Optimistic check reduces DB roundtrips for most cases
- Database unique index ensures no duplicates slip through
- Proper error messages for constraint violations
- Telemetry events for monitoring conflicts
"""
use Ash.Resource.Change
alias Ash.Changeset
require Ash.Query
require Logger
# Maximum number of attempts to find a unique slug
@max_attempts 100
@impl true
@spec change(Changeset.t(), keyword, Change.context()) :: Changeset.t()
@@ -12,10 +30,95 @@ defmodule WandererApp.Api.Changes.SlugifyName do
defp maybe_slugify_name(changeset) do
case Changeset.get_attribute(changeset, :slug) do
slug when is_binary(slug) ->
Changeset.force_change_attribute(changeset, :slug, Slug.slugify(slug))
base_slug = Slug.slugify(slug)
unique_slug = ensure_unique_slug(changeset, base_slug)
Changeset.force_change_attribute(changeset, :slug, unique_slug)
_ ->
changeset
end
end
defp ensure_unique_slug(changeset, base_slug) do
# Get the current record ID if this is an update operation
current_id = Changeset.get_attribute(changeset, :id)
# Check if the base slug is available (optimization to avoid numeric suffixes when possible)
if slug_available?(base_slug, current_id) do
base_slug
else
# Find the next available slug with a numeric suffix
find_available_slug(base_slug, current_id, 2)
end
end
defp find_available_slug(base_slug, current_id, n) when n <= @max_attempts do
candidate_slug = "#{base_slug}-#{n}"
if slug_available?(candidate_slug, current_id) do
# Emit telemetry when we had to use a suffix (indicates potential conflict)
:telemetry.execute(
[:wanderer_app, :map, :slug_suffix_used],
%{suffix_number: n},
%{base_slug: base_slug, final_slug: candidate_slug}
)
candidate_slug
else
find_available_slug(base_slug, current_id, n + 1)
end
end
defp find_available_slug(base_slug, _current_id, n) when n > @max_attempts do
# Fallback: use timestamp suffix if we've tried too many numeric suffixes
# This handles edge cases where many maps have similar names
timestamp = System.system_time(:millisecond)
fallback_slug = "#{base_slug}-#{timestamp}"
Logger.warning(
"Slug generation exceeded #{@max_attempts} attempts for '#{base_slug}', using timestamp fallback",
base_slug: base_slug,
fallback_slug: fallback_slug
)
:telemetry.execute(
[:wanderer_app, :map, :slug_fallback_used],
%{attempts: n},
%{base_slug: base_slug, fallback_slug: fallback_slug}
)
fallback_slug
end
defp slug_available?(slug, current_id) do
query =
WandererApp.Api.Map
|> Ash.Query.filter(slug == ^slug)
|> then(fn query ->
# Exclude the current record if this is an update
if current_id do
Ash.Query.filter(query, id != ^current_id)
else
query
end
end)
|> Ash.Query.limit(1)
case Ash.read(query) do
{:ok, []} ->
true
{:ok, _existing} ->
false
{:error, error} ->
# Log error but be conservative - assume slug is not available
Logger.warning("Error checking slug availability",
slug: slug,
error: inspect(error)
)
false
end
end
end

View File

@@ -30,14 +30,14 @@ defmodule WandererApp.Api.Map do
# Routes configuration
routes do
base("/maps")
get(:read)
index :read
get(:by_slug, route: "/:slug")
# index :read
post(:new)
patch(:update)
delete(:destroy)
# Custom action for map duplication
post(:duplicate, route: "/:id/duplicate")
# post(:duplicate, route: "/:id/duplicate")
end
end

View File

@@ -9,6 +9,11 @@ defmodule WandererApp.Api.MapConnection do
postgres do
repo(WandererApp.Repo)
table("map_chain_v1")
custom_indexes do
# Critical index for list_connections query performance
index [:map_id], name: "map_chain_v1_map_id_index"
end
end
json_api do

View File

@@ -65,7 +65,7 @@ defmodule WandererApp.Api.MapSubscription do
defaults [:create, :read, :update, :destroy]
read :all_active do
prepare build(sort: [updated_at: :asc])
prepare build(sort: [updated_at: :asc], load: [:map])
filter(expr(status == :active))
end

View File

@@ -1,6 +1,26 @@
defmodule WandererApp.Api.MapSystem do
@moduledoc false
@derive {Jason.Encoder,
only: [
:id,
:map_id,
:name,
:solar_system_id,
:position_x,
:position_y,
:status,
:visible,
:locked,
:custom_name,
:description,
:tag,
:temporary_name,
:labels,
:added_at,
:linked_sig_eve_id
]}
use Ash.Resource,
domain: WandererApp.Api,
data_layer: AshPostgres.DataLayer,
@@ -9,6 +29,11 @@ defmodule WandererApp.Api.MapSystem do
postgres do
repo(WandererApp.Repo)
table("map_system_v1")
custom_indexes do
# Partial index for efficient visible systems query
index [:map_id], where: "visible = true", name: "map_system_v1_map_id_visible_index"
end
end
json_api do
@@ -16,6 +41,17 @@ defmodule WandererApp.Api.MapSystem do
includes([:map])
default_fields([
:name,
:solar_system_id,
:status,
:custom_name,
:description,
:tag,
:temporary_name,
:labels
])
derive_filter?(true)
derive_sort?(true)
@@ -31,6 +67,7 @@ defmodule WandererApp.Api.MapSystem do
code_interface do
define(:create, action: :create)
define(:upsert, action: :upsert)
define(:destroy, action: :destroy)
define(:by_id,
@@ -93,6 +130,31 @@ defmodule WandererApp.Api.MapSystem do
defaults [:create, :update, :destroy]
create :upsert do
primary? false
upsert? true
upsert_identity :map_solar_system_id
# Update these fields on conflict
upsert_fields [
:position_x,
:position_y,
:visible,
:name
]
accept [
:map_id,
:solar_system_id,
:name,
:position_x,
:position_y,
:visible,
:locked,
:status
]
end
read :read do
primary?(true)

View File

@@ -38,7 +38,12 @@ defmodule WandererApp.Application do
),
Supervisor.child_spec({Cachex, name: :ship_types_cache}, id: :ship_types_cache_worker),
Supervisor.child_spec({Cachex, name: :character_cache}, id: :character_cache_worker),
Supervisor.child_spec({Cachex, name: :acl_cache}, id: :acl_cache_worker),
Supervisor.child_spec({Cachex, name: :map_cache}, id: :map_cache_worker),
Supervisor.child_spec({Cachex, name: :map_pool_cache},
id: :map_pool_cache_worker
),
Supervisor.child_spec({Cachex, name: :map_state_cache}, id: :map_state_cache_worker),
Supervisor.child_spec({Cachex, name: :character_state_cache},
id: :character_state_cache_worker
),
@@ -48,10 +53,7 @@ defmodule WandererApp.Application do
Supervisor.child_spec({Cachex, name: :wanderer_app_cache},
id: :wanderer_app_cache_worker
),
{Registry, keys: :unique, name: WandererApp.MapRegistry},
{Registry, keys: :unique, name: WandererApp.Character.TrackerRegistry},
{PartitionSupervisor,
child_spec: DynamicSupervisor, name: WandererApp.Map.DynamicSupervisors},
{PartitionSupervisor,
child_spec: DynamicSupervisor, name: WandererApp.Character.DynamicSupervisors},
WandererAppWeb.PresenceGracePeriodManager,
@@ -78,6 +80,7 @@ defmodule WandererApp.Application do
WandererApp.Server.ServerStatusTracker,
WandererApp.Server.TheraDataFetcher,
{WandererApp.Character.TrackerPoolSupervisor, []},
{WandererApp.Map.MapPoolSupervisor, []},
WandererApp.Character.TrackerManager,
WandererApp.Map.Manager
] ++ security_audit_children

View File

@@ -73,6 +73,54 @@ defmodule WandererApp.Cache do
def filter_by_attr_in(type, attr, includes), do: type |> get() |> filter_in(attr, includes)
@doc """
Batch lookup multiple keys from cache.
Returns a map of key => value pairs, with `default` used for missing keys.
"""
def lookup_all(keys, default \\ nil) when is_list(keys) do
# Get all values from cache
values = get_all(keys)
# Build result map with defaults for missing keys
result =
keys
|> Enum.map(fn key ->
value = Map.get(values, key, default)
{key, value}
end)
|> Map.new()
{:ok, result}
end
@doc """
Batch insert multiple key-value pairs into cache.
Accepts a map of key => value pairs or a list of {key, value} tuples.
Skips nil values (deletes the key instead).
"""
def insert_all(entries, opts \\ [])
def insert_all(entries, opts) when is_map(entries) do
# Filter out nil values and delete those keys
{to_delete, to_insert} =
entries
|> Enum.split_with(fn {_key, value} -> is_nil(value) end)
# Delete keys with nil values
Enum.each(to_delete, fn {key, _} -> delete(key) end)
# Insert non-nil values
unless Enum.empty?(to_insert) do
put_all(to_insert, opts)
end
:ok
end
def insert_all(entries, opts) when is_list(entries) do
insert_all(Map.new(entries), opts)
end
defp find(list, %{} = attrs, match: match) do
list
|> Enum.find(fn item ->

View File

@@ -1,6 +1,8 @@
defmodule WandererApp.CachedInfo do
require Logger
alias WandererAppWeb.Helpers.APIUtils
def run(_arg) do
:ok = cache_trig_systems()
end
@@ -37,6 +39,8 @@ defmodule WandererApp.CachedInfo do
end
def get_system_static_info(solar_system_id) do
{:ok, solar_system_id} = APIUtils.parse_int(solar_system_id)
case Cachex.get(:system_static_info_cache, solar_system_id) do
{:ok, nil} ->
case WandererApp.Api.MapSolarSystem.read() do
@@ -116,7 +120,7 @@ defmodule WandererApp.CachedInfo do
def get_solar_system_jumps() do
case WandererApp.Cache.lookup(:solar_system_jumps) do
{:ok, nil} ->
data = WandererApp.EveDataService.get_solar_system_jumps_data()
{:ok, data} = WandererApp.Api.MapSolarSystemJumps.read()
cache_items(data, :solar_system_jumps)

View File

@@ -4,6 +4,8 @@ defmodule WandererApp.Character do
require Logger
alias WandererApp.Cache
@read_character_wallet_scope "esi-wallet.read_character_wallet.v1"
@read_corp_wallet_scope "esi-wallet.read_corporation_wallets.v1"
@@ -16,6 +18,9 @@ defmodule WandererApp.Character do
ship_item_id: nil
}
@present_on_map_ttl :timer.seconds(10)
@not_present_on_map_ttl :timer.minutes(2)
def get_by_eve_id(character_eve_id) when is_binary(character_eve_id) do
WandererApp.Api.Character.by_eve_id(character_eve_id)
end
@@ -28,7 +33,7 @@ defmodule WandererApp.Character do
Cachex.put(:character_cache, character_id, character)
{:ok, character}
error ->
_error ->
{:error, :not_found}
end
@@ -41,7 +46,7 @@ defmodule WandererApp.Character do
def get_character!(character_id) do
case get_character(character_id) do
{:ok, character} ->
{:ok, character} when not is_nil(character) ->
character
_ ->
@@ -50,16 +55,10 @@ defmodule WandererApp.Character do
end
end
def get_map_character(map_id, character_id, opts \\ []) do
def get_map_character(map_id, character_id) do
case get_character(character_id) do
{:ok, character} ->
# If we are forcing the character to not be present, we merge the character state with map settings
character_is_present =
if opts |> Keyword.get(:not_present, false) do
false
else
WandererApp.Character.TrackerManager.Impl.character_is_present(map_id, character_id)
end
{:ok, character} when not is_nil(character) ->
character_is_present = character_is_present?(map_id, character_id)
{:ok,
character
@@ -187,12 +186,16 @@ defmodule WandererApp.Character do
{:ok, result} ->
{:ok, result |> prepare_search_results()}
{:error, error} ->
Logger.warning("#{__MODULE__} failed search: #{inspect(error)}")
{:ok, []}
error ->
Logger.warning("#{__MODULE__} failed search: #{inspect(error)}")
{:ok, []}
end
error ->
_error ->
{:ok, []}
end
end
@@ -263,22 +266,26 @@ defmodule WandererApp.Character do
end
end
defp maybe_merge_map_character_settings(%{id: character_id} = character, _map_id, true) do
{:ok, tracking_paused} =
WandererApp.Cache.lookup("character:#{character_id}:tracking_paused", false)
@decorate cacheable(
cache: Cache,
key: "character-present-#{map_id}-#{character_id}",
opts: [ttl: @present_on_map_ttl]
)
defp character_is_present?(map_id, character_id),
do: WandererApp.Character.TrackerManager.Impl.character_is_present(map_id, character_id)
character
|> Map.merge(%{tracking_paused: tracking_paused})
end
defp maybe_merge_map_character_settings(character, _map_id, true), do: character
@decorate cacheable(
cache: Cache,
key: "not-present-map-character-#{map_id}-#{character_id}",
opts: [ttl: @not_present_on_map_ttl]
)
defp maybe_merge_map_character_settings(
%{id: character_id} = character,
map_id,
_character_is_present
false
) do
{:ok, tracking_paused} =
WandererApp.Cache.lookup("character:#{character_id}:tracking_paused", false)
WandererApp.MapCharacterSettingsRepo.get(map_id, character_id)
|> case do
{:ok, settings} when not is_nil(settings) ->
@@ -296,7 +303,7 @@ defmodule WandererApp.Character do
character
|> Map.merge(@default_character_tracking_data)
end
|> Map.merge(%{online: false, tracking_paused: tracking_paused})
|> Map.merge(%{online: false})
end
defp prepare_search_results(result) do

View File

@@ -14,8 +14,8 @@ defmodule WandererApp.Character.Tracker do
active_maps: [],
is_online: false,
track_online: true,
track_location: true,
track_ship: true,
track_location: false,
track_ship: false,
track_wallet: false,
status: "new"
]
@@ -36,14 +36,11 @@ defmodule WandererApp.Character.Tracker do
status: binary()
}
@pause_tracking_timeout :timer.minutes(60 * 10)
@offline_timeout :timer.minutes(5)
@online_error_timeout :timer.minutes(10)
@ship_error_timeout :timer.minutes(10)
@location_error_timeout :timer.minutes(10)
@location_error_timeout :timer.seconds(30)
@location_error_threshold 3
@online_forbidden_ttl :timer.seconds(7)
@offline_check_delay_ttl :timer.seconds(15)
@online_limit_ttl :timer.seconds(7)
@forbidden_ttl :timer.seconds(10)
@limit_ttl :timer.seconds(5)
@location_limit_ttl :timer.seconds(1)
@@ -93,81 +90,16 @@ defmodule WandererApp.Character.Tracker do
end
end
def check_online_errors(character_id),
do: check_tracking_errors(character_id, "online", @online_error_timeout)
def check_ship_errors(character_id),
do: check_tracking_errors(character_id, "ship", @ship_error_timeout)
def check_location_errors(character_id),
do: check_tracking_errors(character_id, "location", @location_error_timeout)
defp check_tracking_errors(character_id, type, timeout) do
WandererApp.Cache.lookup!("character:#{character_id}:#{type}_error_time")
|> case do
nil ->
:skip
error_time ->
duration = DateTime.diff(DateTime.utc_now(), error_time, :millisecond)
if duration >= timeout do
pause_tracking(character_id)
WandererApp.Cache.delete("character:#{character_id}:#{type}_error_time")
:ok
else
:skip
end
end
defp increment_location_error_count(character_id) do
cache_key = "character:#{character_id}:location_error_count"
current_count = WandererApp.Cache.lookup!(cache_key) || 0
new_count = current_count + 1
WandererApp.Cache.put(cache_key, new_count)
new_count
end
defp pause_tracking(character_id) do
if WandererApp.Character.can_pause_tracking?(character_id) &&
not WandererApp.Cache.has_key?("character:#{character_id}:tracking_paused") do
# Log character tracking statistics before pausing
Logger.debug(fn ->
{:ok, character_state} = WandererApp.Character.get_character_state(character_id)
"CHARACTER_TRACKING_PAUSED: Character tracking paused due to sustained errors: #{inspect(character_id: character_id,
active_maps: length(character_state.active_maps),
is_online: character_state.is_online,
tracking_duration_minutes: get_tracking_duration_minutes(character_id))}"
end)
WandererApp.Cache.delete("character:#{character_id}:online_forbidden")
WandererApp.Cache.delete("character:#{character_id}:online_error_time")
WandererApp.Cache.delete("character:#{character_id}:ship_error_time")
WandererApp.Cache.delete("character:#{character_id}:location_error_time")
WandererApp.Character.update_character(character_id, %{online: false})
WandererApp.Character.update_character_state(character_id, %{
is_online: false
})
# Original log kept for backward compatibility
Logger.warning("[CharacterTracker] paused for #{character_id}")
WandererApp.Cache.put(
"character:#{character_id}:tracking_paused",
true,
ttl: @pause_tracking_timeout
)
{:ok, %{solar_system_id: solar_system_id}} =
WandererApp.Character.get_character(character_id)
{:ok, %{active_maps: active_maps}} =
WandererApp.Character.get_character_state(character_id)
active_maps
|> Enum.each(fn map_id ->
WandererApp.Cache.put(
"map:#{map_id}:character:#{character_id}:start_solar_system_id",
solar_system_id
)
end)
end
defp reset_location_error_count(character_id) do
WandererApp.Cache.delete("character:#{character_id}:location_error_count")
end
def update_settings(character_id, track_settings) do
@@ -194,8 +126,7 @@ defmodule WandererApp.Character.Tracker do
case WandererApp.Character.get_character(character_id) do
{:ok, %{eve_id: eve_id, access_token: access_token, tracking_pool: tracking_pool}}
when not is_nil(access_token) ->
(WandererApp.Cache.has_key?("character:#{character_id}:online_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:tracking_paused"))
WandererApp.Cache.has_key?("character:#{character_id}:online_forbidden")
|> case do
true ->
{:error, :skipped}
@@ -224,9 +155,10 @@ defmodule WandererApp.Character.Tracker do
)
end
if online.online == true && online.online != is_online do
if online.online == true && not is_online do
WandererApp.Cache.delete("character:#{character_id}:ship_error_time")
WandererApp.Cache.delete("character:#{character_id}:location_error_time")
WandererApp.Cache.delete("character:#{character_id}:location_error_count")
WandererApp.Cache.delete("character:#{character_id}:info_forbidden")
WandererApp.Cache.delete("character:#{character_id}:ship_forbidden")
WandererApp.Cache.delete("character:#{character_id}:location_forbidden")
@@ -294,12 +226,6 @@ defmodule WandererApp.Character.Tracker do
{:error, :error_limited, headers} ->
reset_timeout = get_reset_timeout(headers)
reset_seconds =
Map.get(headers, "x-esi-error-limit-reset", ["unknown"]) |> List.first()
remaining =
Map.get(headers, "x-esi-error-limit-remain", ["unknown"]) |> List.first()
WandererApp.Cache.put(
"character:#{character_id}:online_forbidden",
true,
@@ -357,8 +283,7 @@ defmodule WandererApp.Character.Tracker do
defp get_reset_timeout(_headers, default_timeout), do: default_timeout
def update_info(character_id) do
(WandererApp.Cache.has_key?("character:#{character_id}:info_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:tracking_paused"))
WandererApp.Cache.has_key?("character:#{character_id}:info_forbidden")
|> case do
true ->
{:error, :skipped}
@@ -442,8 +367,7 @@ defmodule WandererApp.Character.Tracker do
{:ok, %{eve_id: eve_id, access_token: access_token, tracking_pool: tracking_pool}}
when not is_nil(access_token) ->
(WandererApp.Cache.has_key?("character:#{character_id}:online_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:ship_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:tracking_paused"))
WandererApp.Cache.has_key?("character:#{character_id}:ship_forbidden"))
|> case do
true ->
{:error, :skipped}
@@ -552,7 +476,7 @@ defmodule WandererApp.Character.Tracker do
case WandererApp.Character.get_character(character_id) do
{:ok, %{eve_id: eve_id, access_token: access_token, tracking_pool: tracking_pool}}
when not is_nil(access_token) ->
WandererApp.Cache.has_key?("character:#{character_id}:tracking_paused")
WandererApp.Cache.has_key?("character:#{character_id}:location_forbidden")
|> case do
true ->
{:error, :skipped}
@@ -565,19 +489,33 @@ defmodule WandererApp.Character.Tracker do
character_id: character_id
) do
{:ok, location} when is_map(location) and not is_struct(location) ->
reset_location_error_count(character_id)
WandererApp.Cache.delete("character:#{character_id}:location_error_time")
character_state
|> maybe_update_location(location)
:ok
{:error, error} when error in [:forbidden, :not_found, :timeout] ->
error_count = increment_location_error_count(character_id)
Logger.warning("ESI_ERROR: Character location tracking failed",
character_id: character_id,
tracking_pool: tracking_pool,
error_type: error,
error_count: error_count,
endpoint: "character_location"
)
if error_count >= @location_error_threshold do
WandererApp.Cache.put(
"character:#{character_id}:location_forbidden",
true,
ttl: @location_error_timeout
)
end
if is_nil(
WandererApp.Cache.lookup!("character:#{character_id}:location_error_time")
) do
@@ -601,13 +539,24 @@ defmodule WandererApp.Character.Tracker do
{:error, :error_limited}
{:error, error} ->
error_count = increment_location_error_count(character_id)
Logger.error("ESI_ERROR: Character location tracking failed: #{inspect(error)}",
character_id: character_id,
tracking_pool: tracking_pool,
error_type: error,
error_count: error_count,
endpoint: "character_location"
)
if error_count >= @location_error_threshold do
WandererApp.Cache.put(
"character:#{character_id}:location_forbidden",
true,
ttl: @location_error_timeout
)
end
if is_nil(
WandererApp.Cache.lookup!("character:#{character_id}:location_error_time")
) do
@@ -620,13 +569,24 @@ defmodule WandererApp.Character.Tracker do
{:error, :skipped}
_ ->
error_count = increment_location_error_count(character_id)
Logger.error("ESI_ERROR: Character location tracking failed - wrong response",
character_id: character_id,
tracking_pool: tracking_pool,
error_type: "wrong_response",
error_count: error_count,
endpoint: "character_location"
)
if error_count >= @location_error_threshold do
WandererApp.Cache.put(
"character:#{character_id}:location_forbidden",
true,
ttl: @location_error_timeout
)
end
if is_nil(
WandererApp.Cache.lookup!("character:#{character_id}:location_error_time")
) do
@@ -662,8 +622,7 @@ defmodule WandererApp.Character.Tracker do
|> case do
true ->
(WandererApp.Cache.has_key?("character:#{character_id}:online_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:wallet_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:tracking_paused"))
WandererApp.Cache.has_key?("character:#{character_id}:wallet_forbidden"))
|> case do
true ->
{:error, :skipped}
@@ -782,8 +741,7 @@ defmodule WandererApp.Character.Tracker do
alliance_id
)
when old_alliance_id != alliance_id do
(WandererApp.Cache.has_key?("character:#{character_id}:online_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:tracking_paused"))
WandererApp.Cache.has_key?("character:#{character_id}:online_forbidden")
|> case do
true ->
state
@@ -829,8 +787,7 @@ defmodule WandererApp.Character.Tracker do
)
when old_corporation_id != corporation_id do
(WandererApp.Cache.has_key?("character:#{character_id}:online_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:corporation_info_forbidden") ||
WandererApp.Cache.has_key?("character:#{character_id}:tracking_paused"))
WandererApp.Cache.has_key?("character:#{character_id}:corporation_info_forbidden"))
|> case do
true ->
state
@@ -1006,9 +963,7 @@ defmodule WandererApp.Character.Tracker do
),
do: %{
state
| track_online: true,
track_location: true,
track_ship: true
| track_online: true
}
defp maybe_start_online_tracking(
@@ -1052,11 +1007,6 @@ defmodule WandererApp.Character.Tracker do
DateTime.utc_now()
)
WandererApp.Cache.put(
"map:#{map_id}:character:#{character_id}:start_solar_system_id",
track_settings |> Map.get(:solar_system_id)
)
WandererApp.Cache.delete("map:#{map_id}:character:#{character_id}:solar_system_id")
WandererApp.Cache.delete("map:#{map_id}:character:#{character_id}:station_id")
WandererApp.Cache.delete("map:#{map_id}:character:#{character_id}:structure_id")
@@ -1107,7 +1057,7 @@ defmodule WandererApp.Character.Tracker do
)
end
state
%{state | track_location: false, track_ship: false}
end
defp maybe_stop_tracking(
@@ -1137,19 +1087,6 @@ defmodule WandererApp.Character.Tracker do
defp get_online(_), do: %{online: false}
defp get_tracking_duration_minutes(character_id) do
case WandererApp.Cache.lookup!("character:#{character_id}:map:*:tracking_start_time") do
nil ->
0
start_time when is_struct(start_time, DateTime) ->
DateTime.diff(DateTime.utc_now(), start_time, :minute)
_ ->
0
end
end
# Telemetry handler for database pool monitoring
def handle_pool_query(_event_name, measurements, metadata, _config) do
queue_time = measurements[:queue_time]

View File

@@ -14,8 +14,8 @@ defmodule WandererApp.Character.TrackerManager do
GenServer.start_link(__MODULE__, args, name: __MODULE__)
end
def start_tracking(character_id, opts \\ []),
do: GenServer.cast(__MODULE__, {&Impl.start_tracking/3, [character_id, opts]})
def start_tracking(character_id),
do: GenServer.cast(__MODULE__, {&Impl.start_tracking/2, [character_id]})
def stop_tracking(character_id),
do: GenServer.cast(__MODULE__, {&Impl.stop_tracking/2, [character_id]})

View File

@@ -40,13 +40,13 @@ defmodule WandererApp.Character.TrackerManager.Impl do
tracked_characters
|> Enum.each(fn character_id ->
start_tracking(state, character_id, %{})
start_tracking(state, character_id)
end)
state
end
def start_tracking(state, character_id, opts) do
def start_tracking(state, character_id) do
if not WandererApp.Cache.has_key?("#{character_id}:track_requested") do
WandererApp.Cache.insert(
"#{character_id}:track_requested",

View File

@@ -8,7 +8,7 @@ defmodule WandererApp.Character.TrackerPool do
:tracked_ids,
:uuid,
:characters,
server_online: true
server_online: false
]
@name __MODULE__
@@ -17,11 +17,8 @@ defmodule WandererApp.Character.TrackerPool do
@unique_registry :unique_tracker_pool_registry
@update_location_interval :timer.seconds(1)
@update_online_interval :timer.seconds(5)
@update_online_interval :timer.seconds(30)
@check_offline_characters_interval :timer.minutes(5)
@check_online_errors_interval :timer.minutes(1)
@check_ship_errors_interval :timer.minutes(1)
@check_location_errors_interval :timer.minutes(1)
@update_ship_interval :timer.seconds(2)
@update_info_interval :timer.minutes(2)
@update_wallet_interval :timer.minutes(10)
@@ -46,10 +43,6 @@ defmodule WandererApp.Character.TrackerPool do
{:ok, _} = Registry.register(@unique_registry, Module.concat(__MODULE__, uuid), tracked_ids)
{:ok, _} = Registry.register(@registry, __MODULE__, uuid)
# Cachex.get_and_update(@cache, :tracked_characters, fn ids ->
# {:commit, ids ++ tracked_ids}
# end)
tracked_ids
|> Enum.each(fn id ->
Cachex.put(@cache, id, uuid)
@@ -79,9 +72,6 @@ defmodule WandererApp.Character.TrackerPool do
[tracked_id | r_tracked_ids]
end)
# Cachex.get_and_update(@cache, :tracked_characters, fn ids ->
# {:commit, ids ++ [tracked_id]}
# end)
Cachex.put(@cache, tracked_id, uuid)
{:noreply, %{state | characters: [tracked_id | characters]}}
@@ -96,10 +86,6 @@ defmodule WandererApp.Character.TrackerPool do
r_tracked_ids |> Enum.reject(fn id -> id == tracked_id end)
end)
# Cachex.get_and_update(@cache, :tracked_characters, fn ids ->
# {:commit, ids |> Enum.reject(fn id -> id == tracked_id end)}
# end)
#
Cachex.del(@cache, tracked_id)
{:noreply, %{state | characters: characters |> Enum.reject(fn id -> id == tracked_id end)}}
@@ -121,13 +107,10 @@ defmodule WandererApp.Character.TrackerPool do
)
Process.send_after(self(), :update_online, 100)
Process.send_after(self(), :check_online_errors, :timer.seconds(60))
Process.send_after(self(), :check_ship_errors, :timer.seconds(90))
Process.send_after(self(), :check_location_errors, :timer.seconds(120))
Process.send_after(self(), :check_offline_characters, @check_offline_characters_interval)
Process.send_after(self(), :update_location, 300)
Process.send_after(self(), :update_ship, 500)
Process.send_after(self(), :update_info, 1500)
Process.send_after(self(), :check_offline_characters, @check_offline_characters_interval)
if WandererApp.Env.wallet_tracking_enabled?() do
Process.send_after(self(), :update_wallet, 1000)
@@ -191,6 +174,8 @@ defmodule WandererApp.Character.TrackerPool do
[Tracker Pool] update_online => exception: #{Exception.message(e)}
#{Exception.format_stacktrace(__STACKTRACE__)}
""")
ErrorTracker.report(e, __STACKTRACE__)
end
{:noreply, state}
@@ -259,126 +244,6 @@ defmodule WandererApp.Character.TrackerPool do
{:noreply, state}
end
def handle_info(
:check_online_errors,
%{
characters: characters
} =
state
) do
Process.send_after(self(), :check_online_errors, @check_online_errors_interval)
try do
characters
|> Task.async_stream(
fn character_id ->
WandererApp.TaskWrapper.start_link(
WandererApp.Character.Tracker,
:check_online_errors,
[
character_id
]
)
end,
timeout: :timer.seconds(15),
max_concurrency: System.schedulers_online() * 4,
on_timeout: :kill_task
)
|> Enum.each(fn
{:ok, _result} -> :ok
error -> @logger.error("Error in check_online_errors: #{inspect(error)}")
end)
rescue
e ->
Logger.error("""
[Tracker Pool] check_online_errors => exception: #{Exception.message(e)}
#{Exception.format_stacktrace(__STACKTRACE__)}
""")
end
{:noreply, state}
end
def handle_info(
:check_ship_errors,
%{
characters: characters
} =
state
) do
Process.send_after(self(), :check_ship_errors, @check_ship_errors_interval)
try do
characters
|> Task.async_stream(
fn character_id ->
WandererApp.TaskWrapper.start_link(
WandererApp.Character.Tracker,
:check_ship_errors,
[
character_id
]
)
end,
timeout: :timer.seconds(15),
max_concurrency: System.schedulers_online() * 4,
on_timeout: :kill_task
)
|> Enum.each(fn
{:ok, _result} -> :ok
error -> @logger.error("Error in check_ship_errors: #{inspect(error)}")
end)
rescue
e ->
Logger.error("""
[Tracker Pool] check_ship_errors => exception: #{Exception.message(e)}
#{Exception.format_stacktrace(__STACKTRACE__)}
""")
end
{:noreply, state}
end
def handle_info(
:check_location_errors,
%{
characters: characters
} =
state
) do
Process.send_after(self(), :check_location_errors, @check_location_errors_interval)
try do
characters
|> Task.async_stream(
fn character_id ->
WandererApp.TaskWrapper.start_link(
WandererApp.Character.Tracker,
:check_location_errors,
[
character_id
]
)
end,
timeout: :timer.seconds(15),
max_concurrency: System.schedulers_online() * 4,
on_timeout: :kill_task
)
|> Enum.each(fn
{:ok, _result} -> :ok
error -> @logger.error("Error in check_location_errors: #{inspect(error)}")
end)
rescue
e ->
Logger.error("""
[Tracker Pool] check_location_errors => exception: #{Exception.message(e)}
#{Exception.format_stacktrace(__STACKTRACE__)}
""")
end
{:noreply, state}
end
def handle_info(
:update_location,
%{
@@ -581,8 +446,4 @@ defmodule WandererApp.Character.TrackerPool do
Logger.debug("Failed to monitor message queue: #{inspect(error)}")
end
end
defp via_tuple(uuid) do
{:via, Registry, {@unique_registry, Module.concat(__MODULE__, uuid)}}
end
end

View File

@@ -50,14 +50,9 @@ defmodule WandererApp.Character.TrackerPoolDynamicSupervisor do
end
end
def is_not_tracked?(tracked_id) do
{:ok, tracked_ids} = Cachex.get(@cache, :tracked_characters)
tracked_ids |> Enum.member?(tracked_id) |> Kernel.not()
end
defp get_available_pool([]), do: nil
defp get_available_pool([{pid, uuid} | pools]) do
defp get_available_pool([{_pid, uuid} | pools]) do
case Registry.lookup(@unique_registry, Module.concat(WandererApp.Character.TrackerPool, uuid)) do
[] ->
nil
@@ -67,8 +62,8 @@ defmodule WandererApp.Character.TrackerPoolDynamicSupervisor do
nil ->
get_available_pool(pools)
pid ->
pid
pool_pid ->
pool_pid
end
end
end

View File

@@ -173,12 +173,11 @@ defmodule WandererApp.Character.TrackingUtils do
%{
id: character_id,
eve_id: eve_id
},
} = _character,
map_id,
is_track_allowed,
caller_pid
)
when not is_nil(caller_pid) do
) do
WandererAppWeb.Presence.update(caller_pid, map_id, character_id, %{
tracked: is_track_allowed,
from: DateTime.utc_now()
@@ -217,13 +216,16 @@ defmodule WandererApp.Character.TrackingUtils do
end
defp track_character(
_character,
character,
_map_id,
_is_track_allowed,
_caller_pid
) do
Logger.error("caller_pid is required for tracking characters")
{:error, "caller_pid is required"}
Logger.error(
"Invalid character data for tracking - character must have :id and :eve_id fields, got: #{inspect(character)}"
)
{:error, "Invalid character data"}
end
def untrack(characters, map_id, caller_pid) do
@@ -238,30 +240,14 @@ defmodule WandererApp.Character.TrackingUtils do
})
end)
# WandererApp.Map.Server.untrack_characters(map_id, character_ids)
:ok
else
true ->
Logger.error("caller_pid is required for untracking characters")
Logger.error("caller_pid is required for untracking characters 2")
{:error, "caller_pid is required"}
end
end
# def add_characters([], _map_id, _track_character), do: :ok
# def add_characters([character | characters], map_id, track_character) do
# :ok = WandererApp.Map.Server.add_character(map_id, character, track_character)
# add_characters(characters, map_id, track_character)
# end
# def remove_characters([], _map_id), do: :ok
# def remove_characters([character | characters], map_id) do
# :ok = WandererApp.Map.Server.remove_character(map_id, character.id)
# remove_characters(characters, map_id)
# end
def get_main_character(
nil,
current_user_characters,

View File

@@ -12,7 +12,7 @@ defmodule WandererApp.Character.TransactionsTracker.Impl do
total_balance: 0,
transactions: [],
retries: 5,
server_online: true,
server_online: false,
status: :started
]
@@ -75,7 +75,7 @@ defmodule WandererApp.Character.TransactionsTracker.Impl do
def handle_event(
:update_corp_wallets,
%{character: character} = state
%{character: character, server_online: true} = state
) do
Process.send_after(self(), :update_corp_wallets, @update_interval)
@@ -88,26 +88,26 @@ defmodule WandererApp.Character.TransactionsTracker.Impl do
:update_corp_wallets,
state
) do
Process.send_after(self(), :update_corp_wallets, :timer.seconds(15))
Process.send_after(self(), :update_corp_wallets, @update_interval)
state
end
def handle_event(
:check_wallets,
%{wallets: []} = state
%{character: character, wallets: wallets, server_online: true} = state
) do
Process.send_after(self(), :check_wallets, :timer.seconds(5))
Process.send_after(self(), :check_wallets, @update_interval)
state
end
def handle_event(
:check_wallets,
%{character: character, wallets: wallets} = state
) do
check_wallets(wallets, character)
state
end
def handle_event(
:check_wallets,
state
) do
Process.send_after(self(), :check_wallets, @update_interval)
state

View File

@@ -14,8 +14,6 @@ defmodule WandererApp.DatabaseSetup do
alias WandererApp.Repo
alias Ecto.Adapters.SQL
@test_db_name "wanderer_test"
@doc """
Sets up the test database from scratch.
Creates the database, runs migrations, and sets up initial data.

View File

@@ -21,7 +21,8 @@ defmodule WandererApp.Esi do
defdelegate get_character_location(character_eve_id, opts \\ []), to: WandererApp.Esi.ApiClient
defdelegate get_character_online(character_eve_id, opts \\ []), to: WandererApp.Esi.ApiClient
defdelegate get_character_ship(character_eve_id, opts \\ []), to: WandererApp.Esi.ApiClient
defdelegate find_routes(map_id, origin, hubs, routes_settings), to: WandererApp.Esi.ApiClient
defdelegate get_routes_custom(hubs, origin, params), to: WandererApp.Esi.ApiClient
defdelegate get_routes_eve(hubs, origin, params, opts), to: WandererApp.Esi.ApiClient
defdelegate search(character_eve_id, opts \\ []), to: WandererApp.Esi.ApiClient
defdelegate get_killmail(killmail_id, killmail_hash, opts \\ []), to: WandererApp.Esi.ApiClient

View File

@@ -6,35 +6,9 @@ defmodule WandererApp.Esi.ApiClient do
alias WandererApp.Cache
@ttl :timer.hours(1)
@routes_ttl :timer.minutes(15)
@base_url "https://esi.evetech.net/latest"
@wanderrer_user_agent "(wanderer-industries@proton.me; +https://github.com/wanderer-industries/wanderer)"
@req_esi Req.new(base_url: @base_url, finch: WandererApp.Finch)
@get_link_pairs_advanced_params [
:include_mass_crit,
:include_eol,
:include_frig
]
@default_routes_settings %{
path_type: "shortest",
include_mass_crit: true,
include_eol: false,
include_frig: true,
include_cruise: true,
avoid_wormholes: false,
avoid_pochven: false,
avoid_edencom: false,
avoid_triglavian: false,
include_thera: true,
avoid: []
}
@zarzakh_system 30_100_000
@default_avoid_systems [@zarzakh_system]
@req_esi_options [base_url: "https://esi.evetech.net", finch: WandererApp.Finch]
@cache_opts [cache: true]
@retry_opts [retry: false, retry_log_level: :warning]
@@ -43,11 +17,11 @@ defmodule WandererApp.Esi.ApiClient do
@logger Application.compile_env(:wanderer_app, :logger)
def get_server_status, do: get("/status")
def get_server_status, do: do_get("/status", [], @cache_opts)
def set_autopilot_waypoint(add_to_beginning, clear_other_waypoints, destination_id, opts \\ []),
do:
post_esi(
do_post_esi(
"/ui/autopilot/waypoint",
get_auth_opts(opts)
|> Keyword.merge(
@@ -62,7 +36,7 @@ defmodule WandererApp.Esi.ApiClient do
def post_characters_affiliation(character_eve_ids, _opts)
when is_list(character_eve_ids),
do:
post_esi(
do_post_esi(
"/characters/affiliation/",
json: character_eve_ids,
params: %{
@@ -70,168 +44,9 @@ defmodule WandererApp.Esi.ApiClient do
}
)
def find_routes(map_id, origin, hubs, routes_settings) do
origin = origin |> String.to_integer()
hubs = hubs |> Enum.map(&(&1 |> String.to_integer()))
routes_settings = @default_routes_settings |> Map.merge(routes_settings)
connections =
case routes_settings.avoid_wormholes do
false ->
map_chains =
routes_settings
|> Map.take(@get_link_pairs_advanced_params)
|> Map.put_new(:map_id, map_id)
|> WandererApp.Api.MapConnection.get_link_pairs_advanced!()
|> Enum.map(fn %{
solar_system_source: solar_system_source,
solar_system_target: solar_system_target
} ->
%{
first: solar_system_source,
second: solar_system_target
}
end)
|> Enum.uniq()
{:ok, thera_chains} =
case routes_settings.include_thera do
true ->
WandererApp.Server.TheraDataFetcher.get_chain_pairs(routes_settings)
false ->
{:ok, []}
end
chains = remove_intersection([map_chains | thera_chains] |> List.flatten())
chains =
case routes_settings.include_cruise do
false ->
{:ok, wh_class_a_systems} = WandererApp.CachedInfo.get_wh_class_a_systems()
chains
|> Enum.filter(fn x ->
not Enum.member?(wh_class_a_systems, x.first) and
not Enum.member?(wh_class_a_systems, x.second)
end)
_ ->
chains
end
chains
|> Enum.map(fn chain ->
["#{chain.first}|#{chain.second}", "#{chain.second}|#{chain.first}"]
end)
|> List.flatten()
true ->
[]
end
{:ok, trig_systems} = WandererApp.CachedInfo.get_trig_systems()
pochven_solar_systems =
trig_systems
|> Enum.filter(fn s -> s.triglavian_invasion_status == "Final" end)
|> Enum.map(& &1.solar_system_id)
triglavian_solar_systems =
trig_systems
|> Enum.filter(fn s -> s.triglavian_invasion_status == "Triglavian" end)
|> Enum.map(& &1.solar_system_id)
edencom_solar_systems =
trig_systems
|> Enum.filter(fn s -> s.triglavian_invasion_status == "Edencom" end)
|> Enum.map(& &1.solar_system_id)
avoidance_list =
case routes_settings.avoid_edencom do
true ->
edencom_solar_systems
false ->
[]
end
avoidance_list =
case routes_settings.avoid_triglavian do
true ->
[avoidance_list | triglavian_solar_systems]
false ->
avoidance_list
end
avoidance_list =
case routes_settings.avoid_pochven do
true ->
[avoidance_list | pochven_solar_systems]
false ->
avoidance_list
end
avoidance_list =
(@default_avoid_systems ++ [routes_settings.avoid | avoidance_list])
|> List.flatten()
|> Enum.uniq()
params =
%{
datasource: "tranquility",
flag: routes_settings.path_type,
connections: connections,
avoid: avoidance_list
}
{:ok, all_routes} = get_all_routes(hubs, origin, params)
routes =
all_routes
|> Enum.map(fn route_info ->
map_route_info(route_info)
end)
|> Enum.filter(fn route_info -> not is_nil(route_info) end)
{:ok, routes}
end
def get_all_routes(hubs, origin, params, opts \\ []) do
cache_key =
"routes-#{origin}-#{hubs |> Enum.join("-")}-#{:crypto.hash(:sha, :erlang.term_to_binary(params))}"
case WandererApp.Cache.lookup(cache_key) do
{:ok, result} when not is_nil(result) ->
{:ok, result}
_ ->
case get_all_routes_custom(hubs, origin, params) do
{:ok, result} ->
WandererApp.Cache.insert(
cache_key,
result,
ttl: @routes_ttl
)
{:ok, result}
{:error, _error} ->
@logger.error(
"Error getting custom routes for #{inspect(origin)}: #{inspect(params)}"
)
get_all_routes_eve(hubs, origin, params, opts)
end
end
end
defp get_all_routes_custom(hubs, origin, params),
def get_routes_custom(hubs, origin, params),
do:
post(
do_post(
"#{get_custom_route_base_url()}/route/multiple",
[
json: %{
@@ -245,13 +60,20 @@ defmodule WandererApp.Esi.ApiClient do
|> Keyword.merge(@timeout_opts)
)
def get_all_routes_eve(hubs, origin, params, opts),
def get_routes_eve(hubs, origin, params, opts),
do:
{:ok,
hubs
|> Task.async_stream(
fn destination ->
get_routes(origin, destination, params, opts)
%{
"origin" => origin,
"destination" => destination,
"systems" => [],
"success" => false
}
# do_get_routes_eve(origin, destination, params, opts)
end,
max_concurrency: System.schedulers_online() * 4,
timeout: :timer.seconds(30),
@@ -265,8 +87,19 @@ defmodule WandererApp.Esi.ApiClient do
end
end)}
def get_routes(origin, destination, params, opts) do
case _get_routes(origin, destination, params, opts) do
defp do_get_routes_eve(origin, destination, params, opts) do
esi_params =
Map.merge(params, %{
connections: params.connections |> Enum.join(","),
avoid: params.avoid |> Enum.join(",")
})
do_get(
"/route/#{origin}/#{destination}/?#{esi_params |> Plug.Conn.Query.encode()}",
opts,
@cache_opts
)
|> case do
{:ok, result} ->
%{
"origin" => origin,
@@ -299,9 +132,8 @@ defmodule WandererApp.Esi.ApiClient do
key: "killmail-#{killmail_id}-#{killmail_hash}",
opts: [ttl: @ttl]
)
def get_killmail(killmail_id, killmail_hash, opts \\ []) do
get("/killmails/#{killmail_id}/#{killmail_hash}/", opts, @cache_opts)
end
def get_killmail(killmail_id, killmail_hash, opts \\ []),
do: do_get("/killmails/#{killmail_id}/#{killmail_hash}/", opts, @cache_opts)
@decorate cacheable(
cache: Cache,
@@ -322,7 +154,7 @@ defmodule WandererApp.Esi.ApiClient do
opts: [ttl: @ttl]
)
def get_character_info(eve_id, opts \\ []) do
case get(
case do_get(
"/characters/#{eve_id}/",
opts,
@cache_opts
@@ -371,8 +203,19 @@ defmodule WandererApp.Esi.ApiClient do
do: get_character_auth_data(character_eve_id, "ship", opts ++ @cache_opts)
def search(character_eve_id, opts \\ []) do
search_val = to_string(opts[:params][:search] || "")
categories_val = to_string(opts[:params][:categories] || "character,alliance,corporation")
params = Keyword.get(opts, :params, %{}) |> Map.new()
search_val =
to_string(
Map.get(params, :search) || Map.get(params, "search") || ""
)
categories_val =
to_string(
Map.get(params, :categories) ||
Map.get(params, "categories") ||
"character,alliance,corporation"
)
query_params = [
{"search", search_val},
@@ -388,55 +231,18 @@ defmodule WandererApp.Esi.ApiClient do
@decorate cacheable(
cache: Cache,
key: "search-#{character_eve_id}-#{categories_val}-#{search_val |> Slug.slugify()}",
key: "search-#{character_eve_id}-#{categories_val}-#{Base.encode64(search_val)}",
opts: [ttl: @ttl]
)
defp get_search(character_eve_id, search_val, categories_val, merged_opts) do
get_character_auth_data(character_eve_id, "search", merged_opts)
end
defp remove_intersection(pairs_arr) do
tuples = pairs_arr |> Enum.map(fn x -> {x.first, x.second} end)
tuples
|> Enum.reduce([], fn {first, second} = x, acc ->
if Enum.member?(tuples, {second, first}) do
acc
else
[x | acc]
end
end)
|> Enum.uniq()
|> Enum.map(fn {first, second} ->
%{
first: first,
second: second
}
end)
end
defp _get_routes(origin, destination, params, opts),
do: get_routes_eve(origin, destination, params, opts)
defp get_routes_eve(origin, destination, params, opts) do
esi_params =
Map.merge(params, %{
connections: params.connections |> Enum.join(","),
avoid: params.avoid |> Enum.join(",")
})
get(
"/route/#{origin}/#{destination}/?#{esi_params |> Plug.Conn.Query.encode()}",
opts,
@cache_opts
)
end
defp get_auth_opts(opts), do: [auth: {:bearer, opts[:access_token]}]
defp get_alliance_info(alliance_eve_id, info_path, opts),
do:
get(
do_get(
"/alliances/#{alliance_eve_id}/#{info_path}",
opts,
@cache_opts
@@ -444,7 +250,7 @@ defmodule WandererApp.Esi.ApiClient do
defp get_corporation_info(corporation_eve_id, info_path, opts),
do:
get(
do_get(
"/corporations/#{corporation_eve_id}/#{info_path}",
opts,
@cache_opts
@@ -460,13 +266,13 @@ defmodule WandererApp.Esi.ApiClient do
character_id = opts |> Keyword.get(:character_id, nil)
if not is_access_token_expired?(character_id) do
get(
do_get(
path,
auth_opts,
opts |> with_refresh_token()
)
else
get_retry(path, auth_opts, opts |> with_refresh_token())
do_get_retry(path, auth_opts, opts |> with_refresh_token())
end
end
@@ -481,29 +287,26 @@ defmodule WandererApp.Esi.ApiClient do
defp get_corporation_auth_data(corporation_eve_id, info_path, opts),
do:
get(
do_get(
"/corporations/#{corporation_eve_id}/#{info_path}",
[params: opts[:params] || []] ++
(opts |> get_auth_opts()),
(opts |> with_refresh_token()) ++ @cache_opts
)
defp with_user_agent_opts(opts) do
opts
|> Keyword.merge(
headers: [{:user_agent, "Wanderer/#{WandererApp.Env.vsn()} #{@wanderrer_user_agent}"}]
)
end
defp with_user_agent_opts(opts),
do:
opts
|> Keyword.merge(
headers: [{:user_agent, "Wanderer/#{WandererApp.Env.vsn()} #{@wanderrer_user_agent}"}]
)
defp with_refresh_token(opts) do
opts |> Keyword.merge(refresh_token?: true)
end
defp with_refresh_token(opts), do: opts |> Keyword.merge(refresh_token?: true)
defp with_cache_opts(opts) do
opts |> Keyword.merge(@cache_opts) |> Keyword.merge(cache_dir: System.tmp_dir!())
end
defp with_cache_opts(opts),
do: opts |> Keyword.merge(@cache_opts) |> Keyword.merge(cache_dir: System.tmp_dir!())
defp get(path, api_opts \\ [], opts \\ []) do
defp do_get(path, api_opts \\ [], opts \\ []) do
case Cachex.get(:api_cache, path) do
{:ok, cached_data} when not is_nil(cached_data) ->
{:ok, cached_data}
@@ -515,15 +318,17 @@ defmodule WandererApp.Esi.ApiClient do
defp do_get_request(path, api_opts \\ [], opts \\ []) do
try do
case Req.get(
@req_esi,
api_opts
|> Keyword.merge(url: path)
|> with_user_agent_opts()
|> with_cache_opts()
|> Keyword.merge(@retry_opts)
|> Keyword.merge(@timeout_opts)
) do
@req_esi_options
|> Req.new()
|> Req.get(
api_opts
|> Keyword.merge(url: path)
|> with_user_agent_opts()
|> with_cache_opts()
|> Keyword.merge(@retry_opts)
|> Keyword.merge(@timeout_opts)
)
|> case do
{:ok, %{status: 200, body: body, headers: headers}} ->
maybe_cache_response(path, body, headers, opts)
@@ -537,8 +342,8 @@ defmodule WandererApp.Esi.ApiClient do
{:ok, %{status: 420, headers: headers} = _error} ->
# Extract rate limit information from headers
reset_seconds = Map.get(headers, "x-esi-error-limit-reset", ["unknown"]) |> List.first()
remaining = Map.get(headers, "x-esi-error-limit-remain", ["unknown"]) |> List.first()
reset_seconds = Map.get(headers, "x-esi-error-limit-reset", ["0"]) |> List.first()
remaining = Map.get(headers, "x-esi-error-limit-remain", ["0"]) |> List.first()
# Emit telemetry for rate limiting
:telemetry.execute(
@@ -568,10 +373,40 @@ defmodule WandererApp.Esi.ApiClient do
{:error, :error_limited, headers}
{:ok, %{status: status} = _error} when status in [401, 403] ->
get_retry(path, api_opts, opts)
{:ok, %{status: 429, headers: headers} = _error} ->
# Extract rate limit information from headers
reset_seconds = Map.get(headers, "retry-after", ["0"]) |> List.first()
{:ok, %{status: status}} ->
# Emit telemetry for rate limiting
:telemetry.execute(
[:wanderer_app, :esi, :rate_limited],
%{
count: 1,
reset_duration:
case Integer.parse(reset_seconds || "0") do
{seconds, _} -> seconds * 1000
_ -> 0
end
},
%{
method: "GET",
path: path,
reset_seconds: reset_seconds
}
)
Logger.warning("ESI_RATE_LIMITED: GET request rate limited",
method: "GET",
path: path,
reset_seconds: reset_seconds
)
{:error, :error_limited, headers}
{:ok, %{status: status} = _error} when status in [401, 403] ->
do_get_retry(path, api_opts, opts)
{:ok, %{status: status, headers: headers}} ->
{:error, "Unexpected status: #{status}"}
{:error, _reason} ->
@@ -585,7 +420,7 @@ defmodule WandererApp.Esi.ApiClient do
end
end
defp maybe_cache_response(path, body, %{"expires" => [expires]}, opts)
defp maybe_cache_response(path, body, %{"expires" => [expires]} = _headers, opts)
when is_binary(path) and not is_nil(expires) do
try do
if opts |> Keyword.get(:cache, false) do
@@ -609,7 +444,7 @@ defmodule WandererApp.Esi.ApiClient do
defp maybe_cache_response(_path, _body, _headers, _opts), do: :ok
defp post(url, opts) do
defp do_post(url, opts) do
try do
case Req.post("#{url}", opts |> with_user_agent_opts()) do
{:ok, %{status: status, body: body}} when status in [200, 201] ->
@@ -623,8 +458,8 @@ defmodule WandererApp.Esi.ApiClient do
{:ok, %{status: 420, headers: headers} = _error} ->
# Extract rate limit information from headers
reset_seconds = Map.get(headers, "x-esi-error-limit-reset", ["unknown"]) |> List.first()
remaining = Map.get(headers, "x-esi-error-limit-remain", ["unknown"]) |> List.first()
reset_seconds = Map.get(headers, "x-esi-error-limit-reset", ["0"]) |> List.first()
remaining = Map.get(headers, "x-esi-error-limit-remain", ["0"]) |> List.first()
# Emit telemetry for rate limiting
:telemetry.execute(
@@ -668,16 +503,13 @@ defmodule WandererApp.Esi.ApiClient do
end
end
defp post_esi(url, opts) do
defp do_post_esi(url, opts) do
try do
req_opts =
(opts |> with_user_agent_opts() |> Keyword.merge(@retry_opts)) ++
[params: opts[:params] || []]
Req.new(
[base_url: @base_url, finch: WandererApp.Finch] ++
req_opts
)
Req.new(@req_esi_options ++ req_opts)
|> Req.post(url: url)
|> case do
{:ok, %{status: status, body: body}} when status in [200, 201] ->
@@ -691,8 +523,8 @@ defmodule WandererApp.Esi.ApiClient do
{:ok, %{status: 420, headers: headers} = _error} ->
# Extract rate limit information from headers
reset_seconds = Map.get(headers, "x-esi-error-limit-reset", ["unknown"]) |> List.first()
remaining = Map.get(headers, "x-esi-error-limit-remain", ["unknown"]) |> List.first()
reset_seconds = Map.get(headers, "x-esi-error-limit-reset", ["0"]) |> List.first()
remaining = Map.get(headers, "x-esi-error-limit-remain", ["0"]) |> List.first()
# Emit telemetry for rate limiting
:telemetry.execute(
@@ -722,6 +554,36 @@ defmodule WandererApp.Esi.ApiClient do
{:error, :error_limited, headers}
{:ok, %{status: 429, headers: headers} = _error} ->
# Extract rate limit information from headers
reset_seconds = Map.get(headers, "retry-after", ["0"]) |> List.first()
# Emit telemetry for rate limiting
:telemetry.execute(
[:wanderer_app, :esi, :rate_limited],
%{
count: 1,
reset_duration:
case Integer.parse(reset_seconds || "0") do
{seconds, _} -> seconds * 1000
_ -> 0
end
},
%{
method: "POST_ESI",
path: url,
reset_seconds: reset_seconds
}
)
Logger.warning("ESI_RATE_LIMITED: POST request rate limited",
method: "POST_ESI",
path: url,
reset_seconds: reset_seconds
)
{:error, :error_limited, headers}
{:ok, %{status: status}} ->
{:error, "Unexpected status: #{status}"}
@@ -736,7 +598,7 @@ defmodule WandererApp.Esi.ApiClient do
end
end
defp get_retry(path, api_opts, opts, status \\ :forbidden) do
defp do_get_retry(path, api_opts, opts, status \\ :forbidden) do
refresh_token? = opts |> Keyword.get(:refresh_token?, false)
retry_count = opts |> Keyword.get(:retry_count, 0)
character_id = opts |> Keyword.get(:character_id, nil)
@@ -748,7 +610,7 @@ defmodule WandererApp.Esi.ApiClient do
{:ok, token} ->
auth_opts = [access_token: token.access_token] |> get_auth_opts()
get(
do_get(
path,
api_opts |> Keyword.merge(auth_opts),
opts |> Keyword.merge(retry_count: retry_count + 1)
@@ -913,44 +775,4 @@ defmodule WandererApp.Esi.ApiClient do
:character_token_invalid
)
end
defp map_route_info(
%{
"origin" => origin,
"destination" => destination,
"systems" => result_systems,
"success" => success
} = _route_info
),
do:
map_route_info(%{
origin: origin,
destination: destination,
systems: result_systems,
success: success
})
defp map_route_info(
%{origin: origin, destination: destination, systems: result_systems, success: success} =
_route_info
) do
systems =
case result_systems do
[] ->
[]
_ ->
result_systems |> Enum.reject(fn system_id -> system_id == origin end)
end
%{
has_connection: result_systems != [],
systems: systems,
origin: origin,
destination: destination,
success: success
}
end
defp map_route_info(_), do: nil
end

View File

@@ -38,32 +38,8 @@ defmodule WandererApp.EveDataService do
|> Ash.bulk_create(WandererApp.Api.MapSolarSystemJumps, :create)
Logger.info("MapSolarSystemJumps updated!")
end
def download_files() do
tasks =
@dump_file_names
|> Enum.map(fn file_name ->
Task.async(fn ->
download_file(file_name)
end)
end)
Task.await_many(tasks, :timer.minutes(30))
end
def download_file(file_name) do
url = "#{@eve_db_dump_url}/#{file_name}"
Logger.info("Downloading file from #{url}")
download_path = Path.join([:code.priv_dir(:wanderer_app), "repo", "data", file_name])
Req.get!(url, raw: true, into: File.stream!(download_path, [:write])).body
|> Stream.run()
Logger.info("File downloaded successfully to #{download_path}")
:ok
cleanup_files()
end
def load_wormhole_types() do
@@ -163,7 +139,57 @@ defmodule WandererApp.EveDataService do
data
end
def load_map_constellations() do
defp cleanup_files() do
tasks =
@dump_file_names
|> Enum.map(fn file_name ->
Task.async(fn ->
cleanup_file(file_name)
end)
end)
Task.await_many(tasks, :timer.minutes(30))
end
defp cleanup_file(file_name) do
Logger.info("Cleaning file: #{file_name}")
download_path = Path.join([:code.priv_dir(:wanderer_app), "repo", "data", file_name])
:ok = File.rm(download_path)
Logger.info("File removed successfully to #{download_path}")
:ok
end
defp download_files() do
tasks =
@dump_file_names
|> Enum.map(fn file_name ->
Task.async(fn ->
download_file(file_name)
end)
end)
Task.await_many(tasks, :timer.minutes(30))
end
defp download_file(file_name) do
url = "#{@eve_db_dump_url}/#{file_name}"
Logger.info("Downloading file from #{url}")
download_path = Path.join([:code.priv_dir(:wanderer_app), "repo", "data", file_name])
Req.get!(url, raw: true, into: File.stream!(download_path, [:write])).body
|> Stream.run()
Logger.info("File downloaded successfully to #{download_path}")
:ok
end
defp load_map_constellations() do
WandererApp.Utils.CSVUtil.csv_row_to_table_record(
"#{:code.priv_dir(:wanderer_app)}/repo/data/mapConstellations.csv",
fn row ->
@@ -175,7 +201,7 @@ defmodule WandererApp.EveDataService do
)
end
def load_map_regions() do
defp load_map_regions() do
WandererApp.Utils.CSVUtil.csv_row_to_table_record(
"#{:code.priv_dir(:wanderer_app)}/repo/data/mapRegions.csv",
fn row ->
@@ -187,7 +213,7 @@ defmodule WandererApp.EveDataService do
)
end
def load_map_location_wormhole_classes() do
defp load_map_location_wormhole_classes() do
WandererApp.Utils.CSVUtil.csv_row_to_table_record(
"#{:code.priv_dir(:wanderer_app)}/repo/data/mapLocationWormholeClasses.csv",
fn row ->
@@ -199,7 +225,7 @@ defmodule WandererApp.EveDataService do
)
end
def load_inv_groups() do
defp load_inv_groups() do
WandererApp.Utils.CSVUtil.csv_row_to_table_record(
"#{:code.priv_dir(:wanderer_app)}/repo/data/invGroups.csv",
fn row ->
@@ -212,7 +238,7 @@ defmodule WandererApp.EveDataService do
)
end
def get_db_data() do
defp get_db_data() do
map_constellations = load_map_constellations()
map_regions = load_map_regions()
map_location_wormhole_classes = load_map_location_wormhole_classes()
@@ -296,7 +322,7 @@ defmodule WandererApp.EveDataService do
)
end
def get_ship_types_data() do
defp get_ship_types_data() do
inv_groups = load_inv_groups()
ship_type_groups =
@@ -331,7 +357,7 @@ defmodule WandererApp.EveDataService do
|> Enum.filter(fn t -> t.group_id in ship_type_groups end)
end
def get_solar_system_jumps_data() do
defp get_solar_system_jumps_data() do
WandererApp.Utils.CSVUtil.csv_row_to_table_record(
"#{:code.priv_dir(:wanderer_app)}/repo/data/mapSolarSystemJumps.csv",
fn row ->

View File

@@ -2,7 +2,7 @@ defmodule WandererApp.ExternalEvents do
@moduledoc """
External event system for SSE and webhook delivery.
This system is completely separate from the internal Phoenix PubSub
This system is completely separate from the internal Phoenix PubSub
event system and does NOT modify any existing event flows.
External events are delivered to:
@@ -72,20 +72,12 @@ defmodule WandererApp.ExternalEvents do
# Check if MapEventRelay is alive before sending
if Process.whereis(MapEventRelay) do
try do
# Use call with timeout instead of cast for better error handling
GenServer.call(MapEventRelay, {:deliver_event, event}, 5000)
:ok
catch
:exit, {:timeout, _} ->
Logger.error("Timeout delivering event to MapEventRelay for map #{map_id}")
{:error, :timeout}
:exit, reason ->
Logger.error("Failed to deliver event to MapEventRelay: #{inspect(reason)}")
{:error, reason}
end
# Use cast for async delivery to avoid blocking the caller
# This is critical for performance in hot paths (character updates)
GenServer.cast(MapEventRelay, {:deliver_event, event})
:ok
else
Logger.debug(fn -> "MapEventRelay not available for event delivery (map: #{map_id})" end)
{:error, :relay_not_available}
end
else

View File

@@ -20,6 +20,7 @@ defmodule WandererApp.ExternalEvents.Event do
| :character_added
| :character_removed
| :character_updated
| :characters_updated
| :map_kill
| :acl_member_added
| :acl_member_removed
@@ -42,50 +43,6 @@ defmodule WandererApp.ExternalEvents.Event do
defstruct [:id, :map_id, :type, :payload, :timestamp]
@doc """
Creates a new external event with ULID for ordering.
Validates that the event_type is supported before creating the event.
"""
@spec new(String.t(), event_type(), map()) :: t() | {:error, :invalid_event_type}
def new(map_id, event_type, payload) when is_binary(map_id) and is_map(payload) do
if valid_event_type?(event_type) do
%__MODULE__{
id: Ecto.ULID.generate(System.system_time(:millisecond)),
map_id: map_id,
type: event_type,
payload: payload,
timestamp: DateTime.utc_now()
}
else
raise ArgumentError,
"Invalid event type: #{inspect(event_type)}. Must be one of: #{supported_event_types() |> Enum.map(&to_string/1) |> Enum.join(", ")}"
end
end
@doc """
Converts an event to JSON format for delivery.
"""
@spec to_json(t()) :: map()
def to_json(%__MODULE__{} = event) do
%{
"id" => event.id,
"type" => to_string(event.type),
"map_id" => event.map_id,
"timestamp" => DateTime.to_iso8601(event.timestamp),
"payload" => serialize_payload(event.payload)
}
end
# Convert Ash structs and other complex types to plain maps
defp serialize_payload(payload) when is_struct(payload) do
serialize_payload(payload, MapSet.new())
end
defp serialize_payload(payload) when is_map(payload) do
serialize_payload(payload, MapSet.new())
end
# Define allowlisted fields for different struct types
@system_fields [
:id,
@@ -133,6 +90,73 @@ defmodule WandererApp.ExternalEvents.Event do
]
@signature_fields [:id, :signature_id, :name, :type, :group]
@supported_event_types [
:add_system,
:deleted_system,
:system_renamed,
:system_metadata_changed,
:signatures_updated,
:signature_added,
:signature_removed,
:connection_added,
:connection_removed,
:connection_updated,
:character_added,
:character_removed,
:character_updated,
:characters_updated,
:map_kill,
:acl_member_added,
:acl_member_removed,
:acl_member_updated,
:rally_point_added,
:rally_point_removed
]
@doc """
Creates a new external event with ULID for ordering.
Validates that the event_type is supported before creating the event.
"""
@spec new(String.t(), event_type(), map()) :: t() | {:error, :invalid_event_type}
def new(map_id, event_type, payload) when is_binary(map_id) and is_map(payload) do
if valid_event_type?(event_type) do
%__MODULE__{
id: Ecto.ULID.generate(System.system_time(:millisecond)),
map_id: map_id,
type: event_type,
payload: payload,
timestamp: DateTime.utc_now()
}
else
raise ArgumentError,
"Invalid event type: #{inspect(event_type)}. Must be one of: #{supported_event_types() |> Enum.map(&to_string/1) |> Enum.join(", ")}"
end
end
@doc """
Converts an event to JSON format for delivery.
"""
@spec to_json(t()) :: map()
def to_json(%__MODULE__{} = event) do
%{
"id" => event.id,
"type" => to_string(event.type),
"map_id" => event.map_id,
"timestamp" => DateTime.to_iso8601(event.timestamp),
"payload" => serialize_payload(event.payload)
}
end
# Convert Ash structs and other complex types to plain maps
defp serialize_payload(payload) when is_struct(payload) do
serialize_payload(payload, MapSet.new())
end
defp serialize_payload(payload) when is_map(payload) do
serialize_payload(payload, MapSet.new())
end
# Overloaded versions with visited tracking
defp serialize_payload(payload, visited) when is_struct(payload) do
# Check for circular reference
@@ -193,29 +217,7 @@ defmodule WandererApp.ExternalEvents.Event do
Returns all supported event types.
"""
@spec supported_event_types() :: [event_type()]
def supported_event_types do
[
:add_system,
:deleted_system,
:system_renamed,
:system_metadata_changed,
:signatures_updated,
:signature_added,
:signature_removed,
:connection_added,
:connection_removed,
:connection_updated,
:character_added,
:character_removed,
:character_updated,
:map_kill,
:acl_member_added,
:acl_member_removed,
:acl_member_updated,
:rally_point_added,
:rally_point_removed
]
end
def supported_event_types, do: @supported_event_types
@doc """
Validates an event type.

View File

@@ -212,6 +212,7 @@ defmodule WandererApp.ExternalEvents.JsonApiFormatter do
"time_status" => payload["time_status"] || payload[:time_status],
"mass_status" => payload["mass_status"] || payload[:mass_status],
"ship_size_type" => payload["ship_size_type"] || payload[:ship_size_type],
"locked" => payload["locked"] || payload[:locked],
"updated_at" => event.timestamp
},
"relationships" => %{

View File

@@ -82,16 +82,9 @@ defmodule WandererApp.ExternalEvents.MapEventRelay do
@impl true
def handle_call({:deliver_event, %Event{} = event}, _from, state) do
# Log ACL events at info level for debugging
if event.type in [:acl_member_added, :acl_member_removed, :acl_member_updated] do
Logger.debug(fn ->
"MapEventRelay received :deliver_event (call) for map #{event.map_id}, type: #{event.type}"
end)
else
Logger.debug(fn ->
"MapEventRelay received :deliver_event (call) for map #{event.map_id}, type: #{event.type}"
end)
end
Logger.debug(fn ->
"MapEventRelay received :deliver_event (call) for map #{event.map_id}, type: #{event.type}"
end)
new_state = deliver_single_event(event, state)
{:reply, :ok, new_state}

View File

@@ -31,7 +31,7 @@ defmodule WandererApp.StartCorpWalletTrackerTask do
if not is_nil(admin_character) do
:ok =
WandererApp.Character.TrackerManager.start_tracking(admin_character.id, keep_alive: true)
WandererApp.Character.TrackerManager.start_tracking(admin_character.id)
{:ok, _pid} =
WandererApp.Character.TrackerManager.start_transaction_tracker(admin_character.id)

View File

@@ -546,7 +546,7 @@ defmodule WandererApp.Kills.Client do
end
end
defp check_health(%{socket_pid: pid, last_message_time: last_msg_time} = state)
defp check_health(%{socket_pid: pid, last_message_time: last_msg_time} = _state)
when not is_nil(pid) and not is_nil(last_msg_time) do
cond do
not socket_alive?(pid) ->

View File

@@ -109,8 +109,8 @@ defmodule WandererApp.Kills.MapEventListener do
# Handle re-subscription attempt
def handle_info(:resubscribe_to_maps, state) do
running_maps = WandererApp.Map.RegistryHelper.list_all_maps()
current_running_map_ids = MapSet.new(Enum.map(running_maps, & &1.id))
{:ok, started_maps} = WandererApp.Cache.lookup("started_maps", [])
current_running_map_ids = MapSet.new(started_maps)
Logger.debug(fn ->
"[MapEventListener] Resubscribing to maps. Running maps: #{MapSet.size(current_running_map_ids)}"
@@ -229,7 +229,7 @@ defmodule WandererApp.Kills.MapEventListener do
{:error, :not_running} ->
{:error, :not_running}
{:ok, status} ->
{:ok, _status} ->
{:error, :not_connected}
error ->

View File

@@ -88,13 +88,13 @@ defmodule WandererApp.Kills.Subscription.MapIntegration do
def get_tracked_system_ids do
try do
# Get systems from currently running maps
active_maps = WandererApp.Map.RegistryHelper.list_all_maps()
{:ok, started_maps_ids} = WandererApp.Cache.lookup("started_maps", [])
Logger.debug("[MapIntegration] Found #{length(active_maps)} active maps")
Logger.debug("[MapIntegration] Found #{length(started_maps_ids)} active maps")
map_systems =
active_maps
|> Enum.map(fn %{id: map_id} ->
started_maps_ids
|> Enum.map(fn map_id ->
case WandererApp.MapSystemRepo.get_visible_by_map(map_id) do
{:ok, systems} ->
system_ids = Enum.map(systems, & &1.solar_system_id)
@@ -114,7 +114,7 @@ defmodule WandererApp.Kills.Subscription.MapIntegration do
|> Enum.uniq()
Logger.debug(fn ->
"[MapIntegration] Total tracked systems: #{length(system_ids)} across #{length(active_maps)} maps"
"[MapIntegration] Total tracked systems: #{length(system_ids)} across #{length(started_maps_ids)} maps"
end)
{:ok, system_ids}

View File

@@ -136,9 +136,6 @@ defmodule WandererApp.License.LicenseManager do
end
end
@doc """
Updates a license's expiration date based on the map's subscription.
"""
def update_license_expiration_from_subscription(map_id) do
with {:ok, license} <- get_license_by_map_id(map_id),
{:ok, subscription} <- SubscriptionManager.get_active_map_subscription(map_id) do
@@ -146,43 +143,15 @@ defmodule WandererApp.License.LicenseManager do
end
end
@doc """
Checks if a license is expired.
"""
defp expired?(license) do
case license.expire_at do
nil -> false
expire_at -> DateTime.compare(expire_at, DateTime.utc_now()) == :lt
end
end
@doc """
Generates a random string of specified length.
"""
defp generate_random_string(length) do
:crypto.strong_rand_bytes(length)
|> Base.encode16(case: :upper)
|> binary_part(0, length)
end
@doc """
Formats a datetime as YYYY-MM-DD.
"""
defp format_date(datetime) do
Calendar.strftime(datetime, "%Y-%m-%d")
end
@doc """
Generates a link to the map.
"""
defp generate_map_link(map_slug) do
base_url = Application.get_env(:wanderer_app, :web_app_url)
"#{base_url}/#{map_slug}"
end
@doc """
Gets the map owner's data.
"""
defp get_map_owner_email(map) do
{:ok, %{owner: owner}} = map |> Ash.load([:owner])
"#{owner.name}(#{owner.eve_id})"

View File

@@ -135,7 +135,7 @@ defmodule WandererApp.License.LicenseManagerClient do
Application.get_env(:wanderer_app, :license_manager)[:auth_key]
end
defp parse_error_response(status, %{"error" => error_message}) do
defp parse_error_response(_status, %{"error" => error_message}) do
{:error, error_message}
end

View File

@@ -7,6 +7,8 @@ defmodule WandererApp.Map do
require Logger
@map_state_cache :map_state_cache
defstruct map_id: nil,
name: nil,
scope: :none,
@@ -51,8 +53,8 @@ defmodule WandererApp.Map do
{:ok, map} ->
map
_ ->
Logger.error(fn -> "Failed to get map #{map_id}" end)
error ->
Logger.error("Failed to get map #{map_id}: #{inspect(error)}")
%{}
end
end
@@ -69,6 +71,50 @@ defmodule WandererApp.Map do
end)
end
def get_map_state(map_id, init_if_empty? \\ true) do
case Cachex.get(@map_state_cache, map_id) do
{:ok, nil} ->
case init_if_empty? do
true ->
map_state = WandererApp.Map.Server.Impl.do_init_state(map_id: map_id)
Cachex.put(@map_state_cache, map_id, map_state)
{:ok, map_state}
_ ->
{:ok, nil}
end
{:ok, map_state} ->
{:ok, map_state}
end
end
def get_map_state!(map_id) do
case get_map_state(map_id) do
{:ok, map_state} ->
map_state
_ ->
Logger.error("Failed to get map_state #{map_id}")
throw("Failed to get map_state #{map_id}")
end
end
def update_map_state(map_id, state_update),
do:
Cachex.get_and_update(@map_state_cache, map_id, fn map_state ->
case map_state do
nil ->
new_state = WandererApp.Map.Server.Impl.do_init_state(map_id: map_id)
{:commit, Map.merge(new_state, state_update)}
_ ->
{:commit, Map.merge(map_state, state_update)}
end
end)
def delete_map_state(map_id), do: Cachex.del(@map_state_cache, map_id)
def get_characters_limit(map_id),
do: {:ok, map_id |> get_map!() |> Map.get(:characters_limit, 50)}
@@ -88,6 +134,22 @@ defmodule WandererApp.Map do
def get_options(map_id),
do: {:ok, map_id |> get_map!() |> Map.get(:options, Map.new())}
def get_tracked_character_ids(map_id) do
{:ok,
map_id
|> get_map!()
|> Map.get(:characters, [])
|> Enum.filter(fn character_id ->
{:ok, tracking_start_time} =
WandererApp.Cache.lookup(
"character:#{character_id}:map:#{map_id}:tracking_start_time",
nil
)
not is_nil(tracking_start_time)
end)}
end
@doc """
Returns a full list of characters in the map
"""
@@ -137,9 +199,31 @@ defmodule WandererApp.Map do
def add_characters!(map, []), do: map
def add_characters!(%{map_id: map_id} = map, [character | rest]) do
add_character(map_id, character)
add_characters!(map, rest)
def add_characters!(%{map_id: map_id} = map, characters) when is_list(characters) do
# Get current characters list once
current_characters = Map.get(map, :characters, [])
characters_ids =
characters
|> Enum.map(fn %{id: char_id} -> char_id end)
# Filter out characters that already exist
new_character_ids =
characters_ids
|> Enum.reject(fn char_id -> char_id in current_characters end)
# If all characters already exist, return early
if new_character_ids == [] do
map
else
case update_map(map_id, %{characters: new_character_ids ++ current_characters}) do
{:commit, map} ->
map
_ ->
map
end
end
end
def add_character(
@@ -152,64 +236,13 @@ defmodule WandererApp.Map do
case not (characters |> Enum.member?(character_id)) do
true ->
WandererApp.Character.get_map_character(map_id, character_id)
|> case do
{:ok,
%{
alliance_id: alliance_id,
corporation_id: corporation_id,
solar_system_id: solar_system_id,
structure_id: structure_id,
station_id: station_id,
ship: ship_type_id,
ship_name: ship_name
}} ->
map_id
|> update_map(%{characters: [character_id | characters]})
map_id
|> update_map(%{characters: [character_id | characters]})
# WandererApp.Cache.insert(
# "map:#{map_id}:character:#{character_id}:alliance_id",
# alliance_id
# )
# WandererApp.Cache.insert(
# "map:#{map_id}:character:#{character_id}:corporation_id",
# corporation_id
# )
# WandererApp.Cache.insert(
# "map:#{map_id}:character:#{character_id}:solar_system_id",
# solar_system_id
# )
# WandererApp.Cache.insert(
# "map:#{map_id}:character:#{character_id}:structure_id",
# structure_id
# )
# WandererApp.Cache.insert(
# "map:#{map_id}:character:#{character_id}:station_id",
# station_id
# )
# WandererApp.Cache.insert(
# "map:#{map_id}:character:#{character_id}:ship_type_id",
# ship_type_id
# )
# WandererApp.Cache.insert(
# "map:#{map_id}:character:#{character_id}:ship_name",
# ship_name
# )
:ok
error ->
error
end
:ok
_ ->
{:error, :already_exists}
:ok
end
end
@@ -486,15 +519,16 @@ defmodule WandererApp.Map do
solar_system_source,
solar_system_target
) do
case map_id
|> get_map!()
|> Map.get(:connections, Map.new())
connections =
map_id
|> get_map!()
|> Map.get(:connections, Map.new())
case connections
|> Map.get("#{solar_system_source}_#{solar_system_target}") do
nil ->
{:ok,
map_id
|> get_map!()
|> Map.get(:connections, Map.new())
connections
|> Map.get("#{solar_system_target}_#{solar_system_source}")}
connection ->

View File

@@ -0,0 +1,364 @@
defmodule WandererApp.Map.CacheRTree do
  @moduledoc """
  Cache-based spatial index implementing DDRT behavior.

  Provides R-tree-like spatial indexing using grid-based storage in Nebulex cache.
  No GenServer processes required - all operations are functional and cache-based.

  ## Storage Structure

  Data is stored in the cache with the following keys:
  - `"rtree:<name>:leaves"` - Map of solar_system_id => {id, bounding_box}
  - `"rtree:<name>:grid"` - Map of {grid_x, grid_y} => [solar_system_id, ...]
  - `"rtree:<name>:config"` - Tree configuration

  ## Spatial Grid

  Uses 150x150 pixel grid cells for O(1) spatial queries. Each system node
  (130x34 pixels) typically overlaps 1-2 grid cells, providing fast collision
  detection without the overhead of GenServer-based tree traversal.
  """

  @behaviour WandererApp.Test.DDRT

  alias WandererApp.Cache

  # Grid cell size in pixels
  @grid_size 150

  # Type definitions matching DDRT behavior
  @type id :: number() | String.t()
  @type coord_range :: {number(), number()}
  @type bounding_box :: list(coord_range())
  @type leaf :: {id(), bounding_box()}

  # ============================================================================
  # Public API - DDRT Behavior Implementation
  # ============================================================================

  @doc """
  Insert one or more leaves into the spatial index.

  ## Parameters
  - `leaf_or_leaves` - Single `{id, bounding_box}` tuple or list of tuples
  - `name` - Name of the R-tree instance

  ## Examples

      iex> CacheRTree.insert({30000142, [{100, 230}, {50, 84}]}, "rtree_map_123")
      {:ok, %{}}

      iex> CacheRTree.insert([
      ...>   {30000142, [{100, 230}, {50, 84}]},
      ...>   {30000143, [{250, 380}, {100, 134}]}
      ...> ], "rtree_map_123")
      {:ok, %{}}
  """
  @impl true
  def insert(leaf_or_leaves, name) do
    leaves = normalize_leaves(leaf_or_leaves)

    # Update leaves storage
    current_leaves = get_leaves(name)

    new_leaves =
      Enum.reduce(leaves, current_leaves, fn {id, box}, acc ->
        Map.put(acc, id, {id, box})
      end)

    put_leaves(name, new_leaves)

    # Update spatial grid
    current_grid = get_grid(name)

    new_grid =
      Enum.reduce(leaves, current_grid, fn leaf, grid ->
        add_to_grid(grid, leaf)
      end)

    put_grid(name, new_grid)

    # Match DRTree return format
    {:ok, %{}}
  end

  @doc """
  Delete one or more leaves from the spatial index.

  ## Parameters
  - `id_or_ids` - Single ID or list of IDs to remove
  - `name` - Name of the R-tree instance

  ## Examples

      iex> CacheRTree.delete([30000142], "rtree_map_123")
      {:ok, %{}}

      iex> CacheRTree.delete([30000142, 30000143], "rtree_map_123")
      {:ok, %{}}
  """
  @impl true
  def delete(id_or_ids, name) do
    ids = normalize_ids(id_or_ids)

    current_leaves = get_leaves(name)
    current_grid = get_grid(name)

    # Remove from leaves and track bounding boxes for grid cleanup
    {new_leaves, removed} =
      Enum.reduce(ids, {current_leaves, []}, fn id, {leaves, removed} ->
        case Map.pop(leaves, id) do
          {nil, leaves} -> {leaves, removed}
          {{^id, box}, leaves} -> {leaves, [{id, box} | removed]}
        end
      end)

    # Update grid
    new_grid =
      Enum.reduce(removed, current_grid, fn {id, box}, grid ->
        remove_from_grid(grid, id, box)
      end)

    put_leaves(name, new_leaves)
    put_grid(name, new_grid)

    {:ok, %{}}
  end

  @doc """
  Update a leaf's bounding box.

  ## Parameters
  - `id` - ID of the leaf to update
  - `box_or_tuple` - Either a new `bounding_box` or `{old_box, new_box}` tuple
  - `name` - Name of the R-tree instance

  ## Examples

      iex> CacheRTree.update(30000142, [{150, 280}, {200, 234}], "rtree_map_123")
      {:ok, %{}}

      iex> CacheRTree.update(30000142, {[{100, 230}, {50, 84}], [{150, 280}, {200, 234}]}, "rtree_map_123")
      {:ok, %{}}
  """
  @impl true
  def update(id, box_or_tuple, name) do
    {old_box, new_box} =
      case box_or_tuple do
        {old, new} ->
          {old, new}

        box ->
          # Need to look up old box
          leaves = get_leaves(name)

          case Map.get(leaves, id) do
            {^id, old} -> {old, box}
            # Will be handled as new insert
            nil -> {nil, box}
          end
      end

    # Delete old, insert new
    if old_box, do: delete([id], name)
    insert({id, new_box}, name)
  end

  @doc """
  Query for all leaves intersecting a bounding box.

  Uses grid-based spatial indexing for O(1) average case performance.

  ## Parameters
  - `bounding_box` - Query bounding box `[{x_min, x_max}, {y_min, y_max}]`
  - `name` - Name of the R-tree instance

  ## Returns
  - `{:ok, [id()]}` - List of IDs intersecting the query box
  - `{:error, term()}` - Error if query fails

  ## Examples

      iex> CacheRTree.query([{200, 330}, {90, 124}], "rtree_map_123")
      {:ok, [30000143]}

      iex> CacheRTree.query([{0, 50}, {0, 50}], "rtree_map_123")
      {:ok, []}
  """
  @impl true
  def query(bounding_box, name) do
    # Get candidate IDs from grid cells
    grid = get_grid(name)
    grid_cells = get_grid_cells(bounding_box)

    candidate_ids =
      grid_cells
      |> Enum.flat_map(fn cell -> Map.get(grid, cell, []) end)
      |> Enum.uniq()

    # Precise intersection test
    leaves = get_leaves(name)

    matching_ids =
      Enum.filter(candidate_ids, fn id ->
        case Map.get(leaves, id) do
          {^id, leaf_box} -> boxes_intersect?(bounding_box, leaf_box)
          nil -> false
        end
      end)

    {:ok, matching_ids}
  rescue
    error -> {:error, error}
  end

  # ============================================================================
  # Initialization and Management
  # ============================================================================

  @doc """
  Initialize an empty R-tree in the cache.

  ## Parameters
  - `name` - Name for this R-tree instance
  - `config` - Optional configuration map (width, verbose, etc.)

  ## Examples

      iex> CacheRTree.init_tree("rtree_map_123")
      :ok

      iex> CacheRTree.init_tree("rtree_map_456", %{width: 150, verbose: false})
      :ok
  """
  @impl true
  def init_tree(name, config \\ %{}) do
    Cache.put(cache_key(name, :leaves), %{})
    Cache.put(cache_key(name, :grid), %{})
    Cache.put(cache_key(name, :config), Map.merge(default_config(), config))
    :ok
  end

  @doc """
  Clear all data for an R-tree from the cache.

  Should be called when a map is shut down to free memory.

  ## Parameters
  - `name` - Name of the R-tree instance to clear

  ## Examples

      iex> CacheRTree.clear_tree("rtree_map_123")
      :ok
  """
  def clear_tree(name) do
    Cache.delete(cache_key(name, :leaves))
    Cache.delete(cache_key(name, :grid))
    Cache.delete(cache_key(name, :config))
    :ok
  end

  # ============================================================================
  # Private Helper Functions
  # ============================================================================

  # Cache access helpers
  defp cache_key(name, suffix), do: "rtree:#{name}:#{suffix}"

  defp get_leaves(name) do
    Cache.get(cache_key(name, :leaves)) || %{}
  end

  defp put_leaves(name, leaves) do
    Cache.put(cache_key(name, :leaves), leaves)
  end

  defp get_grid(name) do
    Cache.get(cache_key(name, :grid)) || %{}
  end

  defp put_grid(name, grid) do
    Cache.put(cache_key(name, :grid), grid)
  end

  defp default_config do
    %{
      width: 150,
      grid_size: @grid_size,
      verbose: false
    }
  end

  # Grid operations

  # Registers `id` in every grid cell its bounding box overlaps,
  # avoiding duplicate entries within a cell.
  defp add_to_grid(grid, {id, bounding_box}) do
    grid_cells = get_grid_cells(bounding_box)

    Enum.reduce(grid_cells, grid, fn cell, acc ->
      Map.update(acc, cell, [id], fn existing_ids ->
        if id in existing_ids do
          existing_ids
        else
          [id | existing_ids]
        end
      end)
    end)
  end

  # Removes `id` from every grid cell its bounding box overlaps.
  defp remove_from_grid(grid, id, bounding_box) do
    grid_cells = get_grid_cells(bounding_box)

    Enum.reduce(grid_cells, grid, fn cell, acc ->
      Map.update(acc, cell, [], fn existing_ids ->
        List.delete(existing_ids, id)
      end)
    end)
  end

  # Calculate which grid cells a bounding box overlaps
  defp get_grid_cells(bounding_box) do
    [{x_min, x_max}, {y_min, y_max}] = bounding_box

    # Calculate cell coordinates using floor division
    # Handles negative coordinates correctly
    cell_x_min = div_floor(x_min, @grid_size)
    cell_x_max = div_floor(x_max, @grid_size)
    cell_y_min = div_floor(y_min, @grid_size)
    cell_y_max = div_floor(y_max, @grid_size)

    # Generate all overlapping cells
    for x <- cell_x_min..cell_x_max,
        y <- cell_y_min..cell_y_max do
      {x, y}
    end
  end

  # Floor division that works for both integers and floats (coordinates are
  # typed `number()`), including negative values.
  #
  # FIX: the previous implementation used `div/2` and `rem/2`, which raise
  # `ArithmeticError` when a coordinate is a float. `a / b` always yields a
  # float and `floor/1` rounds toward negative infinity, returning an
  # integer grid coordinate; for integer inputs the result is identical to
  # the old integer floor division.
  defp div_floor(a, b), do: floor(a / b)

  # Check if two bounding boxes intersect
  defp boxes_intersect?(box1, box2) do
    [{x1_min, x1_max}, {y1_min, y1_max}] = box1
    [{x2_min, x2_max}, {y2_min, y2_max}] = box2

    # Boxes intersect if they overlap on both axes
    x_overlap = x1_min <= x2_max and x2_min <= x1_max
    y_overlap = y1_min <= y2_max and y2_min <= y1_max

    x_overlap and y_overlap
  end

  # Input normalization
  defp normalize_leaves(leaf) when is_tuple(leaf), do: [leaf]
  defp normalize_leaves(leaves) when is_list(leaves), do: leaves

  defp normalize_ids(id) when is_number(id) or is_binary(id), do: [id]
  defp normalize_ids(ids) when is_list(ids), do: ids
end

View File

@@ -1,42 +0,0 @@
defmodule WandererApp.Map.DynamicSupervisor do
  @moduledoc """
  Dynamically starts a map server
  """
  use DynamicSupervisor

  require Logger

  alias WandererApp.Map.Server

  # Starts this supervisor registered under the module name.
  def start_link(_arg),
    do: DynamicSupervisor.start_link(__MODULE__, nil, name: __MODULE__)

  def init(nil), do: DynamicSupervisor.init(strategy: :one_for_one)

  # Starts a map server child for `map_id`. Returns :ok when the child is
  # started (or already running) and {:error, reason} otherwise.
  def _start_child(map_id) do
    spec = %{
      id: Server,
      start: {Server, :start_link, [map_id]},
      restart: :transient
    }

    __MODULE__
    |> DynamicSupervisor.start_child(spec)
    |> normalize_start_result()
  end

  def which_children, do: Supervisor.which_children(__MODULE__)

  # Collapses start_child results: treat "already started" as success.
  defp normalize_start_result({:ok, _pid}), do: :ok
  defp normalize_start_result({:error, {:already_started, _pid}}), do: :ok
  defp normalize_start_result({:error, reason}), do: {:error, reason}
end

View File

@@ -0,0 +1,38 @@
defmodule WandererApp.Map.GarbageCollector do
  @moduledoc """
  Garbage collection for stale map data: bulk-deletes map chain passages
  and map system signatures whose `updated_at` is older than their
  retention window.
  """
  require Logger
  require Ash.Query
  # Configurable logger module, resolved at compile time from app config.
  @logger Application.compile_env(:wanderer_app, :logger)
  # Retention windows, in seconds.
  @one_week_seconds 7 * 24 * 60 * 60
  @two_weeks_seconds 14 * 24 * 60 * 60
  # Deletes chain-passage records not updated within the last week.
  # Destroys in batches of 100; raises on failure (`Ash.bulk_destroy!`).
  def cleanup_chain_passages() do
    Logger.info("Start cleanup old map chain passages...")
    WandererApp.Api.MapChainPassages
    |> Ash.Query.filter(updated_at: [less_than: get_cutoff_time(@one_week_seconds)])
    |> Ash.bulk_destroy!(:destroy, %{}, batch_size: 100)
    @logger.info(fn -> "All map chain passages processed" end)
    :ok
  end
  # Deletes system-signature records not updated within the last two weeks.
  # Destroys in batches of 100; raises on failure (`Ash.bulk_destroy!`).
  def cleanup_system_signatures() do
    Logger.info("Start cleanup old map system signatures...")
    WandererApp.Api.MapSystemSignature
    |> Ash.Query.filter(updated_at: [less_than: get_cutoff_time(@two_weeks_seconds)])
    |> Ash.bulk_destroy!(:destroy, %{}, batch_size: 100)
    @logger.info(fn -> "All map system signatures processed" end)
    :ok
  end
  # Returns the UTC timestamp `seconds` ago from now.
  defp get_cutoff_time(seconds), do: DateTime.utc_now() |> DateTime.add(-seconds, :second)
end

View File

@@ -8,12 +8,10 @@ defmodule WandererApp.Map.Manager do
require Logger
alias WandererApp.Map.Server
alias WandererApp.Map.ServerSupervisor
@maps_start_per_second 10
@maps_start_interval 1000
@maps_start_chunk_size 20
@maps_start_interval 500
@maps_queue :maps_queue
@garbage_collection_interval :timer.hours(1)
@check_maps_queue_interval :timer.seconds(1)
@pings_cleanup_interval :timer.minutes(10)
@@ -39,15 +37,11 @@ defmodule WandererApp.Map.Manager do
do: WandererApp.Queue.push_uniq(@maps_queue, map_id)
def stop_map(map_id) when is_binary(map_id) do
case Server.map_pid(map_id) do
pid when is_pid(pid) ->
GenServer.cast(
pid,
:stop
)
with {:ok, started_maps} <- WandererApp.Cache.lookup("started_maps", []),
true <- Enum.member?(started_maps, map_id) do
Logger.warning(fn -> "Shutting down map server: #{inspect(map_id)}" end)
nil ->
:ok
WandererApp.Map.MapPoolDynamicSupervisor.stop_map(map_id)
end
end
@@ -56,23 +50,16 @@ defmodule WandererApp.Map.Manager do
@impl true
def init([]) do
WandererApp.Queue.new(@maps_queue, [])
WandererApp.Cache.insert("started_maps", [])
{:ok, check_maps_queue_timer} =
:timer.send_interval(@check_maps_queue_interval, :check_maps_queue)
{:ok, garbage_collector_timer} =
:timer.send_interval(@garbage_collection_interval, :garbage_collect)
{:ok, pings_cleanup_timer} =
:timer.send_interval(@pings_cleanup_interval, :cleanup_pings)
safe_async_task(fn ->
start_last_active_maps()
end)
{:ok,
%{
garbage_collector_timer: garbage_collector_timer,
check_maps_queue_timer: check_maps_queue_timer,
pings_cleanup_timer: pings_cleanup_timer
}}
@@ -106,36 +93,6 @@ defmodule WandererApp.Map.Manager do
end
end
@impl true
def handle_info(:garbage_collect, state) do
try do
WandererApp.Map.RegistryHelper.list_all_maps()
|> Enum.each(fn %{id: map_id, pid: server_pid} ->
case Process.alive?(server_pid) do
true ->
presence_character_ids =
WandererApp.Cache.lookup!("map_#{map_id}:presence_character_ids", [])
if presence_character_ids |> Enum.empty?() do
Logger.info("No more characters present on: #{map_id}, shutting down map server...")
stop_map(map_id)
end
false ->
Logger.warning("Server not alive: #{inspect(server_pid)}")
:ok
end
end)
{:noreply, state}
rescue
e ->
Logger.error(Exception.message(e))
{:noreply, state}
end
end
@impl true
def handle_info(:cleanup_pings, state) do
try do
@@ -156,7 +113,7 @@ defmodule WandererApp.Map.Manager do
Enum.each(pings, fn %{id: ping_id, map_id: map_id, type: type} = ping ->
{:ok, %{system: system}} = ping |> Ash.load([:system])
WandererApp.Map.Server.Impl.broadcast!(map_id, :ping_cancelled, %{
Server.Impl.broadcast!(map_id, :ping_cancelled, %{
id: ping_id,
solar_system_id: system.solar_system_id,
type: type
@@ -173,26 +130,12 @@ defmodule WandererApp.Map.Manager do
end
end
defp start_last_active_maps() do
{:ok, last_map_states} =
WandererApp.Api.MapState.get_last_active(
DateTime.utc_now()
|> DateTime.add(-30, :minute)
)
last_map_states
|> Enum.map(fn %{map_id: map_id} -> map_id end)
|> Enum.each(fn map_id -> start_map(map_id) end)
:ok
end
defp start_maps() do
chunks =
@maps_queue
|> WandererApp.Queue.to_list!()
|> Enum.uniq()
|> Enum.chunk_every(@maps_start_per_second)
|> Enum.chunk_every(@maps_start_chunk_size)
WandererApp.Queue.clear(@maps_queue)
@@ -237,21 +180,21 @@ defmodule WandererApp.Map.Manager do
end
defp start_map_server(map_id) do
case DynamicSupervisor.start_child(
{:via, PartitionSupervisor, {WandererApp.Map.DynamicSupervisors, self()}},
{ServerSupervisor, map_id: map_id}
) do
{:ok, pid} ->
{:ok, pid}
with {:ok, started_maps} <- WandererApp.Cache.lookup("started_maps", []),
false <- Enum.member?(started_maps, map_id) do
WandererApp.Cache.insert_or_update(
"started_maps",
[map_id],
fn existing ->
[map_id | existing] |> Enum.uniq()
end
)
{:error, {:already_started, pid}} ->
{:ok, pid}
{:error, {:shutdown, {:failed_to_start_child, Server, {:already_started, pid}}}} ->
{:ok, pid}
{:error, reason} ->
{:error, reason}
WandererApp.Map.MapPoolDynamicSupervisor.start_map(map_id)
else
_error ->
Logger.warning("Map already started: #{map_id}")
:ok
end
end
end

View File

@@ -0,0 +1,825 @@
defmodule WandererApp.Map.MapPool do
@moduledoc false
use GenServer, restart: :transient
require Logger
alias WandererApp.Map.{MapPoolState, Server}
defstruct [
:map_ids,
:uuid
]
@name __MODULE__
@cache :map_pool_cache
@registry :map_pool_registry
@unique_registry :unique_map_pool_registry
@map_pool_limit 10
@garbage_collection_interval :timer.hours(4)
@systems_cleanup_timeout :timer.minutes(30)
@characters_cleanup_timeout :timer.minutes(5)
@connections_cleanup_timeout :timer.minutes(5)
@backup_state_timeout :timer.minutes(1)
# Builds an empty %MapPool{} struct.
def new(), do: __struct__()
# Builds a %MapPool{} struct from the given fields (keyword list or map).
def new(args), do: __struct__(args)
# Accept both {uuid, map_ids} tuple (from supervisor restart) and just map_ids (legacy).
# The process is registered under `Module.concat(__MODULE__, uuid)` so each
# pool instance has a unique name derived from its UUID.
def start_link({uuid, map_ids}) when is_binary(uuid) and is_list(map_ids) do
  GenServer.start_link(
    @name,
    {uuid, map_ids},
    name: Module.concat(__MODULE__, uuid)
  )
end
# For backward compatibility - generate UUID if only map_ids provided
def start_link(map_ids) when is_list(map_ids) do
  uuid = UUID.uuid1()
  GenServer.start_link(
    @name,
    {uuid, map_ids},
    name: Module.concat(__MODULE__, uuid)
  )
end
@impl true
# Initialises the pool. If ETS holds state persisted by a previous
# incarnation of this UUID (i.e. the pool crashed), those map ids are merged
# with the newly requested ones and recovery telemetry is emitted. Actual
# map startup is deferred to `handle_continue({:start, ...})`.
def init({uuid, map_ids}) do
  # Check for crash recovery - if we have previous state in ETS, merge it with new map_ids
  {final_map_ids, recovery_info} =
    case MapPoolState.get_pool_state(uuid) do
      {:ok, recovered_map_ids} ->
        # Merge and deduplicate map IDs
        merged = Enum.uniq(recovered_map_ids ++ map_ids)
        recovery_count = length(recovered_map_ids)
        Logger.info(
          "[Map Pool #{uuid}] Crash recovery detected: recovering #{recovery_count} maps",
          pool_uuid: uuid,
          recovered_maps: recovered_map_ids,
          new_maps: map_ids,
          total_maps: length(merged)
        )
        # Emit telemetry for crash recovery
        :telemetry.execute(
          [:wanderer_app, :map_pool, :recovery, :start],
          %{recovered_map_count: recovery_count, total_map_count: length(merged)},
          %{pool_uuid: uuid}
        )
        {merged, %{recovered: true, count: recovery_count}}
      {:error, :not_found} ->
        # Normal startup, no previous state to recover
        {map_ids, %{recovered: false}}
    end
  # Register with empty list - maps will be added as they're started in handle_continue
  {:ok, _} = Registry.register(@unique_registry, Module.concat(__MODULE__, uuid), [])
  {:ok, _} = Registry.register(@registry, __MODULE__, uuid)
  # Don't pre-populate cache - will be populated as maps start in handle_continue
  # This prevents duplicates when recovering
  state =
    %{
      uuid: uuid,
      map_ids: []
    }
    |> new()
  {:ok, state, {:continue, {:start, {final_map_ids, recovery_info}}}}
end
@impl true
# On graceful shutdown (:normal, :shutdown, {:shutdown, _}) the persisted
# ETS state is deleted; on any other (abnormal) reason it is kept so that
# `init/1` can recover this pool's maps after a crash restart.
#
# The previous version repeated the identical cleanup branch three times;
# the classification is now factored into `graceful_shutdown?/1`.
def terminate(reason, %{uuid: uuid} = _state) do
  if graceful_shutdown?(reason) do
    Logger.debug("[Map Pool #{uuid}] Graceful shutdown, cleaning up ETS state")
    MapPoolState.delete_pool_state(uuid)
  else
    Logger.warning(
      "[Map Pool #{uuid}] Abnormal termination (#{inspect(reason)}), keeping ETS state for recovery"
    )
  end

  :ok
end

# Termination reasons for which the persisted recovery state is discarded.
defp graceful_shutdown?(:normal), do: true
defp graceful_shutdown?(:shutdown), do: true
defp graceful_shutdown?({:shutdown, _}), do: true
defp graceful_shutdown?(_), do: false
@impl true
# Starts every map in `map_ids` synchronously via `do_start_map/2`,
# accumulating the pool state as maps succeed and collecting the ids that
# failed. Emits recovery telemetry when this startup followed a crash, then
# schedules all periodic maintenance timers.
def handle_continue({:start, {map_ids, recovery_info}}, state) do
  Logger.info("#{@name} started")
  # Track recovery statistics
  start_time = System.monotonic_time(:millisecond)
  initial_count = length(map_ids)
  # Start maps synchronously and accumulate state changes
  {new_state, failed_maps} =
    map_ids
    |> Enum.reduce({state, []}, fn map_id, {current_state, failed} ->
      case do_start_map(map_id, current_state) do
        {:ok, updated_state} ->
          {updated_state, failed}
        {:error, reason} ->
          Logger.error("[Map Pool] Failed to start map #{map_id}: #{reason}")
          # Emit telemetry for individual map recovery failure
          if recovery_info.recovered do
            :telemetry.execute(
              [:wanderer_app, :map_pool, :recovery, :map_failed],
              %{map_id: map_id},
              %{pool_uuid: state.uuid, reason: reason}
            )
          end
          {current_state, [map_id | failed]}
      end
    end)
  # Calculate final statistics
  end_time = System.monotonic_time(:millisecond)
  duration_ms = end_time - start_time
  # NOTE: counts all maps now in the pool, not only ones started here
  successful_count = length(new_state.map_ids)
  failed_count = length(failed_maps)
  # Log and emit telemetry for recovery completion
  if recovery_info.recovered do
    Logger.info(
      "[Map Pool #{state.uuid}] Crash recovery completed: #{successful_count}/#{initial_count} maps recovered in #{duration_ms}ms",
      pool_uuid: state.uuid,
      recovered_count: successful_count,
      failed_count: failed_count,
      total_count: initial_count,
      duration_ms: duration_ms,
      failed_maps: failed_maps
    )
    :telemetry.execute(
      [:wanderer_app, :map_pool, :recovery, :complete],
      %{
        recovered_count: successful_count,
        failed_count: failed_count,
        duration_ms: duration_ms
      },
      %{pool_uuid: state.uuid}
    )
  end
  # Schedule periodic tasks
  Process.send_after(self(), :backup_state, @backup_state_timeout)
  Process.send_after(self(), :cleanup_systems, 15_000)
  Process.send_after(self(), :cleanup_characters, @characters_cleanup_timeout)
  Process.send_after(self(), :cleanup_connections, @connections_cleanup_timeout)
  Process.send_after(self(), :garbage_collect, @garbage_collection_interval)
  # Start message queue monitoring
  Process.send_after(self(), :monitor_message_queue, :timer.seconds(30))
  {:noreply, new_state}
end
@impl true
# Asynchronous second half of `{:start_map, map_id}`: performs the actual
# map-server initialization after the GenServer.call has already replied.
# On failure the map's pre-registration (registry/cache/state/ETS) is rolled
# back via `do_unregister_map/3`.
def handle_continue({:init_map, map_id}, %{uuid: uuid} = state) do
  # Perform the actual map initialization asynchronously
  # This runs after the GenServer.call has already returned
  start_time = System.monotonic_time(:millisecond)
  try do
    # Initialize the map state and start the map server using extracted helper
    do_initialize_map_server(map_id)
    duration = System.monotonic_time(:millisecond) - start_time
    Logger.info("[Map Pool #{uuid}] Map #{map_id} initialized successfully in #{duration}ms")
    # Emit telemetry for slow initializations
    if duration > 5_000 do
      Logger.warning("[Map Pool #{uuid}] Slow map initialization: #{map_id} took #{duration}ms")
      :telemetry.execute(
        [:wanderer_app, :map_pool, :slow_init],
        %{duration_ms: duration},
        %{map_id: map_id, pool_uuid: uuid}
      )
    end
    {:noreply, state}
  rescue
    e ->
      duration = System.monotonic_time(:millisecond) - start_time
      Logger.error("""
      [Map Pool #{uuid}] Failed to initialize map #{map_id} after #{duration}ms: #{Exception.message(e)}
      #{Exception.format_stacktrace(__STACKTRACE__)}
      """)
      # Rollback: Remove from state, registry, cache, and ETS using extracted helper
      new_state = do_unregister_map(map_id, uuid, state)
      # Emit telemetry for failed initialization
      :telemetry.execute(
        [:wanderer_app, :map_pool, :init_failed],
        %{duration_ms: duration},
        %{map_id: map_id, pool_uuid: uuid, reason: Exception.message(e)}
      )
      {:noreply, new_state}
  end
end
@impl true
# Stops this pool process with reason :normal (triggers graceful cleanup
# in `terminate/2`).
def handle_cast(:stop, state), do: {:stop, :normal, state}
@impl true
# Adds a map to this pool. Replies with:
#   :ok                      - pool at capacity; a new pool is spawned for the map
#   {:ok, :already_started}  - map already in this pool
#   {:ok, :initializing}     - map claimed; actual init runs in handle_continue
#   {:error, :registration_failed} - could not claim the map in the registry
def handle_call({:start_map, map_id}, _from, %{map_ids: map_ids, uuid: uuid} = state) do
  # Enforce capacity limit to prevent pool overload due to race conditions
  if length(map_ids) >= @map_pool_limit do
    Logger.warning(
      "[Map Pool #{uuid}] Pool at capacity (#{length(map_ids)}/#{@map_pool_limit}), " <>
        "rejecting map #{map_id} and triggering new pool creation"
    )
    # Trigger a new pool creation attempt asynchronously
    # This allows the system to create a new pool for this map
    spawn(fn ->
      WandererApp.Map.MapPoolDynamicSupervisor.start_map(map_id)
    end)
    {:reply, :ok, state}
  else
    # Check if map is already started or being initialized
    if map_id in map_ids do
      Logger.debug("[Map Pool #{uuid}] Map #{map_id} already in pool")
      {:reply, {:ok, :already_started}, state}
    else
      # Pre-register the map in registry and cache to claim ownership
      # This prevents race conditions where multiple pools try to start the same map
      registry_result =
        Registry.update_value(@unique_registry, Module.concat(__MODULE__, uuid), fn r_map_ids ->
          [map_id | r_map_ids]
        end)
      case registry_result do
        {_new_value, _old_value} ->
          # Add to cache
          Cachex.put(@cache, map_id, uuid)
          # Add to state
          new_state = %{state | map_ids: [map_id | map_ids]}
          # Persist state to ETS
          MapPoolState.save_pool_state(uuid, new_state.map_ids)
          Logger.debug("[Map Pool #{uuid}] Map #{map_id} queued for async initialization")
          # Return immediately and initialize asynchronously
          {:reply, {:ok, :initializing}, new_state, {:continue, {:init_map, map_id}}}
        :error ->
          Logger.error("[Map Pool #{uuid}] Failed to register map #{map_id} in registry")
          {:reply, {:error, :registration_failed}, state}
      end
    end
  end
end
@impl true
# Removes a map from this pool; delegates to `do_stop_map/2` and replies
# :ok with the updated state, or {:error, reason} keeping the old state.
def handle_call({:stop_map, map_id}, _from, state) do
  with {:ok, updated_state} <- do_stop_map(map_id, state) do
    {:reply, :ok, updated_state}
  else
    {:error, reason} -> {:reply, {:error, reason}, state}
  end
end
# Starts `map_id` inside this pool, keeping registry, cache, the map
# server, the GenServer state and the persisted ETS backup in sync.
#
# The side-effecting steps run as a tagged sequence so that, on failure,
# exactly the steps that completed are rolled back.
#
# FIX: the previous implementation rebound `completed_operations` inside a
# `try` body and read it in `rescue`; rebindings inside `try` are not
# visible in the `rescue` clause, so the rollback always received `[]` and
# never undid anything. Tracking now happens outside of `try/rescue`.
#
# Returns `{:ok, new_state}` on success (or when the map is already in the
# pool) and `{:error, message}` after rolling back on failure.
defp do_start_map(map_id, %{map_ids: map_ids, uuid: uuid} = state) do
  if map_id in map_ids do
    # Map already started
    {:ok, state}
  else
    steps = [
      # Step 1: Update Registry (most critical, do first)
      registry: fn ->
        case Registry.update_value(
               @unique_registry,
               Module.concat(__MODULE__, uuid),
               fn r_map_ids -> [map_id | r_map_ids] end
             ) do
          {new_value, _old_value} when is_list(new_value) -> :ok
          :error -> raise "Failed to update registry for pool #{uuid}"
        end
      end,
      # Step 2: Add to cache
      cache: fn ->
        case Cachex.put(@cache, map_id, uuid) do
          {:ok, _} -> :ok
          {:error, reason} -> raise "Failed to add to cache: #{inspect(reason)}"
        end
      end,
      # Step 3: Start the map server using extracted helper
      map_server: fn -> do_initialize_map_server(map_id) end
    ]

    case run_start_steps(steps, []) do
      :ok ->
        # Step 4: Update GenServer state (in-memory, cannot fail)
        new_state = %{state | map_ids: [map_id | map_ids]}
        # Step 5: Persist state to ETS for crash recovery
        MapPoolState.save_pool_state(uuid, new_state.map_ids)
        Logger.debug("[Map Pool] Successfully started map #{map_id} in pool #{uuid}")
        {:ok, new_state}

      {:error, message, completed_operations} ->
        Logger.error(
          "[Map Pool] Failed to start map #{map_id} (completed: #{inspect(completed_operations)}): #{message}"
        )

        # Attempt rollback of exactly the operations that completed
        rollback_start_map_operations(map_id, uuid, completed_operations)
        {:error, message}
    end
  end
end

# Runs `{tag, fun}` steps in order, accumulating the tags of completed
# steps. When a step raises, returns `{:error, message, completed_tags}`
# where `completed_tags` excludes the failed step.
defp run_start_steps([], _completed), do: :ok

defp run_start_steps([{tag, fun} | rest], completed) do
  result =
    try do
      fun.()
      :ok
    rescue
      e ->
        Logger.error(Exception.format_stacktrace(__STACKTRACE__))
        {:error, Exception.message(e), completed}
    end

  case result do
    :ok -> run_start_steps(rest, [tag | completed])
    error -> error
  end
end
# Best-effort rollback of a failed `do_start_map/2`: undoes, in reverse
# order, only the operations listed in `completed_operations`
# (:map_server, :cache, :registry). Each rollback failure is logged but
# does not abort the remaining rollbacks.
defp rollback_start_map_operations(map_id, uuid, completed_operations) do
  Logger.warning("[Map Pool] Attempting to rollback start_map operations for #{map_id}")
  # Rollback in reverse order
  if :map_server in completed_operations do
    Logger.debug("[Map Pool] Rollback: Stopping map server for #{map_id}")
    try do
      Server.Impl.stop_map(map_id)
    rescue
      e ->
        Logger.error("[Map Pool] Rollback failed to stop map server: #{Exception.message(e)}")
    end
  end
  if :cache in completed_operations do
    Logger.debug("[Map Pool] Rollback: Removing #{map_id} from cache")
    case Cachex.del(@cache, map_id) do
      {:ok, _} ->
        :ok
      {:error, reason} ->
        Logger.error("[Map Pool] Rollback failed for cache: #{inspect(reason)}")
    end
  end
  if :registry in completed_operations do
    Logger.debug("[Map Pool] Rollback: Removing #{map_id} from registry")
    try do
      Registry.update_value(@unique_registry, Module.concat(__MODULE__, uuid), fn r_map_ids ->
        r_map_ids |> Enum.reject(fn id -> id == map_id end)
      end)
    rescue
      e ->
        Logger.error("[Map Pool] Rollback failed for registry: #{Exception.message(e)}")
    end
  end
end
# Stops `map_id` and removes it from this pool's registry, cache,
# GenServer state and persisted ETS backup.
#
# The side-effecting steps run as a tagged sequence so that, on failure,
# exactly the steps that completed are rolled back.
#
# FIX: the previous implementation rebound `completed_operations` inside a
# `try` body and read it in `rescue`; rebindings inside `try` are not
# visible in the `rescue` clause, so the rollback always received `[]` and
# never undid anything. Tracking now happens outside of `try/rescue`.
#
# Returns `{:ok, new_state}` on success and `{:error, message}` after
# rolling back on failure.
defp do_stop_map(map_id, %{map_ids: map_ids, uuid: uuid} = state) do
  steps = [
    # Step 1: Update Registry (most critical, do first)
    registry: fn ->
      case Registry.update_value(
             @unique_registry,
             Module.concat(__MODULE__, uuid),
             fn r_map_ids -> Enum.reject(r_map_ids, fn id -> id == map_id end) end
           ) do
        {new_value, _old_value} when is_list(new_value) -> :ok
        :error -> raise "Failed to update registry for pool #{uuid}"
      end
    end,
    # Step 2: Delete from cache
    cache: fn ->
      case Cachex.del(@cache, map_id) do
        {:ok, _} -> :ok
        {:error, reason} -> raise "Failed to delete from cache: #{inspect(reason)}"
      end
    end,
    # Step 3: Stop the map server (clean up all map resources)
    map_server: fn -> Server.Impl.stop_map(map_id) end
  ]

  case run_stop_steps(steps, []) do
    :ok ->
      # Step 4: Update GenServer state (in-memory, cannot fail)
      new_state = %{state | map_ids: Enum.reject(map_ids, fn id -> id == map_id end)}
      # Step 5: Persist state to ETS for crash recovery
      MapPoolState.save_pool_state(uuid, new_state.map_ids)
      Logger.debug("[Map Pool] Successfully stopped map #{map_id} from pool #{uuid}")
      {:ok, new_state}

    {:error, message, completed_operations} ->
      Logger.error(
        "[Map Pool] Failed to stop map #{map_id} (completed: #{inspect(completed_operations)}): #{message}"
      )

      # Attempt rollback of exactly the operations that completed
      rollback_stop_map_operations(map_id, uuid, completed_operations)
      {:error, message}
  end
end

# Runs `{tag, fun}` steps in order, accumulating the tags of completed
# steps. When a step raises, returns `{:error, message, completed_tags}`
# where `completed_tags` excludes the failed step.
defp run_stop_steps([], _completed), do: :ok

defp run_stop_steps([{tag, fun} | rest], completed) do
  result =
    try do
      fun.()
      :ok
    rescue
      e ->
        Logger.error(Exception.format_stacktrace(__STACKTRACE__))
        {:error, Exception.message(e), completed}
    end

  case result do
    :ok -> run_stop_steps(rest, [tag | completed])
    error -> error
  end
end
# Boots the map server for `map_id` from its cached map state. Shared by
# the synchronous (`do_start_map/2`) and asynchronous
# (`{:init_map, map_id}` continue) start paths; touches no pool state.
defp do_initialize_map_server(map_id) do
  map_state = WandererApp.Map.get_map_state!(map_id)
  Server.Impl.start_map(map_state)
end
# Removes every trace of `map_id` from this pool's bookkeeping (registry,
# cache, GenServer state and the persisted ETS backup). Used to roll back
# a failed async map initialization. Returns the updated state.
defp do_unregister_map(map_id, uuid, state) do
  drop_id = fn ids -> Enum.reject(ids, fn id -> id == map_id end) end

  # Remove from registry
  Registry.update_value(@unique_registry, Module.concat(__MODULE__, uuid), drop_id)

  # Remove from cache
  Cachex.del(@cache, map_id)

  # Update in-memory state and persist it for crash recovery
  updated_state = %{state | map_ids: drop_id.(state.map_ids)}
  MapPoolState.save_pool_state(uuid, updated_state.map_ids)

  updated_state
end
# Best-effort rollback of a failed `do_stop_map/2`: re-adds the map to the
# cache and registry when those removals had completed. Each rollback
# failure is logged but does not abort the remaining rollbacks.
defp rollback_stop_map_operations(map_id, uuid, completed_operations) do
  Logger.warning("[Map Pool] Attempting to rollback stop_map operations for #{map_id}")
  # Rollback in reverse order
  if :cache in completed_operations do
    Logger.debug("[Map Pool] Rollback: Re-adding #{map_id} to cache")
    case Cachex.put(@cache, map_id, uuid) do
      {:ok, _} ->
        :ok
      {:error, reason} ->
        Logger.error("[Map Pool] Rollback failed for cache: #{inspect(reason)}")
    end
  end
  if :registry in completed_operations do
    Logger.debug("[Map Pool] Rollback: Re-adding #{map_id} to registry")
    try do
      Registry.update_value(@unique_registry, Module.concat(__MODULE__, uuid), fn r_map_ids ->
        if map_id in r_map_ids do
          r_map_ids
        else
          [map_id | r_map_ids]
        end
      end)
    rescue
      e ->
        Logger.error("[Map Pool] Rollback failed for registry: #{Exception.message(e)}")
    end
  end
  # Note: We don't rollback map_server stop as Server.Impl.stop_map() is idempotent
  # and the cleanup operations are safe to leave in a "stopped" state
end
@impl true
# Deliberately crashes the pool with reason :error after replying :ok —
# presumably used to exercise crash/recovery behaviour in tests.
# NOTE(review): purpose not visible from this file; confirm before removing.
def handle_call(:error, _, state), do: {:stop, :error, :ok, state}
@impl true
# Periodic backup: persists the pool's map_ids to ETS, then saves each
# map's state to the database concurrently (bounded by 4x schedulers,
# 1-minute per-map timeout). Reschedules itself; exceptions are logged and
# swallowed so the timer loop survives.
def handle_info(:backup_state, %{map_ids: map_ids, uuid: uuid} = state) do
  Process.send_after(self(), :backup_state, @backup_state_timeout)
  try do
    # Persist pool state to ETS
    MapPoolState.save_pool_state(uuid, map_ids)
    # Backup individual map states to database
    map_ids
    |> Task.async_stream(
      fn map_id ->
        {:ok, _map_state} = Server.Impl.save_map_state(map_id)
      end,
      max_concurrency: System.schedulers_online() * 4,
      on_timeout: :kill_task,
      timeout: :timer.minutes(1)
    )
    |> Enum.each(fn _result -> :ok end)
  rescue
    e ->
      Logger.error("""
      [Map Pool] backup_state => exception: #{Exception.message(e)}
      #{Exception.format_stacktrace(__STACKTRACE__)}
      """)
  end
  {:noreply, state}
end
@impl true
# Periodic maintenance: runs `Server.Impl.cleanup_systems/1` for every map
# in the pool concurrently (bounded by 4x schedulers, 1-minute per-map
# timeout). Reschedules itself; exceptions are logged and swallowed.
def handle_info(:cleanup_systems, %{map_ids: map_ids} = state) do
  Process.send_after(self(), :cleanup_systems, @systems_cleanup_timeout)
  try do
    map_ids
    |> Task.async_stream(
      fn map_id ->
        Server.Impl.cleanup_systems(map_id)
      end,
      max_concurrency: System.schedulers_online() * 4,
      on_timeout: :kill_task,
      timeout: :timer.minutes(1)
    )
    |> Enum.each(fn _result -> :ok end)
  rescue
    e ->
      Logger.error("""
      [Map Pool] cleanup_systems => exception: #{Exception.message(e)}
      #{Exception.format_stacktrace(__STACKTRACE__)}
      """)
  end
  {:noreply, state}
end
@impl true
def handle_info(:cleanup_connections, %{map_ids: map_ids} = state) do
  # Schedule the next run first; cleanup itself is best-effort.
  Process.send_after(self(), :cleanup_connections, @connections_cleanup_timeout)

  try do
    stream_opts = [
      max_concurrency: System.schedulers_online() * 4,
      on_timeout: :kill_task,
      timeout: :timer.minutes(1)
    ]

    map_ids
    |> Task.async_stream(&Server.Impl.cleanup_connections/1, stream_opts)
    |> Stream.run()
  rescue
    e ->
      Logger.error("""
      [Map Pool] cleanup_connections => exception: #{Exception.message(e)}
      #{Exception.format_stacktrace(__STACKTRACE__)}
      """)
  end

  {:noreply, state}
end
@impl true
def handle_info(:cleanup_characters, %{map_ids: map_ids} = state) do
  # Keep the periodic loop alive regardless of what happens below.
  Process.send_after(self(), :cleanup_characters, @characters_cleanup_timeout)

  try do
    stream_opts = [
      max_concurrency: System.schedulers_online() * 4,
      on_timeout: :kill_task,
      timeout: :timer.minutes(1)
    ]

    map_ids
    |> Task.async_stream(&Server.Impl.cleanup_characters/1, stream_opts)
    |> Stream.run()
  rescue
    e ->
      Logger.error("""
      [Map Pool] cleanup_characters => exception: #{Exception.message(e)}
      #{Exception.format_stacktrace(__STACKTRACE__)}
      """)
  end

  {:noreply, state}
end
@impl true
def handle_info(:garbage_collect, %{map_ids: map_ids, uuid: uuid} = state) do
  # Always re-arm the GC timer first.
  Process.send_after(self(), :garbage_collect, @garbage_collection_interval)

  try do
    # Stop every map that no longer has any character present, threading the
    # (possibly shrinking) pool state through the fold.
    new_state =
      Enum.reduce(map_ids, state, fn map_id, acc ->
        case WandererApp.Cache.lookup!("map_#{map_id}:presence_character_ids", []) do
          [] ->
            Logger.info(
              "#{uuid}: No more characters present on: #{map_id}, shutting down map server..."
            )

            case do_stop_map(map_id, acc) do
              {:ok, updated} ->
                Logger.debug("#{uuid}: Successfully stopped map #{map_id}")
                updated

              {:error, reason} ->
                Logger.error("#{uuid}: Failed to stop map #{map_id}: #{reason}")
                acc
            end

          _present ->
            acc
        end
      end)

    {:noreply, new_state}
  rescue
    e ->
      # On any error keep the original state so the pool stays consistent.
      Logger.error("#{uuid}: Garbage collection error: #{Exception.message(e)}")
      {:noreply, state}
  end
end
@impl true
def handle_info(:monitor_message_queue, state) do
  # Schedule the next check first; the monitoring pass itself is best-effort.
  Process.send_after(self(), :monitor_message_queue, :timer.seconds(30))
  monitor_message_queue(state)
  {:noreply, state}
end
# Result message from a Task we spawned: flush its DOWN message and log
# only error results.
def handle_info({ref, result}, state) when is_reference(ref) do
  Process.demonitor(ref, [:flush])

  with {:error, error} <- result do
    Logger.error("#{__MODULE__} failed to process: #{inspect(error)}")
  end

  {:noreply, state}
end
def handle_info(:map_deleted, %{map_ids: map_ids} = state) do
  # When a map is deleted, stop all maps in this pool that are deleted.
  # This is a graceful shutdown triggered by user action.
  Logger.info("[Map Pool #{state.uuid}] Received map_deleted event, stopping affected maps")

  # Check which of our maps were deleted (or are gone entirely) and stop them.
  # The "deleted" and "missing" paths previously duplicated the whole stop
  # block; they are merged here, with the original log wording preserved via
  # the {status, adjective} pair.
  new_state =
    Enum.reduce(map_ids, state, fn map_id, current_state ->
      stop_info =
        case WandererApp.MapRepo.get(map_id) do
          {:ok, %{deleted: true}} -> {"was deleted", "deleted"}
          # Map still exists and is not deleted
          {:ok, _map} -> nil
          # Map doesn't exist at all, should stop it
          {:error, _} -> {"not found", "missing"}
        end

      case stop_info do
        nil ->
          current_state

        {status, adjective} ->
          Logger.info("[Map Pool #{state.uuid}] Map #{map_id} #{status}, stopping it")

          case do_stop_map(map_id, current_state) do
            {:ok, updated_state} ->
              updated_state

            {:error, reason} ->
              Logger.error(
                "[Map Pool #{state.uuid}] Failed to stop #{adjective} map #{map_id}: #{reason}"
              )

              current_state
          end
      end
    end)

  {:noreply, new_state}
end
# Fallback clause: any message not matched above is treated as a map event
# and forwarded to the shared server implementation.
def handle_info(event, state) do
  try do
    Server.Impl.handle_event(event)
  rescue
    # Never let an event-handling bug crash the whole pool: log the
    # exception, report it to the error tracker, and keep serving.
    e ->
      Logger.error("""
      [Map Pool] handle_info => exception: #{Exception.message(e)}
      #{Exception.format_stacktrace(__STACKTRACE__)}
      """)
      ErrorTracker.report(e, __STACKTRACE__)
  end
  {:noreply, state}
end
# Inspects this process's mailbox and memory; when the queue exceeds the
# alert threshold, emits a warning log plus a telemetry event. Any failure
# is swallowed with a debug log — monitoring must never crash the pool.
defp monitor_message_queue(state) do
  try do
    {:message_queue_len, queue_len} = Process.info(self(), :message_queue_len)
    {:memory, memory_bytes} = Process.info(self(), :memory)

    if queue_len > 50 do
      Logger.warning("GENSERVER_QUEUE_HIGH: Map pool message queue buildup",
        pool_id: state.uuid,
        message_queue_length: queue_len,
        memory_bytes: memory_bytes,
        pool_length: length(state.map_ids)
      )

      :telemetry.execute(
        [:wanderer_app, :map, :map_pool, :queue_buildup],
        %{message_queue_length: queue_len, memory_bytes: memory_bytes},
        %{pool_id: state.uuid, pool_length: length(state.map_ids)}
      )
    end
  rescue
    error ->
      Logger.debug("Failed to monitor message queue: #{inspect(error)}")
  end
end
end

View File

@@ -0,0 +1,193 @@
defmodule WandererApp.Map.MapPoolDynamicSupervisor do
  @moduledoc false
  use DynamicSupervisor

  require Logger

  @cache :map_pool_cache
  @registry :map_pool_registry
  @unique_registry :unique_map_pool_registry
  @map_pool_limit 10
  @genserver_call_timeout :timer.minutes(2)

  @name __MODULE__

  def start_link(_arg) do
    DynamicSupervisor.start_link(@name, [], name: @name, max_restarts: 10)
  end

  def init(_arg) do
    DynamicSupervisor.init(strategy: :one_for_one)
  end

  @doc """
  Starts (or queues) the map server for `map_id`.

  Reuses an existing pool with spare capacity when one is registered,
  otherwise starts a fresh pool seeded with this map.
  """
  def start_map(map_id) do
    case Registry.lookup(@registry, WandererApp.Map.MapPool) do
      [] ->
        start_child([map_id], 0)

      pools ->
        case get_available_pool(pools) do
          nil ->
            start_child([map_id], pools |> Enum.count())

          pid ->
            result = GenServer.call(pid, {:start_map, map_id}, @genserver_call_timeout)

            case result do
              {:ok, :initializing} ->
                Logger.debug(
                  "[Map Pool Supervisor] Map #{map_id} queued for async initialization"
                )

                result

              {:ok, :already_started} ->
                Logger.debug("[Map Pool Supervisor] Map #{map_id} already started")
                result

              :ok ->
                # Legacy synchronous response (from crash recovery path)
                Logger.debug("[Map Pool Supervisor] Map #{map_id} started synchronously")
                result

              other ->
                Logger.warning(
                  "[Map Pool Supervisor] Unexpected response for map #{map_id}: #{inspect(other)}"
                )

                other
            end
        end
    end
  end

  @doc """
  Stops the map server for `map_id`, locating its pool via the cache and
  falling back to a full registry scan on cache miss/inconsistency.
  """
  def stop_map(map_id) do
    case Cachex.get(@cache, map_id) do
      {:ok, nil} ->
        # Cache miss - try to find the pool by scanning the registry
        Logger.warning(
          "Cache miss for map #{map_id}, scanning registry for pool containing this map"
        )

        find_pool_by_scanning_registry(map_id)

      {:ok, pool_uuid} ->
        # Cache hit - use the pool_uuid to lookup the pool
        case Registry.lookup(
               @unique_registry,
               Module.concat(WandererApp.Map.MapPool, pool_uuid)
             ) do
          [] ->
            Logger.warning(
              "Pool with UUID #{pool_uuid} not found in registry for map #{map_id}, scanning registry"
            )

            find_pool_by_scanning_registry(map_id)

          [{pool_pid, _}] ->
            GenServer.call(pool_pid, {:stop_map, map_id}, @genserver_call_timeout)
        end

      {:error, reason} ->
        Logger.error("Failed to lookup map #{map_id} in cache: #{inspect(reason)}")
        :ok
    end
  end

  # Scans every registered pool's map list to find the one owning `map_id`,
  # repairing the cache when found.
  defp find_pool_by_scanning_registry(map_id) do
    case Registry.lookup(@registry, WandererApp.Map.MapPool) do
      [] ->
        Logger.debug("No map pools found in registry for map #{map_id}")
        :ok

      pools ->
        # Scan all pools to find the one containing this map_id
        found_pool =
          Enum.find_value(pools, fn {_pid, uuid} ->
            case Registry.lookup(
                   @unique_registry,
                   Module.concat(WandererApp.Map.MapPool, uuid)
                 ) do
              [{pool_pid, map_ids}] ->
                if map_id in map_ids do
                  {pool_pid, uuid}
                else
                  nil
                end

              _ ->
                nil
            end
          end)

        case found_pool do
          {pool_pid, pool_uuid} ->
            Logger.info(
              "Found map #{map_id} in pool #{pool_uuid} via registry scan, updating cache"
            )

            # Update the cache to fix the inconsistency
            Cachex.put(@cache, map_id, pool_uuid)
            GenServer.call(pool_pid, {:stop_map, map_id}, @genserver_call_timeout)

          nil ->
            Logger.debug("Map #{map_id} not found in any pool registry")
            :ok
        end
    end
  end

  # Returns the pid of the first pool with capacity below @map_pool_limit,
  # or nil when none has room.
  defp get_available_pool([]), do: nil

  defp get_available_pool([{_pid, uuid} | pools]) do
    case Registry.lookup(@unique_registry, Module.concat(WandererApp.Map.MapPool, uuid)) do
      [] ->
        # Fix: previously a pool missing from the unique registry aborted the
        # whole scan (returned nil); keep checking the remaining pools.
        get_available_pool(pools)

      uuid_pools ->
        case get_available_pool_pid(uuid_pools) do
          nil ->
            get_available_pool(pools)

          pid ->
            pid
        end
    end
  end

  defp get_available_pool_pid([]), do: nil

  defp get_available_pool_pid([{pid, map_ids} | pools]) do
    if Enum.count(map_ids) < @map_pool_limit do
      pid
    else
      get_available_pool_pid(pools)
    end
  end

  # Starts a new pool process seeded with `map_ids`.
  defp start_child(map_ids, pools_count) do
    # Generate UUID for the new pool - this will be used for crash recovery
    uuid = UUID.uuid1()

    # Pass both UUID and map_ids to the pool for crash recovery support
    case DynamicSupervisor.start_child(@name, {WandererApp.Map.MapPool, {uuid, map_ids}}) do
      {:ok, pid} ->
        Logger.info("Starting map pool #{uuid}, total map_pools: #{pools_count + 1}")
        {:ok, pid}

      {:error, {:already_started, pid}} ->
        {:ok, pid}

      {:error, reason} ->
        # Fix: an unmatched error here used to raise CaseClauseError; surface
        # the failure to the caller instead.
        Logger.error("Failed to start map pool #{uuid}: #{inspect(reason)}")
        {:error, reason}
    end
  end

  defp stop_child(uuid) do
    # NOTE(review): @registry is a :duplicate registry keyed by
    # WandererApp.Map.MapPool (see start_map/1), so looking it up by pool
    # uuid appears to always miss — confirm the intended key here.
    case Registry.lookup(@registry, uuid) do
      [{pid, _}] ->
        GenServer.cast(pid, :stop)

      _ ->
        # Logger.warn/1 is deprecated; Logger.warning/1 is the replacement.
        Logger.warning("Unable to locate pool assigned to #{inspect(uuid)}")
        :ok
    end
  end
end

View File

@@ -0,0 +1,190 @@
defmodule WandererApp.Map.MapPoolState do
  @moduledoc """
  Persists MapPool state to ETS so pools can recover after a crash.

  Entries live in the `:map_pool_state_table` ETS table, which is owned by
  the MapPoolSupervisor and therefore survives individual MapPool process
  crashes. The table does not survive a node restart, which conveniently
  avoids stale state after a full system restart.

  ## State Format

  Each entry is a tuple `{pool_uuid, map_ids, last_updated_timestamp}`:

    * `pool_uuid` - unique pool identifier (the table key)
    * `map_ids` - list of map IDs managed by the pool
    * `last_updated_timestamp` - Unix timestamp (seconds) of the last write
  """
  require Logger

  @table_name :map_pool_state_table
  @stale_threshold_hours 24

  @doc """
  Creates the ETS table used for MapPool state.

  Intended to be called once by the MapPoolSupervisor during init. The table
  is a public, named `:set`: any process may read/write it, and each pool
  UUID has exactly one entry. Raises if the table already exists.
  """
  @spec init_table() :: :ets.table()
  def init_table do
    :ets.new(@table_name, [:set, :public, :named_table])
  end

  @doc """
  Writes the current `map_ids` for the pool identified by `uuid`.

      iex> MapPoolState.save_pool_state("pool-123", [1, 2, 3])
      :ok
  """
  @spec save_pool_state(String.t(), [integer()]) :: :ok
  def save_pool_state(uuid, map_ids) when is_binary(uuid) and is_list(map_ids) do
    entry = {uuid, map_ids, System.system_time(:second)}
    true = :ets.insert(@table_name, entry)

    Logger.debug("Saved MapPool state for #{uuid}: #{length(map_ids)} maps",
      pool_uuid: uuid,
      map_count: length(map_ids)
    )

    :ok
  end

  @doc """
  Reads the saved map list for `uuid`.

  Returns `{:ok, map_ids}` when an entry exists, `{:error, :not_found}`
  otherwise.
  """
  @spec get_pool_state(String.t()) :: {:ok, [integer()]} | {:error, :not_found}
  def get_pool_state(uuid) when is_binary(uuid) do
    case :ets.lookup(@table_name, uuid) do
      [{^uuid, map_ids, _saved_at}] -> {:ok, map_ids}
      [] -> {:error, :not_found}
    end
  end

  @doc """
  Removes the saved state for `uuid`.

  Should be called when a pool shuts down gracefully.
  """
  @spec delete_pool_state(String.t()) :: :ok
  def delete_pool_state(uuid) when is_binary(uuid) do
    true = :ets.delete(@table_name, uuid)
    Logger.debug("Deleted MapPool state for #{uuid}", pool_uuid: uuid)
    :ok
  end

  @doc """
  Deletes entries not updated within the last #{@stale_threshold_hours}
  hours, keeping the table from growing unbounded as pool UUIDs go out of
  use. Returns `{:ok, deleted_count}`.
  """
  @spec cleanup_stale_entries() :: {:ok, non_neg_integer()}
  def cleanup_stale_entries do
    cutoff = System.system_time(:second) - @stale_threshold_hours * 3600

    stale_uuids =
      :ets.select(@table_name, [
        {{:"$1", :"$2", :"$3"}, [{:<, :"$3", cutoff}], [:"$1"]}
      ])

    for uuid <- stale_uuids do
      :ets.delete(@table_name, uuid)

      Logger.info("Cleaned up stale MapPool state for #{uuid}",
        pool_uuid: uuid,
        reason: :stale
      )
    end

    {:ok, length(stale_uuids)}
  end

  @doc """
  Lists every stored `{pool_uuid, map_ids, last_updated_timestamp}` tuple.
  Useful for debugging and monitoring.
  """
  @spec list_all_states() :: [{String.t(), [integer()], integer()}]
  def list_all_states do
    :ets.tab2list(@table_name)
  end

  @doc """
  Number of pool entries currently stored in the table.
  """
  @spec count_states() :: non_neg_integer()
  def count_states do
    :ets.info(@table_name, :size)
  end
end

View File

@@ -0,0 +1,29 @@
defmodule WandererApp.Map.MapPoolSupervisor do
  @moduledoc false
  use Supervisor

  alias WandererApp.Map.MapPoolState

  @name __MODULE__
  @registry :map_pool_registry
  @unique_registry :unique_map_pool_registry

  def start_link(_args) do
    Supervisor.start_link(@name, [], name: @name)
  end

  def init(_args) do
    # The ETS table backing MapPool crash recovery is created here so it is
    # owned by this supervisor process: it survives individual MapPool
    # crashes, but is lost on node restart (no stale state after restarts).
    MapPoolState.init_table()

    [
      {Registry, [keys: :unique, name: @unique_registry]},
      {Registry, [keys: :duplicate, name: @registry]},
      {WandererApp.Map.MapPoolDynamicSupervisor, []},
      {WandererApp.Map.Reconciler, []}
    ]
    |> Supervisor.init(strategy: :rest_for_one, max_restarts: 10)
  end
end

View File

@@ -2,6 +2,8 @@ defmodule WandererApp.Map.PositionCalculator do
@moduledoc false
require Logger
@ddrt Application.compile_env(:wanderer_app, :ddrt)
# Node height
@h 34
# Node weight
@@ -60,7 +62,7 @@ defmodule WandererApp.Map.PositionCalculator do
end
defp is_available_position({x, y} = _position, rtree_name) do
case DDRT.query(get_system_bounding_rect(%{position_x: x, position_y: y}), rtree_name) do
case @ddrt.query(get_system_bounding_rect(%{position_x: x, position_y: y}), rtree_name) do
{:ok, []} ->
true

View File

@@ -0,0 +1,280 @@
defmodule WandererApp.Map.Reconciler do
  @moduledoc """
  Periodically reconciles map state across different stores (Cache, Registry, GenServer state)
  to detect and fix inconsistencies that may prevent map servers from restarting.
  """
  use GenServer

  require Logger

  @cache :map_pool_cache
  @registry :map_pool_registry
  @unique_registry :unique_map_pool_registry

  @reconciliation_interval :timer.minutes(5)

  def start_link(_opts) do
    GenServer.start_link(__MODULE__, [], name: __MODULE__)
  end

  @impl true
  def init(_opts) do
    Logger.info("Starting Map Reconciler")
    schedule_reconciliation()
    {:ok, %{}}
  end

  @impl true
  def handle_info(:reconcile, state) do
    # Re-arm the timer before reconciling so a failure can't stop the cycle.
    schedule_reconciliation()
    run_reconciliation("reconciliation")
    {:noreply, state}
  end

  @doc """
  Manually trigger a reconciliation (useful for testing or manual cleanup)
  """
  def trigger_reconciliation do
    GenServer.cast(__MODULE__, :reconcile_now)
  end

  @impl true
  def handle_cast(:reconcile_now, state) do
    run_reconciliation("manual reconciliation")
    {:noreply, state}
  end

  # Runs reconcile_state/0 and converts any raised exception into an error
  # log. Shared by the periodic timer and the manual trigger, which
  # previously duplicated this try/rescue block verbatim; `label` preserves
  # the original per-path log wording.
  defp run_reconciliation(label) do
    reconcile_state()
  rescue
    e ->
      Logger.error("""
      [Map Reconciler] #{label} error: #{Exception.message(e)}
      #{Exception.format_stacktrace(__STACKTRACE__)}
      """)
  end

  defp schedule_reconciliation do
    Process.send_after(self(), :reconcile, @reconciliation_interval)
  end

  # One full pass: compare the "started_maps" cache against the registries,
  # then repair zombies, orphans and pool-cache inconsistencies.
  defp reconcile_state do
    Logger.debug("[Map Reconciler] Starting state reconciliation")

    # Get started_maps from cache
    {:ok, started_maps} = WandererApp.Cache.lookup("started_maps", [])

    # Get all maps from registries
    registry_maps = get_all_registry_maps()

    # Detect zombie maps (in started_maps but not in any registry)
    zombie_maps = started_maps -- registry_maps

    # Detect orphan maps (in registry but not in started_maps)
    orphan_maps = registry_maps -- started_maps

    # Detect cache inconsistencies (map_pool_cache pointing to wrong or non-existent pools)
    cache_inconsistencies = find_cache_inconsistencies(registry_maps)

    stats = %{
      total_started_maps: length(started_maps),
      total_registry_maps: length(registry_maps),
      zombie_maps: length(zombie_maps),
      orphan_maps: length(orphan_maps),
      cache_inconsistencies: length(cache_inconsistencies)
    }

    Logger.info("[Map Reconciler] Reconciliation stats: #{inspect(stats)}")

    # Emit telemetry
    :telemetry.execute(
      [:wanderer_app, :map, :reconciliation],
      stats,
      %{}
    )

    # Clean up zombie maps
    cleanup_zombie_maps(zombie_maps)

    # Fix orphan maps
    fix_orphan_maps(orphan_maps)

    # Fix cache inconsistencies
    fix_cache_inconsistencies(cache_inconsistencies)

    Logger.debug("[Map Reconciler] State reconciliation completed")
  end

  # Union of the map_ids claimed by every registered pool.
  defp get_all_registry_maps do
    case Registry.lookup(@registry, WandererApp.Map.MapPool) do
      [] ->
        []

      pools ->
        pools
        |> Enum.flat_map(fn {_pid, uuid} ->
          case Registry.lookup(
                 @unique_registry,
                 Module.concat(WandererApp.Map.MapPool, uuid)
               ) do
            [{_pool_pid, map_ids}] -> map_ids
            _ -> []
          end
        end)
        |> Enum.uniq()
    end
  end

  # A map is inconsistent when the pool cache has no entry for it, points at
  # a pool that no longer exists, or points at a pool that doesn't list it.
  defp find_cache_inconsistencies(registry_maps) do
    registry_maps
    |> Enum.filter(fn map_id ->
      case Cachex.get(@cache, map_id) do
        {:ok, nil} ->
          # Map in registry but not in cache
          true

        {:ok, pool_uuid} ->
          # Check if the pool_uuid actually exists in registry
          case Registry.lookup(
                 @unique_registry,
                 Module.concat(WandererApp.Map.MapPool, pool_uuid)
               ) do
            [] ->
              # Cache points to non-existent pool
              true

            [{_pool_pid, map_ids}] ->
              # Check if this map is actually in the pool's map_ids
              map_id not in map_ids

            _ ->
              false
          end

        {:error, _} ->
          true
      end
    end)
  end

  defp cleanup_zombie_maps([]), do: :ok

  # Zombies are maps marked as started but owned by no pool: purge every
  # cache entry that would prevent the map from being started again.
  defp cleanup_zombie_maps(zombie_maps) do
    Logger.warning(
      "[Map Reconciler] Found #{length(zombie_maps)} zombie maps: #{inspect(zombie_maps)}"
    )

    Enum.each(zombie_maps, fn map_id ->
      Logger.info("[Map Reconciler] Cleaning up zombie map: #{map_id}")

      # Remove from started_maps cache
      WandererApp.Cache.insert_or_update(
        "started_maps",
        [],
        fn started_maps ->
          started_maps |> Enum.reject(fn started_map_id -> started_map_id == map_id end)
        end
      )

      # Clean up any stale map_pool_cache entries
      Cachex.del(@cache, map_id)

      # Clean up map-specific caches
      WandererApp.Cache.delete("map_#{map_id}:started")
      WandererApp.Cache.delete("map_characters-#{map_id}")
      WandererApp.Map.CacheRTree.clear_tree("rtree_#{map_id}")
      WandererApp.Map.delete_map_state(map_id)

      :telemetry.execute(
        [:wanderer_app, :map, :reconciliation, :zombie_cleanup],
        %{count: 1},
        %{map_id: map_id}
      )
    end)
  end

  defp fix_orphan_maps([]), do: :ok

  # Orphans are maps a pool claims but that aren't marked started: re-add
  # them to the started_maps cache.
  defp fix_orphan_maps(orphan_maps) do
    Logger.warning(
      "[Map Reconciler] Found #{length(orphan_maps)} orphan maps: #{inspect(orphan_maps)}"
    )

    Enum.each(orphan_maps, fn map_id ->
      Logger.info("[Map Reconciler] Fixing orphan map: #{map_id}")

      # Add to started_maps cache
      WandererApp.Cache.insert_or_update(
        "started_maps",
        [map_id],
        fn existing ->
          [map_id | existing] |> Enum.uniq()
        end
      )

      :telemetry.execute(
        [:wanderer_app, :map, :reconciliation, :orphan_fixed],
        %{count: 1},
        %{map_id: map_id}
      )
    end)
  end

  defp fix_cache_inconsistencies([]), do: :ok

  # Repoints the pool cache at the pool that actually owns each map, or
  # removes the entry when no pool owns it.
  defp fix_cache_inconsistencies(inconsistent_maps) do
    Logger.warning(
      "[Map Reconciler] Found #{length(inconsistent_maps)} cache inconsistencies: #{inspect(inconsistent_maps)}"
    )

    Enum.each(inconsistent_maps, fn map_id ->
      Logger.info("[Map Reconciler] Fixing cache inconsistency for map: #{map_id}")

      # Find the correct pool for this map
      case find_pool_for_map(map_id) do
        {:ok, pool_uuid} ->
          Logger.info("[Map Reconciler] Updating cache: #{map_id} -> #{pool_uuid}")
          Cachex.put(@cache, map_id, pool_uuid)

          :telemetry.execute(
            [:wanderer_app, :map, :reconciliation, :cache_fixed],
            %{count: 1},
            %{map_id: map_id, pool_uuid: pool_uuid}
          )

        :error ->
          Logger.warning(
            "[Map Reconciler] Could not find pool for map #{map_id}, removing from cache"
          )

          Cachex.del(@cache, map_id)
      end
    end)
  end

  # Scans registered pools for the one whose map list contains `map_id`.
  defp find_pool_for_map(map_id) do
    case Registry.lookup(@registry, WandererApp.Map.MapPool) do
      [] ->
        :error

      pools ->
        pools
        |> Enum.find_value(:error, fn {_pid, uuid} ->
          case Registry.lookup(
                 @unique_registry,
                 Module.concat(WandererApp.Map.MapPool, uuid)
               ) do
            [{_pool_pid, map_ids}] ->
              if map_id in map_ids do
                {:ok, uuid}
              else
                nil
              end

            _ ->
              nil
          end
        end)
    end
  end
end

View File

@@ -1,15 +0,0 @@
defmodule WandererApp.Map.RegistryHelper do
  @moduledoc false

  alias WandererApp.MapRegistry

  # Selects every registration in the map registry as %{id: map_id, pid: pid}.
  def list_all_maps do
    Registry.select(MapRegistry, [{{:"$1", :"$2", :_}, [], [%{id: :"$1", pid: :"$2"}]}])
  end

  # Same shape, but restricted to registrations whose key equals `map_id`.
  def list_all_maps_by_map_id(map_id) do
    spec = [
      {{:"$1", :"$2", :"$3"}, [{:==, :"$1", map_id}], [%{id: :"$1", pid: :"$2"}]}
    ]

    Registry.select(MapRegistry, spec)
  end
end

View File

@@ -0,0 +1,311 @@
defmodule WandererApp.Map.Routes do
  @moduledoc """
  Map routes helper
  """
  require Logger

  @default_routes_settings %{
    path_type: "shortest",
    include_mass_crit: true,
    include_eol: false,
    include_frig: true,
    include_cruise: true,
    avoid_wormholes: false,
    avoid_pochven: false,
    avoid_edencom: false,
    avoid_triglavian: false,
    include_thera: true,
    avoid: []
  }

  # Attributes kept when returning system static info to the client.
  @minimum_route_attrs [
    :system_class,
    :class_title,
    :security,
    :triglavian_invasion_status,
    :solar_system_id,
    :solar_system_name,
    :region_name,
    :is_shattered
  ]

  @get_link_pairs_advanced_params [
    :include_mass_crit,
    :include_eol,
    :include_frig
  ]

  @zarzakh_system 30_100_000
  @default_avoid_systems [@zarzakh_system]
  @routes_ttl :timer.minutes(15)
  @logger Application.compile_env(:wanderer_app, :logger)

  # Computes real routes (5th arg `false` = normal mode).
  def find(map_id, hubs, origin, routes_settings, false) do
    do_find_routes(
      map_id,
      origin,
      hubs,
      routes_settings
    )
    |> case do
      {:ok, routes} ->
        # Resolve static info for every system on any route.
        # NOTE: async_stream here uses the default timeout/on_timeout; a
        # timed-out lookup exits the stream and falls into the catch-all
        # `_error` branch below.
        systems_static_data =
          routes
          |> Enum.map(fn route_info -> route_info.systems end)
          |> List.flatten()
          |> Enum.uniq()
          |> Task.async_stream(
            fn system_id ->
              case WandererApp.CachedInfo.get_system_static_info(system_id) do
                {:ok, nil} ->
                  nil

                {:ok, system} ->
                  system |> Map.take(@minimum_route_attrs)
              end
            end,
            max_concurrency: System.schedulers_online() * 4
          )
          |> Enum.map(fn {:ok, val} -> val end)
          # Fix: unknown systems resolve to nil above; drop them so callers
          # never receive nil entries mixed into the static data.
          |> Enum.reject(&is_nil/1)

        {:ok, %{routes: routes, systems_static_data: systems_static_data}}

      _error ->
        {:ok, %{routes: [], systems_static_data: []}}
    end
  end

  # Degraded mode (5th arg `true`): placeholder, unconnected routes only.
  # Fix: `routes_settings` was unused here and triggered a compiler warning.
  def find(_map_id, hubs, origin, _routes_settings, true) do
    origin = origin |> String.to_integer()
    hubs = hubs |> Enum.map(&(&1 |> String.to_integer()))

    routes =
      hubs
      |> Enum.map(fn hub ->
        %{origin: origin, destination: hub, success: false, systems: [], has_connection: false}
      end)

    {:ok, %{routes: routes, systems_static_data: []}}
  end

  # Builds the connection/avoidance parameters from the settings, fetches
  # routes for every hub, and normalizes them.
  defp do_find_routes(map_id, origin, hubs, routes_settings) do
    origin = origin |> String.to_integer()
    hubs = hubs |> Enum.map(&(&1 |> String.to_integer()))
    routes_settings = @default_routes_settings |> Map.merge(routes_settings)

    connections =
      case routes_settings.avoid_wormholes do
        false ->
          map_chains =
            routes_settings
            |> Map.take(@get_link_pairs_advanced_params)
            |> Map.put_new(:map_id, map_id)
            |> WandererApp.Api.MapConnection.get_link_pairs_advanced!()
            |> Enum.map(fn %{
                             solar_system_source: solar_system_source,
                             solar_system_target: solar_system_target
                           } ->
              %{
                first: solar_system_source,
                second: solar_system_target
              }
            end)
            |> Enum.uniq()

          {:ok, thera_chains} =
            case routes_settings.include_thera do
              true ->
                WandererApp.Server.TheraDataFetcher.get_chain_pairs(routes_settings)

              false ->
                {:ok, []}
            end

          chains = remove_intersection([map_chains | thera_chains] |> List.flatten())

          chains =
            case routes_settings.include_cruise do
              false ->
                {:ok, wh_class_a_systems} = WandererApp.CachedInfo.get_wh_class_a_systems()

                chains
                |> Enum.filter(fn x ->
                  not Enum.member?(wh_class_a_systems, x.first) and
                    not Enum.member?(wh_class_a_systems, x.second)
                end)

              _ ->
                chains
            end

          # The route service expects both directions as "a|b" strings.
          chains
          |> Enum.map(fn chain ->
            ["#{chain.first}|#{chain.second}", "#{chain.second}|#{chain.first}"]
          end)
          |> List.flatten()

        true ->
          []
      end

    {:ok, trig_systems} = WandererApp.CachedInfo.get_trig_systems()

    pochven_solar_systems =
      trig_systems
      |> Enum.filter(fn s -> s.triglavian_invasion_status == "Final" end)
      |> Enum.map(& &1.solar_system_id)

    triglavian_solar_systems =
      trig_systems
      |> Enum.filter(fn s -> s.triglavian_invasion_status == "Triglavian" end)
      |> Enum.map(& &1.solar_system_id)

    edencom_solar_systems =
      trig_systems
      |> Enum.filter(fn s -> s.triglavian_invasion_status == "Edencom" end)
      |> Enum.map(& &1.solar_system_id)

    avoidance_list =
      case routes_settings.avoid_edencom do
        true ->
          edencom_solar_systems

        false ->
          []
      end

    avoidance_list =
      case routes_settings.avoid_triglavian do
        true ->
          [avoidance_list | triglavian_solar_systems]

        false ->
          avoidance_list
      end

    avoidance_list =
      case routes_settings.avoid_pochven do
        true ->
          [avoidance_list | pochven_solar_systems]

        false ->
          avoidance_list
      end

    avoidance_list =
      (@default_avoid_systems ++ [routes_settings.avoid | avoidance_list])
      |> List.flatten()
      |> Enum.uniq()

    params =
      %{
        datasource: "tranquility",
        flag: routes_settings.path_type,
        connections: connections,
        avoid: avoidance_list
      }

    {:ok, all_routes} = get_all_routes(hubs, origin, params)

    routes =
      all_routes
      |> Enum.map(fn route_info ->
        map_route_info(route_info)
      end)
      |> Enum.filter(fn route_info -> not is_nil(route_info) end)

    {:ok, routes}
  end

  # Cache-first lookup; on a miss asks the custom route service, falling
  # back to EVE's route API when that fails.
  defp get_all_routes(hubs, origin, params, opts \\ []) do
    cache_key =
      "routes-#{origin}-#{hubs |> Enum.join("-")}-#{:crypto.hash(:sha, :erlang.term_to_binary(params))}"

    case WandererApp.Cache.lookup(cache_key) do
      {:ok, result} when not is_nil(result) ->
        {:ok, result}

      _ ->
        case WandererApp.Esi.get_routes_custom(hubs, origin, params) do
          {:ok, result} ->
            WandererApp.Cache.insert(
              cache_key,
              result,
              ttl: @routes_ttl
            )

            {:ok, result}

          {:error, _error} ->
            @logger.error(
              "Error getting custom routes for #{inspect(origin)}: #{inspect(params)}"
            )

            WandererApp.Esi.get_routes_eve(hubs, origin, params, opts)
        end
    end
  end

  # Removes pairs that appear in both orientations.
  # NOTE(review): when both {a, b} and {b, a} are present, BOTH are dropped
  # (not deduplicated to one) — confirm this is the intended semantics.
  defp remove_intersection(pairs_arr) do
    tuples = pairs_arr |> Enum.map(fn x -> {x.first, x.second} end)

    tuples
    |> Enum.reduce([], fn {first, second} = x, acc ->
      if Enum.member?(tuples, {second, first}) do
        acc
      else
        [x | acc]
      end
    end)
    |> Enum.uniq()
    |> Enum.map(fn {first, second} ->
      %{
        first: first,
        second: second
      }
    end)
  end

  # Normalizes a string-keyed route payload into the atom-keyed shape below.
  defp map_route_info(
         %{
           "origin" => origin,
           "destination" => destination,
           "systems" => result_systems,
           "success" => success
         } = _route_info
       ),
       do:
         map_route_info(%{
           origin: origin,
           destination: destination,
           systems: result_systems,
           success: success
         })

  defp map_route_info(
         %{origin: origin, destination: destination, systems: result_systems, success: success} =
           _route_info
       ) do
    # Drop the origin itself from the returned system list.
    systems =
      case result_systems do
        [] ->
          []

        _ ->
          result_systems |> Enum.reject(fn system_id -> system_id == origin end)
      end

    %{
      has_connection: result_systems != [],
      systems: systems,
      origin: origin,
      destination: destination,
      success: success
    }
  end

  # Unrecognized payloads are filtered out by the caller.
  defp map_route_info(_), do: nil
end

View File

@@ -1,41 +0,0 @@
defmodule WandererApp.Map.RtreeDynamicSupervisor do
  @moduledoc """
  Dynamically starts and stops the per-map DDRT r-tree process.
  """
  use DynamicSupervisor

  def start_link(_arg) do
    DynamicSupervisor.start_link(__MODULE__, nil, name: __MODULE__)
  end

  def init(nil) do
    DynamicSupervisor.init(strategy: :one_for_one)
  end

  # The registered name of the r-tree process for a given map.
  # NOTE(review): Module.concat/1 mints a new atom per map_id and atoms are
  # never garbage collected — confirm map_id cardinality is bounded.
  defp rtree_name(map_id), do: Module.concat([map_id, DDRT.DynamicRtree])

  def start(map_id) do
    spec =
      {DDRT.DynamicRtree,
       [
         conf: [name: "rtree_#{map_id}", width: 150, verbose: false, seed: 0],
         name: rtree_name(map_id)
       ]}

    case DynamicSupervisor.start_child(__MODULE__, spec) do
      {:ok, pid} -> {:ok, pid}
      {:error, {:already_started, pid}} -> {:ok, pid}
      {:error, reason} -> {:error, reason}
    end
  end

  def stop(map_id) do
    map_id
    |> rtree_name()
    |> Process.whereis()
    |> case do
      nil -> :ok
      pid when is_pid(pid) -> DynamicSupervisor.terminate_child(__MODULE__, pid)
    end
  end

  def which_children do
    Supervisor.which_children(__MODULE__)
  end
end

View File

@@ -2,52 +2,12 @@ defmodule WandererApp.Map.Server do
@moduledoc """
Holds state for a map and exposes an interface to managing the map instance
"""
use GenServer, restart: :transient, significant: true
require Logger
alias WandererApp.Map.Server.Impl
@logger Application.compile_env(:wanderer_app, :logger)
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(args) when is_list(args) do
GenServer.start_link(__MODULE__, args, name: _via(args[:map_id]))
end
@impl true
def init(args), do: {:ok, Impl.init(args), {:continue, :load_state}}
def map_pid(map_id),
do:
map_id
|> _via()
|> GenServer.whereis()
def map_pid!(map_id) do
map_id
|> map_pid()
|> case do
map_id when is_pid(map_id) ->
map_id
nil ->
WandererApp.Cache.insert("map_#{map_id}:started", false)
throw("Map server not started")
end
end
def get_map(pid) when is_pid(pid),
do:
pid
|> GenServer.call({&Impl.get_map/1, []}, :timer.minutes(5))
def get_map(map_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> get_map()
def get_export_settings(%{id: map_id, hubs: hubs} = _map) do
with {:ok, all_systems} <- WandererApp.MapSystemRepo.get_all_by_map(map_id),
{:ok, connections} <- WandererApp.MapConnectionRepo.get_by_map(map_id) do
@@ -70,256 +30,67 @@ defmodule WandererApp.Map.Server do
end
end
def get_characters(map_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.call({&Impl.get_characters/1, []}, :timer.minutes(1))
defdelegate untrack_characters(map_id, character_ids), to: Impl
def add_character(map_id, character, track_character \\ false) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.add_character/3, [character, track_character]})
defdelegate add_system(map_id, system_info, user_id, character_id, opts \\ []), to: Impl
def remove_character(map_id, character_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.remove_character/2, [character_id]})
defdelegate paste_connections(map_id, connections, user_id, character_id), to: Impl
def untrack_characters(map_id, character_ids) when is_binary(map_id) do
map_id
|> map_pid()
|> case do
pid when is_pid(pid) ->
GenServer.cast(pid, {&Impl.untrack_characters/2, [character_ids]})
defdelegate paste_systems(map_id, systems, user_id, character_id, opts \\ []), to: Impl
_ ->
WandererApp.Cache.insert("map_#{map_id}:started", false)
:ok
end
end
defdelegate add_system_comment(map_id, comment_info, user_id, character_id), to: Impl
def add_system(map_id, system_info, user_id, character_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.add_system/4, [system_info, user_id, character_id]})
defdelegate remove_system_comment(map_id, comment_id, user_id, character_id), to: Impl
def paste_connections(map_id, connections, user_id, character_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.paste_connections/4, [connections, user_id, character_id]})
defdelegate update_system_position(map_id, update), to: Impl
def paste_systems(map_id, systems, user_id, character_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.paste_systems/4, [systems, user_id, character_id]})
defdelegate update_system_linked_sig_eve_id(map_id, update), to: Impl
def add_system_comment(map_id, comment_info, user_id, character_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.add_system_comment/4, [comment_info, user_id, character_id]})
defdelegate update_system_name(map_id, update), to: Impl
def remove_system_comment(map_id, comment_id, user_id, character_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.remove_system_comment/4, [comment_id, user_id, character_id]})
defdelegate update_system_description(map_id, update), to: Impl
def update_system_position(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_position/2, [update]})
defdelegate update_system_status(map_id, update), to: Impl
def update_system_linked_sig_eve_id(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_linked_sig_eve_id/2, [update]})
defdelegate update_system_tag(map_id, update), to: Impl
def update_system_name(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_name/2, [update]})
defdelegate update_system_temporary_name(map_id, update), to: Impl
def update_system_description(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_description/2, [update]})
defdelegate update_system_locked(map_id, update), to: Impl
def update_system_status(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_status/2, [update]})
defdelegate update_system_labels(map_id, update), to: Impl
def update_system_tag(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_tag/2, [update]})
defdelegate add_hub(map_id, hub_info), to: Impl
def update_system_temporary_name(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_temporary_name/2, [update]})
defdelegate remove_hub(map_id, hub_info), to: Impl
def update_system_locked(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_locked/2, [update]})
defdelegate add_ping(map_id, ping_info), to: Impl
def update_system_labels(map_id, update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_system_labels/2, [update]})
defdelegate cancel_ping(map_id, ping_info), to: Impl
def add_hub(map_id, hub_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.add_hub/2, [hub_info]})
defdelegate delete_systems(map_id, solar_system_ids, user_id, character_id), to: Impl
def remove_hub(map_id, hub_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.remove_hub/2, [hub_info]})
defdelegate add_connection(map_id, connection_info), to: Impl
def add_ping(map_id, ping_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.add_ping/2, [ping_info]})
defdelegate delete_connection(map_id, connection_info), to: Impl
def cancel_ping(map_id, ping_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.cancel_ping/2, [ping_info]})
defdelegate import_settings(map_id, settings, user_id), to: Impl
def delete_systems(map_id, solar_system_ids, user_id, character_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.delete_systems/4, [solar_system_ids, user_id, character_id]})
defdelegate update_subscription_settings(map_id, settings), to: Impl
def add_connection(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.add_connection/2, [connection_info]})
defdelegate get_connection_info(map_id, connection_info), to: Impl
def import_settings(map_id, settings, user_id) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.call({&Impl.import_settings/3, [settings, user_id]}, :timer.minutes(30))
defdelegate update_connection_time_status(map_id, connection_info), to: Impl
def update_subscription_settings(map_id, settings) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_subscription_settings/2, [settings]})
defdelegate update_connection_type(map_id, connection_info), to: Impl
def delete_connection(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.delete_connection/2, [connection_info]})
defdelegate update_connection_mass_status(map_id, connection_info), to: Impl
def get_connection_info(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.call({&Impl.get_connection_info/2, [connection_info]}, :timer.minutes(1))
defdelegate update_connection_ship_size_type(map_id, connection_info), to: Impl
def update_connection_time_status(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_connection_time_status/2, [connection_info]})
defdelegate update_connection_locked(map_id, connection_info), to: Impl
def update_connection_type(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_connection_type/2, [connection_info]})
defdelegate update_connection_custom_info(map_id, connection_info), to: Impl
def update_connection_mass_status(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_connection_mass_status/2, [connection_info]})
def update_connection_ship_size_type(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_connection_ship_size_type/2, [connection_info]})
def update_connection_locked(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_connection_locked/2, [connection_info]})
def update_connection_custom_info(map_id, connection_info) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_connection_custom_info/2, [connection_info]})
def update_signatures(map_id, signatures_update) when is_binary(map_id),
do:
map_id
|> map_pid!
|> GenServer.cast({&Impl.update_signatures/2, [signatures_update]})
@impl true
def handle_continue(:load_state, state),
do: {:noreply, state |> Impl.load_state(), {:continue, :start_map}}
@impl true
def handle_continue(:start_map, state), do: {:noreply, state |> Impl.start_map()}
@impl true
def handle_call(
{impl_function, args},
_from,
state
)
when is_function(impl_function),
do: WandererApp.GenImpl.apply_call(impl_function, state, args)
@impl true
def handle_cast(:stop, state), do: {:stop, :normal, state |> Impl.stop_map()}
@impl true
def handle_cast({impl_function, args}, state)
when is_function(impl_function) do
case WandererApp.GenImpl.apply_call(impl_function, state, args) do
{:reply, _return, updated_state} ->
{:noreply, updated_state}
_ ->
{:noreply, state}
end
end
@impl true
def handle_info(event, state), do: {:noreply, Impl.handle_event(event, state)}
defp _via(map_id), do: {:via, Registry, {WandererApp.MapRegistry, map_id}}
defdelegate update_signatures(map_id, signatures_update), to: Impl
end

View File

@@ -300,10 +300,9 @@ defmodule WandererApp.Map.SubscriptionManager do
defp is_expired(subscription) when is_map(subscription),
do: DateTime.compare(DateTime.utc_now(), subscription.active_till) == :gt
defp renew_subscription(%{auto_renew?: true} = subscription) when is_map(subscription) do
with {:ok, %{map: map}} <-
subscription |> WandererApp.MapSubscriptionRepo.load_relationships([:map]),
{:ok, estimated_price, discount} <- estimate_price(subscription, true),
defp renew_subscription(%{auto_renew?: true, map: map} = subscription)
when is_map(subscription) do
with {:ok, estimated_price, discount} <- estimate_price(subscription, true),
{:ok, map_balance} <- get_balance(map) do
case map_balance >= estimated_price do
true ->
@@ -328,7 +327,7 @@ defmodule WandererApp.Map.SubscriptionManager do
@pubsub_client.broadcast(
WandererApp.PubSub,
"maps:#{map.id}",
:subscription_settings_updated
{:subscription_settings_updated, map.id}
)
:telemetry.execute([:wanderer_app, :map, :subscription, :renew], %{count: 1}, %{
@@ -388,7 +387,7 @@ defmodule WandererApp.Map.SubscriptionManager do
@pubsub_client.broadcast(
WandererApp.PubSub,
"maps:#{map.id}",
:subscription_settings_updated
{:subscription_settings_updated, map.id}
)
case WandererApp.License.LicenseManager.get_license_by_map_id(map.id) do
@@ -423,7 +422,7 @@ defmodule WandererApp.Map.SubscriptionManager do
@pubsub_client.broadcast(
WandererApp.PubSub,
"maps:#{subscription.map_id}",
:subscription_settings_updated
{:subscription_settings_updated, subscription.map_id}
)
case WandererApp.License.LicenseManager.get_license_by_map_id(subscription.map_id) do

View File

@@ -29,20 +29,20 @@ defmodule WandererApp.Map.ZkbDataFetcher do
kills_enabled = Application.get_env(:wanderer_app, :wanderer_kills_service_enabled, true)
if kills_enabled do
WandererApp.Map.RegistryHelper.list_all_maps()
{:ok, started_maps_ids} = WandererApp.Cache.lookup("started_maps", [])
started_maps_ids
|> Task.async_stream(
fn %{id: map_id, pid: _server_pid} ->
fn map_id ->
try do
if WandererApp.Map.Server.map_pid(map_id) do
# Always update kill counts
update_map_kills(map_id)
# Always update kill counts
update_map_kills(map_id)
# Update detailed kills for maps with active subscriptions
{:ok, is_subscription_active} = map_id |> WandererApp.Map.is_subscription_active?()
# Update detailed kills for maps with active subscriptions
{:ok, is_subscription_active} = map_id |> WandererApp.Map.is_subscription_active?()
if is_subscription_active do
update_detailed_map_kills(map_id)
end
if is_subscription_active do
update_detailed_map_kills(map_id)
end
rescue
e ->

View File

@@ -12,7 +12,6 @@ defmodule WandererApp.Map.Operations.Connections do
# Connection type constants
@connection_type_wormhole 0
@connection_type_stargate 1
# Ship size constants
@small_ship_size 0
@@ -231,31 +230,15 @@ defmodule WandererApp.Map.Operations.Connections do
attrs
) do
with {:ok, conn_struct} <- MapConnectionRepo.get_by_id(map_id, conn_id),
result <-
:ok <-
(try do
_allowed_keys = [
:mass_status,
:ship_size_type,
:time_status,
:type
]
_update_map =
attrs
|> Enum.filter(fn {k, _v} ->
k in ["mass_status", "ship_size_type", "time_status", "type"]
end)
|> Enum.map(fn {k, v} -> {String.to_atom(k), v} end)
|> Enum.into(%{})
res = apply_connection_updates(map_id, conn_struct, attrs, char_id)
res
rescue
error ->
Logger.error("[update_connection] Exception: #{inspect(error)}")
{:error, :exception}
end),
:ok <- result do
end) do
# Since GenServer updates are asynchronous, manually apply updates to the current struct
# to return the correct data immediately instead of refetching from potentially stale cache
updated_attrs =
@@ -374,6 +357,7 @@ defmodule WandererApp.Map.Operations.Connections do
"ship_size_type" -> maybe_update_ship_size_type(map_id, conn, val)
"time_status" -> maybe_update_time_status(map_id, conn, val)
"type" -> maybe_update_type(map_id, conn, val)
"locked" -> maybe_update_locked(map_id, conn, val)
_ -> :ok
end
@@ -429,6 +413,16 @@ defmodule WandererApp.Map.Operations.Connections do
})
end
defp maybe_update_locked(_map_id, _conn, nil), do: :ok
defp maybe_update_locked(map_id, conn, value) do
Server.update_connection_locked(map_id, %{
solar_system_source_id: conn.solar_system_source,
solar_system_target_id: conn.solar_system_target,
locked: value
})
end
@doc "Creates a connection between two systems"
@spec create_connection(String.t(), map(), String.t()) ::
{:ok, :created} | {:skip, :exists} | {:error, atom()}

View File

@@ -5,9 +5,42 @@ defmodule WandererApp.Map.Operations.Signatures do
require Logger
alias WandererApp.Map.Operations
alias WandererApp.Api.{MapSystem, MapSystemSignature}
alias WandererApp.Api.{Character, MapSystem, MapSystemSignature}
alias WandererApp.Map.Server
# Private helper to validate character_eve_id from params and return internal character ID
# If character_eve_id is provided in params, validates it exists and returns the internal UUID
# If not provided, falls back to the owner's character ID (which is already the internal UUID)
@spec validate_character_eve_id(map() | nil, String.t()) ::
{:ok, String.t()} | {:error, :invalid_character}
defp validate_character_eve_id(params, fallback_char_id) when is_map(params) do
case Map.get(params, "character_eve_id") do
nil ->
# No character_eve_id provided, use fallback (owner's internal character UUID)
{:ok, fallback_char_id}
provided_char_eve_id when is_binary(provided_char_eve_id) ->
# Validate the provided character_eve_id exists and get internal UUID
case Character.by_eve_id(provided_char_eve_id) do
{:ok, character} ->
# Return the internal character UUID, not the eve_id
{:ok, character.id}
_ ->
{:error, :invalid_character}
end
_ ->
# Invalid format
{:error, :invalid_character}
end
end
# Handle nil or non-map params by falling back to owner's character
defp validate_character_eve_id(_params, fallback_char_id) do
{:ok, fallback_char_id}
end
@spec list_signatures(String.t()) :: [map()]
def list_signatures(map_id) do
systems = Operations.list_systems(map_id)
@@ -41,11 +74,14 @@ defmodule WandererApp.Map.Operations.Signatures do
%{"solar_system_id" => solar_system_id} = params
)
when is_integer(solar_system_id) do
# Convert solar_system_id to system_id for internal use
with {:ok, system} <- MapSystem.by_map_id_and_solar_system_id(map_id, solar_system_id) do
# Validate character first, then convert solar_system_id to system_id
# validated_char_uuid is the internal character UUID for Server.update_signatures
with {:ok, validated_char_uuid} <- validate_character_eve_id(params, char_id),
{:ok, system} <- MapSystem.by_map_id_and_solar_system_id(map_id, solar_system_id) do
# Keep character_eve_id in attrs if provided by user (parse_signatures will use it)
# If not provided, parse_signatures will use the character_eve_id from validated_char_uuid lookup
attrs =
params
|> Map.put("character_eve_id", char_id)
|> Map.put("system_id", system.id)
|> Map.delete("solar_system_id")
@@ -54,7 +90,7 @@ defmodule WandererApp.Map.Operations.Signatures do
updated_signatures: [],
removed_signatures: [],
solar_system_id: solar_system_id,
character_id: char_id,
character_id: validated_char_uuid, # Pass internal UUID here
user_id: user_id,
delete_connection_with_sigs: false
}) do
@@ -86,6 +122,10 @@ defmodule WandererApp.Map.Operations.Signatures do
{:error, :unexpected_error}
end
else
{:error, :invalid_character} ->
Logger.error("[create_signature] Invalid character_eve_id provided")
{:error, :invalid_character}
_ ->
Logger.error(
"[create_signature] System not found for solar_system_id: #{solar_system_id}"
@@ -111,7 +151,10 @@ defmodule WandererApp.Map.Operations.Signatures do
sig_id,
params
) do
with {:ok, sig} <- MapSystemSignature.by_id(sig_id),
# Validate character first, then look up signature and system
# validated_char_uuid is the internal character UUID
with {:ok, validated_char_uuid} <- validate_character_eve_id(params, char_id),
{:ok, sig} <- MapSystemSignature.by_id(sig_id),
{:ok, system} <- MapSystem.by_id(sig.system_id) do
base = %{
"eve_id" => sig.eve_id,
@@ -120,11 +163,11 @@ defmodule WandererApp.Map.Operations.Signatures do
"group" => sig.group,
"type" => sig.type,
"custom_info" => sig.custom_info,
"character_eve_id" => char_id,
"description" => sig.description,
"linked_system_id" => sig.linked_system_id
}
# Merge user params (which may include character_eve_id) with base
attrs = Map.merge(base, params)
:ok =
@@ -133,7 +176,7 @@ defmodule WandererApp.Map.Operations.Signatures do
updated_signatures: [attrs],
removed_signatures: [],
solar_system_id: system.solar_system_id,
character_id: char_id,
character_id: validated_char_uuid, # Pass internal UUID here
user_id: user_id,
delete_connection_with_sigs: false
})
@@ -151,6 +194,10 @@ defmodule WandererApp.Map.Operations.Signatures do
_ -> {:ok, attrs}
end
else
{:error, :invalid_character} ->
Logger.error("[update_signature] Invalid character_eve_id provided")
{:error, :invalid_character}
err ->
Logger.error("[update_signature] Unexpected error: #{inspect(err)}")
{:error, :unexpected_error}

View File

@@ -35,20 +35,37 @@ defmodule WandererApp.Map.Operations.Systems do
# Private helper for batch upsert
defp create_system_batch(%{map_id: map_id, user_id: user_id, char_id: char_id}, params) do
do_create_system(map_id, user_id, char_id, params)
{:ok, solar_system_id} = fetch_system_id(params)
update_existing = fetch_update_existing(params, false)
map_id
|> WandererApp.Map.check_location(%{solar_system_id: solar_system_id})
|> case do
{:ok, _location} ->
do_create_system(map_id, user_id, char_id, params)
{:error, :already_exists} ->
if update_existing do
do_update_system(map_id, user_id, char_id, solar_system_id, params)
else
:ok
end
end
end
defp do_create_system(map_id, user_id, char_id, params) do
with {:ok, system_id} <- fetch_system_id(params),
update_existing <- fetch_update_existing(params, false),
coords <- normalize_coordinates(params),
:ok <-
Server.add_system(
map_id,
%{solar_system_id: system_id, coordinates: coords},
%{solar_system_id: system_id, coordinates: coords, extra: params},
user_id,
char_id
char_id,
update_existing: update_existing
) do
# System creation is async, but if add_system returns :ok,
# System creation is async, but if add_system returns :ok,
# it means the operation was queued successfully
{:ok, %{solar_system_id: system_id}}
else
@@ -63,15 +80,26 @@ defmodule WandererApp.Map.Operations.Systems do
end
@spec update_system(Plug.Conn.t(), integer(), map()) :: {:ok, map()} | {:error, atom()}
def update_system(%{assigns: %{map_id: map_id}} = _conn, system_id, attrs) do
with {:ok, current} <- MapSystemRepo.get_by_map_and_solar_system_id(map_id, system_id),
x_raw <- Map.get(attrs, "position_x", Map.get(attrs, :position_x, current.position_x)),
y_raw <- Map.get(attrs, "position_y", Map.get(attrs, :position_y, current.position_y)),
def update_system(
%{assigns: %{map_id: map_id, owner_character_id: char_id, owner_user_id: user_id}} =
_conn,
solar_system_id,
attrs
) do
do_update_system(map_id, user_id, char_id, solar_system_id, attrs)
end
def update_system(_conn, _solar_system_id, _attrs), do: {:error, :missing_params}
defp do_update_system(map_id, _user_id, _char_id, solar_system_id, params) do
with {:ok, current} <- MapSystemRepo.get_by_map_and_solar_system_id(map_id, solar_system_id),
x_raw <- Map.get(params, "position_x", Map.get(params, :position_x, current.position_x)),
y_raw <- Map.get(params, "position_y", Map.get(params, :position_y, current.position_y)),
{:ok, x} <- parse_int(x_raw, "position_x"),
{:ok, y} <- parse_int(y_raw, "position_y"),
coords = %{x: x, y: y},
:ok <- apply_system_updates(map_id, system_id, attrs, coords),
{:ok, system} <- MapSystemRepo.get_by_map_and_solar_system_id(map_id, system_id) do
:ok <- apply_system_updates(map_id, solar_system_id, params, coords),
{:ok, system} <- MapSystemRepo.get_by_map_and_solar_system_id(map_id, solar_system_id) do
{:ok, system}
else
{:error, reason} when is_binary(reason) ->
@@ -84,8 +112,6 @@ defmodule WandererApp.Map.Operations.Systems do
end
end
def update_system(_conn, _system_id, _attrs), do: {:error, :missing_params}
@spec delete_system(Plug.Conn.t(), integer()) :: {:ok, integer()} | {:error, atom()}
def delete_system(
%{assigns: %{map_id: map_id, owner_character_id: char_id, owner_user_id: user_id}} =
@@ -148,6 +174,15 @@ defmodule WandererApp.Map.Operations.Systems do
defp fetch_system_id(_), do: {:error, "Missing system identifier (id)"}
defp fetch_update_existing(%{"update_existing" => update_existing}, _default),
do: update_existing
defp fetch_update_existing(%{update_existing: update_existing}, _default)
when not is_nil(update_existing),
do: update_existing
defp fetch_update_existing(_, default), do: default
defp parse_int(val, _field) when is_integer(val), do: {:ok, val}
defp parse_int(val, field) when is_binary(val) do
@@ -232,6 +267,15 @@ defmodule WandererApp.Map.Operations.Systems do
labels: Enum.join(labels, ",")
})
"custom_name" ->
{:ok, solar_system_info} =
WandererApp.CachedInfo.get_system_static_info(system_id)
Server.update_system_name(map_id, %{
solar_system_id: system_id,
name: val || solar_system_info.solar_system_name
})
"temporary_name" ->
Server.update_system_temporary_name(map_id, %{
solar_system_id: system_id,

View File

@@ -5,7 +5,7 @@ defmodule WandererApp.Map.Server.AclsImpl do
@pubsub_client Application.compile_env(:wanderer_app, :pubsub_client)
def handle_map_acl_updated(%{map_id: map_id, map: old_map} = state, added_acls, removed_acls) do
def handle_map_acl_updated(map_id, added_acls, removed_acls) do
{:ok, map} =
WandererApp.MapRepo.get(map_id,
acls: [
@@ -63,7 +63,11 @@ defmodule WandererApp.Map.Server.AclsImpl do
broadcast_acl_updates({:ok, result}, map_id)
%{state | map: Map.merge(old_map, map_update)}
{:ok, %{map: old_map}} = WandererApp.Map.get_map_state(map_id)
WandererApp.Map.update_map_state(map_id, %{
map: Map.merge(old_map, map_update)
})
end
def handle_acl_updated(map_id, acl_id) do
@@ -113,8 +117,18 @@ defmodule WandererApp.Map.Server.AclsImpl do
track_acls(rest)
end
defp track_acl(acl_id),
do: @pubsub_client.subscribe(WandererApp.PubSub, "acls:#{acl_id}")
defp track_acl(acl_id) do
Cachex.get_and_update(:acl_cache, acl_id, fn acl ->
case acl do
nil ->
@pubsub_client.subscribe(WandererApp.PubSub, "acls:#{acl_id}")
{:commit, acl_id}
_ ->
{:ignore, nil}
end
end)
end
defp broadcast_acl_updates(
{:ok,

File diff suppressed because it is too large Load Diff

View File

@@ -139,30 +139,27 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
def init_start_cache(_map_id, _connections_start_time), do: :ok
def add_connection(
%{map_id: map_id} = state,
map_id,
%{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id,
character_id: character_id
} = connection_info
) do
:ok =
maybe_add_connection(
map_id,
%{solar_system_id: solar_system_target_id},
%{
solar_system_id: solar_system_source_id
},
character_id,
true,
connection_info |> Map.get(:extra_info)
)
state
end
),
do:
maybe_add_connection(
map_id,
%{solar_system_id: solar_system_target_id},
%{
solar_system_id: solar_system_source_id
},
character_id,
true,
connection_info |> Map.get(:extra_info)
)
def paste_connections(
%{map_id: map_id} = state,
map_id,
connections,
_user_id,
character_id
@@ -175,47 +172,29 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
solar_system_source_id = source |> String.to_integer()
solar_system_target_id = target |> String.to_integer()
state
|> add_connection(%{
add_connection(map_id, %{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id,
character_id: character_id,
extra_info: connection
})
end)
state
end
def delete_connection(
%{map_id: map_id} = state,
map_id,
%{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id
} = _connection_info
) do
:ok =
maybe_remove_connection(map_id, %{solar_system_id: solar_system_target_id}, %{
solar_system_id: solar_system_source_id
})
state
end
def update_connection_type(
%{map_id: map_id} = state,
%{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id,
character_id: character_id
} = _connection_info,
type
) do
state
end
),
do:
maybe_remove_connection(map_id, %{solar_system_id: solar_system_target_id}, %{
solar_system_id: solar_system_source_id
})
def get_connection_info(
%{map_id: map_id} = _state,
map_id,
%{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id
@@ -237,11 +216,11 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
end
def update_connection_time_status(
%{map_id: map_id} = state,
map_id,
connection_update
),
do:
update_connection(state, :update_time_status, [:time_status], connection_update, fn
update_connection(map_id, :update_time_status, [:time_status], connection_update, fn
%{time_status: old_time_status},
%{id: connection_id, time_status: time_status} = updated_connection ->
case time_status == @connection_time_status_eol do
@@ -268,131 +247,124 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
end)
def update_connection_type(
state,
map_id,
connection_update
),
do: update_connection(state, :update_type, [:type], connection_update)
do: update_connection(map_id, :update_type, [:type], connection_update)
def update_connection_mass_status(
state,
map_id,
connection_update
),
do: update_connection(state, :update_mass_status, [:mass_status], connection_update)
do: update_connection(map_id, :update_mass_status, [:mass_status], connection_update)
def update_connection_ship_size_type(
state,
map_id,
connection_update
),
do: update_connection(state, :update_ship_size_type, [:ship_size_type], connection_update)
do: update_connection(map_id, :update_ship_size_type, [:ship_size_type], connection_update)
def update_connection_locked(
state,
map_id,
connection_update
),
do: update_connection(state, :update_locked, [:locked], connection_update)
do: update_connection(map_id, :update_locked, [:locked], connection_update)
def update_connection_custom_info(
state,
map_id,
connection_update
),
do: update_connection(state, :update_custom_info, [:custom_info], connection_update)
do: update_connection(map_id, :update_custom_info, [:custom_info], connection_update)
def cleanup_connections(%{map_id: map_id} = state) do
def cleanup_connections(map_id) do
connection_auto_expire_hours = get_connection_auto_expire_hours()
connection_auto_eol_hours = get_connection_auto_eol_hours()
connection_eol_expire_timeout_hours = get_eol_expire_timeout_mins() / 60
state =
map_id
|> WandererApp.Map.list_connections!()
|> Enum.reduce(state, fn %{
id: connection_id,
solar_system_source: solar_system_source_id,
solar_system_target: solar_system_target_id,
time_status: time_status,
type: type
},
state ->
if type == @connection_type_wormhole do
connection_start_time = get_start_time(map_id, connection_id)
new_time_status = get_new_time_status(connection_start_time, time_status)
map_id
|> WandererApp.Map.list_connections!()
|> Enum.each(fn connection ->
maybe_update_connection_time_status(map_id, connection)
end)
if new_time_status != time_status &&
is_connection_valid(
:wormholes,
solar_system_source_id,
solar_system_target_id
) do
set_start_time(map_id, connection_id, DateTime.utc_now())
state
|> update_connection_time_status(%{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id,
time_status: new_time_status
})
else
state
end
else
state
end
end)
state =
map_id
|> WandererApp.Map.list_connections!()
|> Enum.filter(fn %{
id: connection_id,
solar_system_source: solar_system_source_id,
solar_system_target: solar_system_target_id,
time_status: time_status,
type: type
} ->
is_connection_exist =
is_connection_exist(
map_id,
solar_system_source_id,
solar_system_target_id
) ||
not is_nil(
WandererApp.Map.get_connection(
map_id,
solar_system_target_id,
solar_system_source_id
)
map_id
|> WandererApp.Map.list_connections!()
|> Enum.filter(fn %{
id: connection_id,
solar_system_source: solar_system_source_id,
solar_system_target: solar_system_target_id,
time_status: time_status,
type: type
} ->
is_connection_exist =
is_connection_exist(
map_id,
solar_system_source_id,
solar_system_target_id
) ||
not is_nil(
WandererApp.Map.get_connection(
map_id,
solar_system_target_id,
solar_system_source_id
)
)
not is_connection_exist ||
(type == @connection_type_wormhole &&
time_status == @connection_time_status_eol &&
is_connection_valid(
:wormholes,
solar_system_source_id,
solar_system_target_id
) &&
DateTime.diff(
DateTime.utc_now(),
get_connection_mark_eol_time(map_id, connection_id),
:hour
) >=
connection_auto_expire_hours - connection_auto_eol_hours +
connection_eol_expire_timeout_hours)
end)
|> Enum.reduce(state, fn %{
solar_system_source: solar_system_source_id,
solar_system_target: solar_system_target_id
},
state ->
delete_connection(state, %{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id
})
end)
state
not is_connection_exist ||
(type == @connection_type_wormhole &&
time_status == @connection_time_status_eol &&
is_connection_valid(
:wormholes,
solar_system_source_id,
solar_system_target_id
) &&
DateTime.diff(
DateTime.utc_now(),
get_connection_mark_eol_time(map_id, connection_id),
:hour
) >=
connection_auto_expire_hours - connection_auto_eol_hours +
connection_eol_expire_timeout_hours)
end)
|> Enum.each(fn %{
solar_system_source: solar_system_source_id,
solar_system_target: solar_system_target_id
} ->
delete_connection(map_id, %{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id
})
end)
end
defp maybe_update_connection_time_status(map_id, %{
id: connection_id,
solar_system_source: solar_system_source_id,
solar_system_target: solar_system_target_id,
time_status: time_status,
type: @connection_type_wormhole
}) do
connection_start_time = get_start_time(map_id, connection_id)
new_time_status = get_new_time_status(connection_start_time, time_status)
if new_time_status != time_status &&
is_connection_valid(
:wormholes,
solar_system_source_id,
solar_system_target_id
) do
set_start_time(map_id, connection_id, DateTime.utc_now())
update_connection_time_status(map_id, %{
solar_system_source_id: solar_system_source_id,
solar_system_target_id: solar_system_target_id,
time_status: new_time_status
})
end
end
defp maybe_update_connection_time_status(_map_id, _connection), do: :ok
defp maybe_update_linked_signature_time_status(
map_id,
%{
@@ -401,36 +373,36 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
solar_system_target: solar_system_target
} = updated_connection
) do
source_system =
WandererApp.Map.find_system_by_location(
with source_system when not is_nil(source_system) <-
WandererApp.Map.find_system_by_location(
map_id,
%{solar_system_id: solar_system_source}
),
target_system when not is_nil(source_system) <-
WandererApp.Map.find_system_by_location(
map_id,
%{solar_system_id: solar_system_target}
),
source_linked_signatures <-
find_linked_signatures(source_system, target_system),
target_linked_signatures <- find_linked_signatures(target_system, source_system) do
update_signatures_time_status(
map_id,
%{solar_system_id: solar_system_source}
source_system.solar_system_id,
source_linked_signatures,
time_status
)
target_system =
WandererApp.Map.find_system_by_location(
update_signatures_time_status(
map_id,
%{solar_system_id: solar_system_target}
target_system.solar_system_id,
target_linked_signatures,
time_status
)
source_linked_signatures =
find_linked_signatures(source_system, target_system)
target_linked_signatures = find_linked_signatures(target_system, source_system)
update_signatures_time_status(
map_id,
source_system.solar_system_id,
source_linked_signatures,
time_status
)
update_signatures_time_status(
map_id,
target_system.solar_system_id,
target_linked_signatures,
time_status
)
else
error ->
Logger.warning("Failed to update_linked_signature_time_status: #{inspect(error)}")
end
end
defp find_linked_signatures(
@@ -466,7 +438,7 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
%{custom_info: updated_custom_info}
end
SignaturesImpl.apply_update_signature(%{map_id: map_id}, sig, update_params)
SignaturesImpl.apply_update_signature(map_id, sig, update_params)
end)
Impl.broadcast!(map_id, :signatures_updated, solar_system_id)
@@ -525,7 +497,11 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
time_status =
if connection_type == @connection_type_wormhole do
@connection_time_status_eol_24
get_time_status(
old_location.solar_system_id,
location.solar_system_id,
ship_size_type
)
else
@connection_time_status_default
end
@@ -561,6 +537,12 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
Impl.broadcast!(map_id, :add_connection, connection)
Impl.broadcast!(map_id, :maybe_link_signature, %{
character_id: character_id,
solar_system_source: old_location.solar_system_id,
solar_system_target: location.solar_system_id
})
# ADDITIVE: Also broadcast to external event system (webhooks/WebSocket)
WandererApp.ExternalEvents.broadcast(map_id, :connection_added, %{
connection_id: connection.id,
@@ -572,19 +554,12 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
time_status: connection.time_status
})
{:ok, _} =
WandererApp.User.ActivityTracker.track_map_event(:map_connection_added, %{
character_id: character_id,
user_id: character.user_id,
map_id: map_id,
solar_system_source_id: old_location.solar_system_id,
solar_system_target_id: location.solar_system_id
})
Impl.broadcast!(map_id, :maybe_link_signature, %{
WandererApp.User.ActivityTracker.track_map_event(:map_connection_added, %{
character_id: character_id,
solar_system_source: old_location.solar_system_id,
solar_system_target: location.solar_system_id
user_id: character.user_id,
map_id: map_id,
solar_system_source_id: old_location.solar_system_id,
solar_system_target_id: location.solar_system_id
})
:ok
@@ -681,12 +656,17 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
)
)
def is_connection_valid(:all, _from_solar_system_id, _to_solar_system_id), do: true
def is_connection_valid(_scope, from_solar_system_id, to_solar_system_id)
when is_nil(from_solar_system_id) or is_nil(to_solar_system_id),
do: false
def is_connection_valid(:all, from_solar_system_id, to_solar_system_id),
do: from_solar_system_id != to_solar_system_id
def is_connection_valid(:none, _from_solar_system_id, _to_solar_system_id), do: false
def is_connection_valid(scope, from_solar_system_id, to_solar_system_id)
when not is_nil(from_solar_system_id) and not is_nil(to_solar_system_id) do
when from_solar_system_id != to_solar_system_id do
with {:ok, known_jumps} <- find_solar_system_jump(from_solar_system_id, to_solar_system_id),
{:ok, from_system_static_info} <- get_system_static_info(from_solar_system_id),
{:ok, to_system_static_info} <- get_system_static_info(to_solar_system_id) do
@@ -775,7 +755,7 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
defp maybe_remove_connection(_map_id, _location, _old_location), do: :ok
defp update_connection(
%{map_id: map_id} = state,
map_id,
update_method,
attributes,
%{
@@ -820,12 +800,12 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
custom_info: updated_connection.custom_info
})
state
:ok
else
{:error, error} ->
Logger.error("Failed to update connection: #{inspect(error, pretty: true)}")
state
:ok
end
end
@@ -865,6 +845,41 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
defp get_ship_size_type(_source_solar_system_id, _target_solar_system_id, _connection_type),
do: @large_ship_size
defp get_time_status(
_source_solar_system_id,
_target_solar_system_id,
@frigate_ship_size
),
do: @connection_time_status_eol_4_5
defp get_time_status(
source_solar_system_id,
target_solar_system_id,
_ship_size_type
) do
# Check if either system is C1 before creating the connection
{:ok, source_system_info} = get_system_static_info(source_solar_system_id)
{:ok, target_system_info} = get_system_static_info(target_solar_system_id)
cond do
# C1/2/3/4 systems always get eol_16
source_system_info.system_class in [@c1, @c2, @c3, @c4] or
target_system_info.system_class in [@c1, @c2, @c3, @c4] ->
@connection_time_status_eol_16
# C5/6 systems always get eol_24
source_system_info.system_class in [@c5, @c6] or
target_system_info.system_class in [@c5, @c6] ->
@connection_time_status_eol_24
true ->
@connection_time_status_default
end
end
defp get_time_status(_source_solar_system_id, _target_solar_system_id, _ship_size_type),
do: @connection_time_status_default
defp get_new_time_status(_start_time, @connection_time_status_default),
do: @connection_time_status_eol_24
@@ -903,4 +918,5 @@ defmodule WandererApp.Map.Server.ConnectionsImpl do
defp get_time_status_minutes(@connection_time_status_eol_16), do: @connection_eol_16_minutes
defp get_time_status_minutes(@connection_time_status_eol_24), do: @connection_eol_24_minutes
defp get_time_status_minutes(@connection_time_status_eol_48), do: @connection_eol_48_minutes
defp get_time_status_minutes(_), do: @connection_eol_24_minutes
end

View File

@@ -24,12 +24,9 @@ defmodule WandererApp.Map.Server.Impl do
map_opts: []
]
@systems_cleanup_timeout :timer.minutes(30)
@characters_cleanup_timeout :timer.minutes(5)
@connections_cleanup_timeout :timer.minutes(1)
@pubsub_client Application.compile_env(:wanderer_app, :pubsub_client)
@backup_state_timeout :timer.minutes(1)
@ddrt Application.compile_env(:wanderer_app, :ddrt)
@update_presence_timeout :timer.seconds(5)
@update_characters_timeout :timer.seconds(1)
@update_tracked_characters_timeout :timer.minutes(1)
@@ -37,51 +34,104 @@ defmodule WandererApp.Map.Server.Impl do
def new(), do: __struct__()
def new(args), do: __struct__(args)
def init(args) do
map_id = args[:map_id]
Logger.info("Starting map server for #{map_id}")
def do_init_state(opts) do
map_id = opts[:map_id]
ErrorTracker.set_context(%{map_id: map_id})
WandererApp.Cache.insert("map_#{map_id}:started", false)
initial_state =
%{
map_id: map_id,
rtree_name: "rtree_#{map_id}"
}
|> new()
%{
map_id: map_id,
rtree_name: Module.concat([map_id, DDRT.DynamicRtree])
}
|> new()
end
# Parallelize database queries for faster initialization
start_time = System.monotonic_time(:millisecond)
def load_state(%__MODULE__{map_id: map_id} = state) do
with {:ok, map} <-
WandererApp.MapRepo.get(map_id, [
:owner,
:characters,
acls: [
:owner_id,
members: [:role, :eve_character_id, :eve_corporation_id, :eve_alliance_id]
]
]),
{:ok, systems} <- WandererApp.MapSystemRepo.get_visible_by_map(map_id),
{:ok, connections} <- WandererApp.MapConnectionRepo.get_by_map(map_id),
{:ok, subscription_settings} <-
WandererApp.Map.SubscriptionManager.get_active_map_subscription(map_id) do
state
tasks = [
Task.async(fn ->
{:map,
WandererApp.MapRepo.get(map_id, [
:owner,
:characters,
acls: [
:owner_id,
members: [:role, :eve_character_id, :eve_corporation_id, :eve_alliance_id]
]
])}
end),
Task.async(fn ->
{:systems, WandererApp.MapSystemRepo.get_visible_by_map(map_id)}
end),
Task.async(fn ->
{:connections, WandererApp.MapConnectionRepo.get_by_map(map_id)}
end),
Task.async(fn ->
{:subscription, WandererApp.Map.SubscriptionManager.get_active_map_subscription(map_id)}
end)
]
results = Task.await_many(tasks, :timer.seconds(15))
duration = System.monotonic_time(:millisecond) - start_time
# Emit telemetry for slow initializations
if duration > 5_000 do
Logger.warning("[Map Server] Slow map state initialization: #{map_id} took #{duration}ms")
:telemetry.execute(
[:wanderer_app, :map, :slow_init],
%{duration_ms: duration},
%{map_id: map_id}
)
end
# Extract results
map_result =
Enum.find_value(results, fn
{:map, result} -> result
_ -> nil
end)
systems_result =
Enum.find_value(results, fn
{:systems, result} -> result
_ -> nil
end)
connections_result =
Enum.find_value(results, fn
{:connections, result} -> result
_ -> nil
end)
subscription_result =
Enum.find_value(results, fn
{:subscription, result} -> result
_ -> nil
end)
# Process results
with {:ok, map} <- map_result,
{:ok, systems} <- systems_result,
{:ok, connections} <- connections_result,
{:ok, subscription_settings} <- subscription_result do
initial_state
|> init_map(
map,
subscription_settings,
systems,
connections
)
|> SystemsImpl.init_map_systems(systems)
|> init_map_cache()
else
error ->
Logger.error("Failed to load map state: #{inspect(error, pretty: true)}")
state
initial_state
end
end
def start_map(%__MODULE__{map: map, map_id: map_id} = state) do
def start_map(%__MODULE__{map: map, map_id: map_id} = _state) do
WandererApp.Cache.insert("map_#{map_id}:started", false)
# Check if map was loaded successfully
case map do
nil ->
@@ -95,297 +145,113 @@ defmodule WandererApp.Map.Server.Impl do
"maps:#{map_id}"
)
Process.send_after(self(), :update_characters, @update_characters_timeout)
Process.send_after(self(), {:update_characters, map_id}, @update_characters_timeout)
Process.send_after(
self(),
:update_tracked_characters,
{:update_tracked_characters, map_id},
@update_tracked_characters_timeout
)
Process.send_after(self(), :update_presence, @update_presence_timeout)
Process.send_after(self(), :cleanup_connections, @connections_cleanup_timeout)
Process.send_after(self(), :cleanup_systems, 10_000)
Process.send_after(self(), :cleanup_characters, @characters_cleanup_timeout)
Process.send_after(self(), :backup_state, @backup_state_timeout)
Process.send_after(self(), {:update_presence, map_id}, @update_presence_timeout)
WandererApp.Cache.insert("map_#{map_id}:started", true)
# Initialize zkb cache structure to prevent timing issues
cache_key = "map:#{map_id}:zkb:detailed_kills"
WandererApp.Cache.insert(cache_key, %{}, ttl: :timer.hours(24))
WandererApp.Cache.insert("map:#{map_id}:zkb:detailed_kills", %{}, ttl: :timer.hours(24))
broadcast!(map_id, :map_server_started)
@pubsub_client.broadcast!(WandererApp.PubSub, "maps", :map_server_started)
:telemetry.execute([:wanderer_app, :map, :started], %{count: 1})
state
else
error ->
Logger.error("Failed to start map: #{inspect(error, pretty: true)}")
state
end
end
end
def stop_map(%{map_id: map_id} = state) do
def stop_map(map_id) do
Logger.debug(fn -> "Stopping map server for #{map_id}" end)
@pubsub_client.unsubscribe(
WandererApp.PubSub,
"maps:#{map_id}"
)
WandererApp.Cache.delete("map_#{map_id}:started")
WandererApp.Cache.delete("map_characters-#{map_id}")
WandererApp.Map.CacheRTree.clear_tree("rtree_#{map_id}")
WandererApp.Map.delete_map_state(map_id)
WandererApp.Cache.insert_or_update(
"started_maps",
[],
fn started_maps ->
started_maps
|> Enum.reject(fn started_map_id -> started_map_id == map_id end)
end
)
:telemetry.execute([:wanderer_app, :map, :stopped], %{count: 1})
state
|> maybe_stop_rtree()
end
def get_map(%{map: map} = _state), do: {:ok, map}
defdelegate add_character(state, character, track_character), to: CharactersImpl
def remove_character(%{map_id: map_id} = state, character_id) do
CharactersImpl.remove_character(map_id, character_id)
state
end
def untrack_characters(%{map_id: map_id} = state, characters_ids) do
CharactersImpl.untrack_characters(map_id, characters_ids)
state
end
defdelegate add_system(state, system_info, user_id, character_id), to: SystemsImpl
defdelegate paste_systems(state, systems, user_id, character_id), to: SystemsImpl
defdelegate add_system_comment(state, comment_info, user_id, character_id), to: SystemsImpl
defdelegate remove_system_comment(state, comment_id, user_id, character_id), to: SystemsImpl
defdelegate cleanup_systems(map_id), to: SystemsImpl
defdelegate cleanup_connections(map_id), to: ConnectionsImpl
defdelegate cleanup_characters(map_id), to: CharactersImpl
defdelegate untrack_characters(map_id, characters_ids), to: CharactersImpl
defdelegate add_system(map_id, system_info, user_id, character_id, opts \\ []), to: SystemsImpl
defdelegate paste_connections(map_id, connections, user_id, character_id), to: ConnectionsImpl
defdelegate paste_systems(map_id, systems, user_id, character_id, opts), to: SystemsImpl
defdelegate add_system_comment(map_id, comment_info, user_id, character_id), to: SystemsImpl
defdelegate remove_system_comment(map_id, comment_id, user_id, character_id), to: SystemsImpl
defdelegate delete_systems(
state,
map_id,
removed_ids,
user_id,
character_id
),
to: SystemsImpl
defdelegate update_system_name(state, update), to: SystemsImpl
defdelegate update_system_name(map_id, update), to: SystemsImpl
defdelegate update_system_description(map_id, update), to: SystemsImpl
defdelegate update_system_status(map_id, update), to: SystemsImpl
defdelegate update_system_tag(map_id, update), to: SystemsImpl
defdelegate update_system_temporary_name(map_id, update), to: SystemsImpl
defdelegate update_system_locked(map_id, update), to: SystemsImpl
defdelegate update_system_labels(map_id, update), to: SystemsImpl
defdelegate update_system_linked_sig_eve_id(map_id, update), to: SystemsImpl
defdelegate update_system_position(map_id, update), to: SystemsImpl
defdelegate add_hub(map_id, hub_info), to: SystemsImpl
defdelegate remove_hub(map_id, hub_info), to: SystemsImpl
defdelegate add_ping(map_id, ping_info), to: PingsImpl
defdelegate cancel_ping(map_id, ping_info), to: PingsImpl
defdelegate add_connection(map_id, connection_info), to: ConnectionsImpl
defdelegate delete_connection(map_id, connection_info), to: ConnectionsImpl
defdelegate get_connection_info(map_id, connection_info), to: ConnectionsImpl
defdelegate update_connection_time_status(map_id, connection_update), to: ConnectionsImpl
defdelegate update_connection_type(map_id, connection_update), to: ConnectionsImpl
defdelegate update_connection_mass_status(map_id, connection_update), to: ConnectionsImpl
defdelegate update_connection_ship_size_type(map_id, connection_update), to: ConnectionsImpl
defdelegate update_connection_locked(map_id, connection_update), to: ConnectionsImpl
defdelegate update_connection_custom_info(map_id, connection_update), to: ConnectionsImpl
defdelegate update_signatures(map_id, signatures_update), to: SignaturesImpl
defdelegate update_system_description(state, update), to: SystemsImpl
defdelegate update_system_status(state, update), to: SystemsImpl
defdelegate update_system_tag(state, update), to: SystemsImpl
defdelegate update_system_temporary_name(state, update), to: SystemsImpl
defdelegate update_system_locked(state, update), to: SystemsImpl
defdelegate update_system_labels(state, update), to: SystemsImpl
defdelegate update_system_linked_sig_eve_id(state, update), to: SystemsImpl
defdelegate update_system_position(state, update), to: SystemsImpl
defdelegate add_hub(state, hub_info), to: SystemsImpl
defdelegate remove_hub(state, hub_info), to: SystemsImpl
defdelegate add_ping(state, ping_info), to: PingsImpl
defdelegate cancel_ping(state, ping_info), to: PingsImpl
defdelegate add_connection(state, connection_info), to: ConnectionsImpl
defdelegate delete_connection(state, connection_info), to: ConnectionsImpl
defdelegate get_connection_info(state, connection_info), to: ConnectionsImpl
defdelegate paste_connections(state, connections, user_id, character_id), to: ConnectionsImpl
defdelegate update_connection_time_status(state, connection_update), to: ConnectionsImpl
defdelegate update_connection_type(state, connection_update), to: ConnectionsImpl
defdelegate update_connection_mass_status(state, connection_update), to: ConnectionsImpl
defdelegate update_connection_ship_size_type(state, connection_update), to: ConnectionsImpl
defdelegate update_connection_locked(state, connection_update), to: ConnectionsImpl
defdelegate update_connection_custom_info(state, signatures_update), to: ConnectionsImpl
defdelegate update_signatures(state, signatures_update), to: SignaturesImpl
def import_settings(%{map_id: map_id} = state, settings, user_id) do
def import_settings(map_id, settings, user_id) do
WandererApp.Cache.put(
"map_#{map_id}:importing",
true
)
state =
state
|> maybe_import_systems(settings, user_id, nil)
|> maybe_import_connections(settings, user_id)
|> maybe_import_hubs(settings, user_id)
maybe_import_systems(map_id, settings, user_id, nil)
maybe_import_connections(map_id, settings, user_id)
maybe_import_hubs(map_id, settings, user_id)
WandererApp.Cache.take("map_#{map_id}:importing")
state
end
def update_subscription_settings(%{map: map} = state, subscription_settings),
do: %{
state
| map: map |> WandererApp.Map.update_subscription_settings!(subscription_settings)
}
def handle_event(:update_characters, state) do
Process.send_after(self(), :update_characters, @update_characters_timeout)
CharactersImpl.update_characters(state)
state
end
def handle_event(:update_tracked_characters, %{map_id: map_id} = state) do
Process.send_after(self(), :update_tracked_characters, @update_tracked_characters_timeout)
CharactersImpl.update_tracked_characters(map_id)
state
end
def handle_event(:update_presence, %{map_id: map_id} = state) do
Process.send_after(self(), :update_presence, @update_presence_timeout)
update_presence(map_id)
state
end
def handle_event(:backup_state, state) do
Process.send_after(self(), :backup_state, @backup_state_timeout)
{:ok, _map_state} = state |> save_map_state()
state
end
def handle_event(
{:map_acl_updated, added_acls, removed_acls},
state
) do
state |> AclsImpl.handle_map_acl_updated(added_acls, removed_acls)
end
def handle_event({:acl_updated, %{acl_id: acl_id}}, %{map_id: map_id} = state) do
AclsImpl.handle_acl_updated(map_id, acl_id)
state
end
def handle_event({:acl_deleted, %{acl_id: acl_id}}, %{map_id: map_id} = state) do
AclsImpl.handle_acl_deleted(map_id, acl_id)
state
end
def handle_event(:cleanup_connections, state) do
Process.send_after(self(), :cleanup_connections, @connections_cleanup_timeout)
state |> ConnectionsImpl.cleanup_connections()
end
def handle_event(:cleanup_characters, %{map_id: map_id, map: %{owner_id: owner_id}} = state) do
Process.send_after(self(), :cleanup_characters, @characters_cleanup_timeout)
CharactersImpl.cleanup_characters(map_id, owner_id)
state
end
def handle_event(:cleanup_systems, state) do
Process.send_after(self(), :cleanup_systems, @systems_cleanup_timeout)
state |> SystemsImpl.cleanup_systems()
end
def handle_event(:subscription_settings_updated, %{map: map, map_id: map_id} = state) do
{:ok, subscription_settings} =
WandererApp.Map.SubscriptionManager.get_active_map_subscription(map_id)
%{
state
| map:
map
|> WandererApp.Map.update_subscription_settings!(subscription_settings)
}
end
def handle_event({:options_updated, options}, %{map: map} = state) do
map |> WandererApp.Map.update_options!(options)
%{state | map_opts: map_options(options)}
end
def handle_event({ref, _result}, %{map_id: _map_id} = state) when is_reference(ref) do
Process.demonitor(ref, [:flush])
state
end
def handle_event(msg, state) do
Logger.warning("Unhandled event: #{inspect(msg)}")
state
end
def broadcast!(map_id, event, payload \\ nil) do
if can_broadcast?(map_id) do
@pubsub_client.broadcast!(WandererApp.PubSub, map_id, %{
event: event,
payload: payload
})
end
:ok
end
defp can_broadcast?(map_id),
do:
not WandererApp.Cache.lookup!("map_#{map_id}:importing", false) and
WandererApp.Cache.lookup!("map_#{map_id}:started", false)
def get_update_map(update, attributes),
do:
{:ok,
Enum.reduce(attributes, Map.new(), fn attribute, map ->
map |> Map.put_new(attribute, get_in(update, [Access.key(attribute)]))
end)}
defp map_options(options) do
[
layout: options |> Map.get("layout", "left_to_right"),
store_custom_labels:
options |> Map.get("store_custom_labels", "false") |> String.to_existing_atom(),
show_linked_signature_id:
options |> Map.get("show_linked_signature_id", "false") |> String.to_existing_atom(),
show_linked_signature_id_temp_name:
options
|> Map.get("show_linked_signature_id_temp_name", "false")
|> String.to_existing_atom(),
show_temp_system_name:
options |> Map.get("show_temp_system_name", "false") |> String.to_existing_atom(),
restrict_offline_showing:
options |> Map.get("restrict_offline_showing", "false") |> String.to_existing_atom()
]
end
defp save_map_state(%{map_id: map_id} = _state) do
def save_map_state(map_id) do
systems_last_activity =
map_id
|> WandererApp.Map.list_systems!()
@@ -430,19 +296,172 @@ defmodule WandererApp.Map.Server.Impl do
})
end
defp maybe_stop_rtree(%{rtree_name: rtree_name} = state) do
case Process.whereis(rtree_name) do
nil ->
:ok
def handle_event({:update_characters, map_id} = event) do
Process.send_after(self(), event, @update_characters_timeout)
pid when is_pid(pid) ->
GenServer.stop(pid, :normal)
end
state
CharactersImpl.update_characters(map_id)
end
defp init_map_cache(%__MODULE__{map_id: map_id} = state) do
def handle_event({:update_tracked_characters, map_id} = event) do
Process.send_after(
self(),
event,
@update_tracked_characters_timeout
)
CharactersImpl.update_tracked_characters(map_id)
end
def handle_event({:update_presence, map_id} = event) do
Process.send_after(self(), event, @update_presence_timeout)
update_presence(map_id)
end
def handle_event({:map_acl_updated, map_id, added_acls, removed_acls}) do
AclsImpl.handle_map_acl_updated(map_id, added_acls, removed_acls)
end
def handle_event({:acl_updated, %{acl_id: acl_id}}) do
# Find all maps that use this ACL
case Ash.read(
WandererApp.Api.MapAccessList
|> Ash.Query.for_read(:read_by_acl, %{acl_id: acl_id})
) do
{:ok, map_acls} ->
Logger.debug(fn ->
"Found #{length(map_acls)} maps using ACL #{acl_id}: #{inspect(Enum.map(map_acls, & &1.map_id))}"
end)
# Broadcast to each map
Enum.each(map_acls, fn %{map_id: map_id} ->
Logger.debug(fn -> "Broadcasting acl_updated to map #{map_id}" end)
AclsImpl.handle_acl_updated(map_id, acl_id)
end)
Logger.debug(fn ->
"Successfully broadcast acl_updated event to #{length(map_acls)} maps"
end)
{:error, error} ->
Logger.error("Failed to find maps for ACL #{acl_id}: #{inspect(error)}")
:ok
end
end
def handle_event({:acl_deleted, %{acl_id: acl_id}}) do
case Ash.read(
WandererApp.Api.MapAccessList
|> Ash.Query.for_read(:read_by_acl, %{acl_id: acl_id})
) do
{:ok, map_acls} ->
Logger.debug(fn ->
"Found #{length(map_acls)} maps using ACL #{acl_id}: #{inspect(Enum.map(map_acls, & &1.map_id))}"
end)
# Broadcast to each map
Enum.each(map_acls, fn %{map_id: map_id} ->
Logger.debug(fn -> "Broadcasting acl_deleted to map #{map_id}" end)
AclsImpl.handle_acl_deleted(map_id, acl_id)
end)
Logger.debug(fn ->
"Successfully broadcast acl_deleted event to #{length(map_acls)} maps"
end)
{:error, error} ->
Logger.error("Failed to find maps for ACL #{acl_id}: #{inspect(error)}")
:ok
end
end
def handle_event({:subscription_settings_updated, map_id}) do
{:ok, subscription_settings} =
WandererApp.Map.SubscriptionManager.get_active_map_subscription(map_id)
update_subscription_settings(map_id, subscription_settings)
end
def handle_event({:options_updated, map_id, options}) do
update_options(map_id, options)
end
def handle_event(:map_deleted) do
# Map has been deleted - this event is handled by MapPool to stop the server
# and by MapLive to redirect users. Nothing to do here.
Logger.debug("Map deletion event received, will be handled by MapPool")
:ok
end
def handle_event({ref, _result}) when is_reference(ref) do
Process.demonitor(ref, [:flush])
end
def handle_event(msg) do
Logger.warning("Unhandled event: #{inspect(msg)}")
end
def update_subscription_settings(map_id, subscription_settings) do
{:ok, %{map: map}} = WandererApp.Map.get_map_state(map_id)
WandererApp.Map.update_map_state(map_id, %{
map: map |> WandererApp.Map.update_subscription_settings!(subscription_settings)
})
end
def update_options(map_id, options) do
{:ok, %{map: map}} = WandererApp.Map.get_map_state(map_id)
WandererApp.Map.update_map_state(map_id, %{
map: map |> WandererApp.Map.update_options!(options),
map_opts: map_options(options)
})
end
def broadcast!(map_id, event, payload \\ nil) do
if can_broadcast?(map_id) do
@pubsub_client.broadcast!(WandererApp.PubSub, map_id, %{
event: event,
payload: payload
})
end
:ok
end
defp can_broadcast?(map_id),
do:
not WandererApp.Cache.lookup!("map_#{map_id}:importing", false) and
WandererApp.Cache.lookup!("map_#{map_id}:started", false)
def get_update_map(update, attributes),
do:
{:ok,
Enum.reduce(attributes, Map.new(), fn attribute, map ->
map |> Map.put_new(attribute, get_in(update, [Access.key(attribute)]))
end)}
defp map_options(options) do
[
layout: options |> Map.get("layout", "left_to_right"),
store_custom_labels:
options |> Map.get("store_custom_labels", "false") |> String.to_existing_atom(),
show_linked_signature_id:
options |> Map.get("show_linked_signature_id", "false") |> String.to_existing_atom(),
show_linked_signature_id_temp_name:
options
|> Map.get("show_linked_signature_id_temp_name", "false")
|> String.to_existing_atom(),
show_temp_system_name:
options |> Map.get("show_temp_system_name", "false") |> String.to_existing_atom(),
restrict_offline_showing:
options |> Map.get("restrict_offline_showing", "false") |> String.to_existing_atom(),
allowed_copy_for: options |> Map.get("allowed_copy_for", "admin"),
allowed_paste_for: options |> Map.get("allowed_paste_for", "member")
]
end
defp init_map_cache(map_id) do
case WandererApp.Api.MapState.by_map_id(map_id) do
{:ok,
%{
@@ -454,10 +473,8 @@ defmodule WandererApp.Map.Server.Impl do
ConnectionsImpl.init_eol_cache(map_id, connections_eol_time)
ConnectionsImpl.init_start_cache(map_id, connections_start_time)
state
_ ->
state
:ok
end
end
@@ -470,6 +487,8 @@ defmodule WandererApp.Map.Server.Impl do
) do
{:ok, options} = WandererApp.MapRepo.options_to_form_data(initial_map)
@ddrt.init_tree("rtree_#{map_id}", %{width: 150, verbose: false})
map =
initial_map
|> WandererApp.Map.new()
@@ -479,54 +498,70 @@ defmodule WandererApp.Map.Server.Impl do
|> WandererApp.Map.add_connections!(connections)
|> WandererApp.Map.add_characters!(characters)
SystemsImpl.init_map_systems(map_id, systems)
character_ids =
map_id
|> WandererApp.Map.get_map!()
|> Map.get(:characters, [])
init_map_cache(map_id)
WandererApp.Cache.insert("map_#{map_id}:invalidate_character_ids", character_ids)
%{state | map: map, map_opts: map_options(options)}
end
def maybe_import_systems(state, %{"systems" => systems} = _settings, user_id, character_id) do
state =
systems
|> Enum.reduce(state, fn %{
"description" => description,
"id" => id,
"labels" => labels,
"locked" => locked,
"name" => name,
"position" => %{"x" => x, "y" => y},
"status" => status,
"tag" => tag,
"temporary_name" => temporary_name
} = _system,
acc ->
acc
|> add_system(
%{
solar_system_id: id |> String.to_integer(),
coordinates: %{"x" => round(x), "y" => round(y)}
},
user_id,
character_id
)
|> update_system_name(%{solar_system_id: id |> String.to_integer(), name: name})
|> update_system_description(%{
solar_system_id: id |> String.to_integer(),
description: description
})
|> update_system_status(%{solar_system_id: id |> String.to_integer(), status: status})
|> update_system_tag(%{solar_system_id: id |> String.to_integer(), tag: tag})
|> update_system_temporary_name(%{
solar_system_id: id |> String.to_integer(),
temporary_name: temporary_name
})
|> update_system_locked(%{solar_system_id: id |> String.to_integer(), locked: locked})
|> update_system_labels(%{solar_system_id: id |> String.to_integer(), labels: labels})
end)
def maybe_import_systems(
map_id,
%{"systems" => systems} = _settings,
user_id,
character_id
) do
systems
|> Enum.each(fn %{
"description" => description,
"id" => id,
"labels" => labels,
"locked" => locked,
"name" => name,
"position" => %{"x" => x, "y" => y},
"status" => status,
"tag" => tag,
"temporary_name" => temporary_name
} ->
solar_system_id = id |> String.to_integer()
add_system(
map_id,
%{
solar_system_id: solar_system_id,
coordinates: %{"x" => round(x), "y" => round(y)}
},
user_id,
character_id
)
update_system_name(map_id, %{solar_system_id: solar_system_id, name: name})
update_system_description(map_id, %{
solar_system_id: solar_system_id,
description: description
})
update_system_status(map_id, %{solar_system_id: solar_system_id, status: status})
update_system_tag(map_id, %{solar_system_id: solar_system_id, tag: tag})
update_system_temporary_name(map_id, %{
solar_system_id: solar_system_id,
temporary_name: temporary_name
})
update_system_locked(map_id, %{solar_system_id: solar_system_id, locked: locked})
update_system_labels(map_id, %{solar_system_id: solar_system_id, labels: labels})
end)
removed_system_ids =
systems
@@ -534,39 +569,39 @@ defmodule WandererApp.Map.Server.Impl do
|> Enum.map(fn system -> system["id"] end)
|> Enum.map(&String.to_integer/1)
state
|> delete_systems(removed_system_ids, user_id, character_id)
delete_systems(map_id, removed_system_ids, user_id, character_id)
end
def maybe_import_connections(state, %{"connections" => connections} = _settings, _user_id) do
def maybe_import_connections(map_id, %{"connections" => connections} = _settings, _user_id) do
connections
|> Enum.reduce(state, fn %{
"source" => source,
"target" => target,
"mass_status" => mass_status,
"time_status" => time_status,
"ship_size_type" => ship_size_type
} = _system,
acc ->
|> Enum.each(fn %{
"source" => source,
"target" => target,
"mass_status" => mass_status,
"time_status" => time_status,
"ship_size_type" => ship_size_type
} ->
source_id = source |> String.to_integer()
target_id = target |> String.to_integer()
acc
|> add_connection(%{
add_connection(map_id, %{
solar_system_source_id: source_id,
solar_system_target_id: target_id
})
|> update_connection_time_status(%{
update_connection_time_status(map_id, %{
solar_system_source_id: source_id,
solar_system_target_id: target_id,
time_status: time_status
})
|> update_connection_mass_status(%{
update_connection_mass_status(map_id, %{
solar_system_source_id: source_id,
solar_system_target_id: target_id,
mass_status: mass_status
})
|> update_connection_ship_size_type(%{
update_connection_ship_size_type(map_id, %{
solar_system_source_id: source_id,
solar_system_target_id: target_id,
ship_size_type: ship_size_type
@@ -574,13 +609,12 @@ defmodule WandererApp.Map.Server.Impl do
end)
end
def maybe_import_hubs(state, %{"hubs" => hubs} = _settings, _user_id) do
def maybe_import_hubs(map_id, %{"hubs" => hubs} = _settings, _user_id) do
hubs
|> Enum.reduce(state, fn hub, acc ->
|> Enum.each(fn hub ->
solar_system_id = hub |> String.to_integer()
acc
|> add_hub(%{solar_system_id: solar_system_id})
add_hub(map_id, %{solar_system_id: solar_system_id})
end)
end

View File

@@ -8,14 +8,14 @@ defmodule WandererApp.Map.Server.PingsImpl do
@ping_auto_expire_timeout :timer.minutes(15)
def add_ping(
%{map_id: map_id} = state,
map_id,
%{
solar_system_id: solar_system_id,
type: type,
message: message,
character_id: character_id,
user_id: user_id
} = ping_info
} = _ping_info
) do
with {:ok, character} <- WandererApp.Character.get_character(character_id),
system <-
@@ -57,23 +57,20 @@ defmodule WandererApp.Map.Server.PingsImpl do
map_id: map_id,
solar_system_id: "#{solar_system_id}"
})
state
else
error ->
Logger.error("Failed to add_ping: #{inspect(error, pretty: true)}")
state
end
end
def cancel_ping(
%{map_id: map_id} = state,
map_id,
%{
id: ping_id,
character_id: character_id,
user_id: user_id,
type: type
} = ping_info
} = _ping_info
) do
with {:ok, character} <- WandererApp.Character.get_character(character_id),
{:ok,
@@ -105,12 +102,9 @@ defmodule WandererApp.Map.Server.PingsImpl do
map_id: map_id,
solar_system_id: solar_system_id
})
state
else
error ->
Logger.error("Failed to cancel_ping: #{inspect(error, pretty: true)}")
state
end
end
end

View File

@@ -13,7 +13,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
Public entrypoint for updating signatures on a map system.
"""
def update_signatures(
%{map_id: map_id} = state,
map_id,
%{
solar_system_id: system_solar_id,
character_id: char_id,
@@ -31,7 +31,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
solar_system_id: system_solar_id
}) do
do_update_signatures(
state,
map_id,
system,
char_id,
user_id,
@@ -43,14 +43,13 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
else
error ->
Logger.warning("Skipping signature update: #{inspect(error)}")
state
end
end
def update_signatures(state, _), do: state
def update_signatures(_map_id, _), do: :ok
defp do_update_signatures(
state,
map_id,
system,
character_id,
user_id,
@@ -86,14 +85,14 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
# 1. Removals
existing_current
|> Enum.filter(&(&1.eve_id in removed_ids))
|> Enum.each(&remove_signature(&1, state, system, delete_conn?))
|> Enum.each(&remove_signature(map_id, &1, system, delete_conn?))
# 2. Updates
existing_current
|> Enum.filter(&(&1.eve_id in updated_ids))
|> Enum.each(fn existing ->
update = Enum.find(updated_sigs, &(&1.eve_id == existing.eve_id))
apply_update_signature(state, existing, update)
apply_update_signature(map_id, existing, update)
end)
# 3. Additions & restorations
@@ -119,7 +118,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
if added_ids != [] do
track_activity(
:signatures_added,
state.map_id,
map_id,
system.solar_system_id,
user_id,
character_id,
@@ -130,7 +129,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
if removed_ids != [] do
track_activity(
:signatures_removed,
state.map_id,
map_id,
system.solar_system_id,
user_id,
character_id,
@@ -139,12 +138,12 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
end
# 5. Broadcast to any live subscribers
Impl.broadcast!(state.map_id, :signatures_updated, system.solar_system_id)
Impl.broadcast!(map_id, :signatures_updated, system.solar_system_id)
# ADDITIVE: Also broadcast to external event system (webhooks/WebSocket)
# Send individual signature events
Enum.each(added_sigs, fn sig ->
WandererApp.ExternalEvents.broadcast(state.map_id, :signature_added, %{
WandererApp.ExternalEvents.broadcast(map_id, :signature_added, %{
solar_system_id: system.solar_system_id,
signature_id: sig.eve_id,
name: sig.name,
@@ -155,27 +154,25 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
end)
Enum.each(removed_ids, fn sig_eve_id ->
WandererApp.ExternalEvents.broadcast(state.map_id, :signature_removed, %{
WandererApp.ExternalEvents.broadcast(map_id, :signature_removed, %{
solar_system_id: system.solar_system_id,
signature_id: sig_eve_id
})
end)
# Also send the summary event for backwards compatibility
WandererApp.ExternalEvents.broadcast(state.map_id, :signatures_updated, %{
WandererApp.ExternalEvents.broadcast(map_id, :signatures_updated, %{
solar_system_id: system.solar_system_id,
added_count: length(added_ids),
updated_count: length(updated_ids),
removed_count: length(removed_ids)
})
state
end
defp remove_signature(sig, state, system, delete_conn?) do
defp remove_signature(map_id, sig, system, delete_conn?) do
# optionally remove the linked connection
if delete_conn? && sig.linked_system_id do
ConnectionsImpl.delete_connection(state, %{
ConnectionsImpl.delete_connection(map_id, %{
solar_system_source_id: system.solar_system_id,
solar_system_target_id: sig.linked_system_id
})
@@ -183,7 +180,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
# clear any linked_sig_eve_id on the target system
if sig.linked_system_id do
SystemsImpl.update_system_linked_sig_eve_id(state, %{
SystemsImpl.update_system_linked_sig_eve_id(map_id, %{
solar_system_id: sig.linked_system_id,
linked_sig_eve_id: nil
})
@@ -194,7 +191,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
end
def apply_update_signature(
state,
map_id,
%MapSystemSignature{} = existing,
update_params
)
@@ -204,8 +201,8 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
update_params |> Map.put(:update_forced_at, DateTime.utc_now())
) do
{:ok, updated} ->
maybe_update_connection_time_status(state, existing, updated)
maybe_update_connection_mass_status(state, existing, updated)
maybe_update_connection_time_status(map_id, existing, updated)
maybe_update_connection_mass_status(map_id, existing, updated)
:ok
{:error, reason} ->
@@ -214,10 +211,10 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
end
defp maybe_update_connection_time_status(
state,
%{custom_info: old_custom_info} = old_sig,
map_id,
%{custom_info: old_custom_info} = _old_sig,
%{custom_info: new_custom_info, system_id: system_id, linked_system_id: linked_system_id} =
updated_sig
_updated_sig
)
when not is_nil(linked_system_id) do
old_time_status = get_time_status(old_custom_info)
@@ -226,7 +223,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
if old_time_status != new_time_status do
{:ok, source_system} = MapSystem.by_id(system_id)
ConnectionsImpl.update_connection_time_status(state, %{
ConnectionsImpl.update_connection_time_status(map_id, %{
solar_system_source_id: source_system.solar_system_id,
solar_system_target_id: linked_system_id,
time_status: new_time_status
@@ -234,13 +231,13 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
end
end
defp maybe_update_connection_time_status(_state, _old_sig, _updated_sig), do: :ok
defp maybe_update_connection_time_status(_map_id, _old_sig, _updated_sig), do: :ok
defp maybe_update_connection_mass_status(
state,
%{type: old_type} = old_sig,
map_id,
%{type: old_type} = _old_sig,
%{type: new_type, system_id: system_id, linked_system_id: linked_system_id} =
updated_sig
_updated_sig
)
when not is_nil(linked_system_id) do
if old_type != new_type do
@@ -248,7 +245,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
signature_ship_size_type = EVEUtil.get_wh_size(new_type)
if not is_nil(signature_ship_size_type) do
ConnectionsImpl.update_connection_ship_size_type(state, %{
ConnectionsImpl.update_connection_ship_size_type(map_id, %{
solar_system_source_id: source_system.solar_system_id,
solar_system_target_id: linked_system_id,
ship_size_type: signature_ship_size_type
@@ -257,7 +254,7 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
end
end
defp maybe_update_connection_mass_status(_state, _old_sig, _updated_sig), do: :ok
defp maybe_update_connection_mass_status(_map_id, _old_sig, _updated_sig), do: :ok
defp track_activity(event, map_id, solar_system_id, user_id, character_id, signatures) do
ActivityTracker.track_map_event(event, %{
@@ -282,7 +279,8 @@ defmodule WandererApp.Map.Server.SignaturesImpl do
group: sig["group"],
type: Map.get(sig, "type"),
custom_info: Map.get(sig, "custom_info"),
character_eve_id: character_eve_id,
# Use character_eve_id from sig if provided, otherwise use the default
character_eve_id: Map.get(sig, "character_eve_id", character_eve_id),
deleted: false
}
end)

View File

@@ -1,22 +0,0 @@
defmodule WandererApp.Map.ServerSupervisor do
  @moduledoc false

  use Supervisor, restart: :transient

  alias WandererApp.Map.Server

  # Boots a transient supervision tree for one map server instance.
  def start_link(init_args), do: Supervisor.start_link(__MODULE__, init_args)

  @impl true
  def init(init_args) do
    [
      {Server, init_args},
      rtree_child_spec(init_args[:map_id])
    ]
    |> Supervisor.init(strategy: :one_for_one, auto_shutdown: :any_significant)
  end

  # Per-map dynamic R-tree process (DDRT) used alongside the map server;
  # both the conf name and the registered process name embed the map id.
  defp rtree_child_spec(map_id) do
    {DDRT.DynamicRtree,
     [
       conf: [name: "rtree_#{map_id}", width: 150, verbose: false, seed: 0],
       name: Module.concat([map_id, DDRT.DynamicRtree])
     ]}
  end
end

View File

@@ -20,14 +20,14 @@ defmodule WandererApp.Map.Server.SystemsImpl do
end)
end
def init_map_systems(state, [] = _systems), do: state
def init_map_systems(_map_id, [] = _systems), do: :ok
def init_map_systems(%{map_id: map_id, rtree_name: rtree_name} = state, systems) do
def init_map_systems(map_id, systems) do
systems
|> Enum.each(fn %{id: system_id, solar_system_id: solar_system_id} = system ->
@ddrt.insert(
{solar_system_id, WandererApp.Map.PositionCalculator.get_system_bounding_rect(system)},
rtree_name
"rtree_#{map_id}"
)
WandererApp.Cache.put(
@@ -36,32 +36,34 @@ defmodule WandererApp.Map.Server.SystemsImpl do
ttl: @system_inactive_timeout
)
end)
state
end
def add_system(
%{map_id: map_id} = state,
map_id,
%{
solar_system_id: solar_system_id
} = system_info,
user_id,
character_id
character_id,
_opts
) do
case map_id |> WandererApp.Map.check_location(%{solar_system_id: solar_system_id}) do
map_id
|> WandererApp.Map.check_location(%{solar_system_id: solar_system_id})
|> case do
{:ok, _location} ->
state |> _add_system(system_info, user_id, character_id)
do_add_system(map_id, system_info, user_id, character_id)
{:error, :already_exists} ->
state
:ok
end
end
def paste_systems(
%{map_id: map_id} = state,
map_id,
systems,
user_id,
character_id
character_id,
opts
) do
systems
|> Enum.each(fn %{
@@ -72,28 +74,34 @@ defmodule WandererApp.Map.Server.SystemsImpl do
case map_id |> WandererApp.Map.check_location(%{solar_system_id: solar_system_id}) do
{:ok, _location} ->
state
|> _add_system(
%{solar_system_id: solar_system_id, coordinates: coordinates, extra_info: system},
user_id,
character_id
)
if opts |> Keyword.get(:add_not_existing, true) do
do_add_system(
map_id,
%{solar_system_id: solar_system_id, coordinates: coordinates, extra_info: system},
user_id,
character_id
)
else
:ok
end
{:error, :already_exists} ->
:ok
if opts |> Keyword.get(:update_existing, false) do
:ok
else
:ok
end
end
end)
state
end
def add_system_comment(
%{map_id: map_id} = state,
map_id,
%{
solar_system_id: solar_system_id,
text: text
} = comment_info,
user_id,
} = _comment_info,
_user_id,
character_id
) do
system =
@@ -116,12 +124,10 @@ defmodule WandererApp.Map.Server.SystemsImpl do
solar_system_id: solar_system_id,
comment: comment
})
state
end
def remove_system_comment(
%{map_id: map_id} = state,
map_id,
comment_id,
user_id,
character_id
@@ -135,11 +141,9 @@ defmodule WandererApp.Map.Server.SystemsImpl do
solar_system_id: system.solar_system_id,
comment_id: comment_id
})
state
end
def cleanup_systems(%{map_id: map_id} = state) do
def cleanup_systems(map_id) do
expired_systems =
map_id
|> WandererApp.Map.list_systems!()
@@ -174,71 +178,66 @@ defmodule WandererApp.Map.Server.SystemsImpl do
end)
|> Enum.map(& &1.solar_system_id)
case expired_systems |> Enum.empty?() do
false ->
state |> delete_systems(expired_systems, nil, nil)
_ ->
state
if expired_systems |> Enum.empty?() |> Kernel.not() do
delete_systems(map_id, expired_systems, nil, nil)
end
end
def update_system_name(
state,
map_id,
update
),
do: state |> update_system(:update_name, [:name], update)
do: update_system(map_id, :update_name, [:name], update)
def update_system_description(
state,
map_id,
update
),
do: state |> update_system(:update_description, [:description], update)
do: update_system(map_id, :update_description, [:description], update)
def update_system_status(
state,
map_id,
update
),
do: state |> update_system(:update_status, [:status], update)
do: update_system(map_id, :update_status, [:status], update)
def update_system_tag(
state,
map_id,
update
),
do: state |> update_system(:update_tag, [:tag], update)
do: update_system(map_id, :update_tag, [:tag], update)
def update_system_temporary_name(
state,
map_id,
update
) do
state |> update_system(:update_temporary_name, [:temporary_name], update)
end
),
do: update_system(map_id, :update_temporary_name, [:temporary_name], update)
def update_system_locked(
state,
map_id,
update
),
do: state |> update_system(:update_locked, [:locked], update)
do: update_system(map_id, :update_locked, [:locked], update)
def update_system_labels(
state,
map_id,
update
),
do: state |> update_system(:update_labels, [:labels], update)
do: update_system(map_id, :update_labels, [:labels], update)
def update_system_linked_sig_eve_id(
state,
map_id,
update
),
do: state |> update_system(:update_linked_sig_eve_id, [:linked_sig_eve_id], update)
do: update_system(map_id, :update_linked_sig_eve_id, [:linked_sig_eve_id], update)
def update_system_position(
%{rtree_name: rtree_name} = state,
map_id,
update
),
do:
state
|> update_system(
update_system(
map_id,
:update_position,
[:position_x, :position_y],
update,
@@ -246,13 +245,13 @@ defmodule WandererApp.Map.Server.SystemsImpl do
@ddrt.update(
updated_system.solar_system_id,
WandererApp.Map.PositionCalculator.get_system_bounding_rect(updated_system),
rtree_name
"rtree_#{map_id}"
)
end
)
def add_hub(
%{map_id: map_id} = state,
map_id,
hub_info
) do
with :ok <- WandererApp.Map.add_hub(map_id, hub_info),
@@ -260,16 +259,15 @@ defmodule WandererApp.Map.Server.SystemsImpl do
{:ok, _} <-
WandererApp.MapRepo.update_hubs(map_id, hubs) do
Impl.broadcast!(map_id, :update_map, %{hubs: hubs})
state
else
error ->
Logger.error("Failed to add hub: #{inspect(error, pretty: true)}")
state
:ok
end
end
def remove_hub(
%{map_id: map_id} = state,
map_id,
hub_info
) do
with :ok <- WandererApp.Map.remove_hub(map_id, hub_info),
@@ -277,16 +275,15 @@ defmodule WandererApp.Map.Server.SystemsImpl do
{:ok, _} <-
WandererApp.MapRepo.update_hubs(map_id, hubs) do
Impl.broadcast!(map_id, :update_map, %{hubs: hubs})
state
else
error ->
Logger.error("Failed to remove hub: #{inspect(error, pretty: true)}")
state
:ok
end
end
def delete_systems(
%{map_id: map_id, rtree_name: rtree_name} = state,
map_id,
removed_ids,
user_id,
character_id
@@ -304,9 +301,9 @@ defmodule WandererApp.Map.Server.SystemsImpl do
map_id
|> WandererApp.MapSystemRepo.remove_from_map(solar_system_id)
|> case do
{:ok, _} ->
{:ok, result} ->
:ok = WandererApp.Map.remove_system(map_id, solar_system_id)
@ddrt.delete([solar_system_id], rtree_name)
@ddrt.delete([solar_system_id], "rtree_#{map_id}")
Impl.broadcast!(map_id, :systems_removed, [solar_system_id])
# ADDITIVE: Also broadcast to external event system (webhooks/WebSocket)
@@ -334,7 +331,7 @@ defmodule WandererApp.Map.Server.SystemsImpl do
end
try do
cleanup_linked_system_sig_eve_ids(state, [system_id])
cleanup_linked_system_sig_eve_ids(map_id, [system_id])
rescue
e ->
Logger.error("Failed to cleanup system linked sig eve ids: #{inspect(e)}")
@@ -347,8 +344,6 @@ defmodule WandererApp.Map.Server.SystemsImpl do
:ok
end
end)
state
end
defp track_systems_removed(map_id, user_id, character_id, removed_solar_system_ids)
@@ -414,7 +409,7 @@ defmodule WandererApp.Map.Server.SystemsImpl do
end)
end
defp cleanup_linked_system_sig_eve_ids(state, system_ids_to_remove) do
defp cleanup_linked_system_sig_eve_ids(map_id, system_ids_to_remove) do
linked_system_ids =
system_ids_to_remove
|> Enum.map(fn system_id ->
@@ -427,17 +422,29 @@ defmodule WandererApp.Map.Server.SystemsImpl do
linked_system_ids
|> Enum.each(fn linked_system_id ->
update_system_linked_sig_eve_id(state, %{
update_system(map_id, :update_linked_sig_eve_id, [:linked_sig_eve_id], %{
solar_system_id: linked_system_id,
linked_sig_eve_id: nil
})
end)
end
def maybe_add_system(map_id, location, old_location, rtree_name, map_opts)
def maybe_add_system(map_id, location, old_location, map_opts)
when not is_nil(location) do
:telemetry.execute(
[:wanderer_app, :map, :system_addition, :start],
%{system_time: System.system_time()},
%{
map_id: map_id,
solar_system_id: location.solar_system_id,
from_system: old_location && old_location.solar_system_id
}
)
case WandererApp.Map.check_location(map_id, location) do
{:ok, location} ->
rtree_name = "rtree_#{map_id}"
{:ok, position} = calc_new_system_position(map_id, old_location, rtree_name, map_opts)
case WandererApp.MapSystemRepo.get_by_map_and_solar_system_id(
@@ -484,49 +491,142 @@ defmodule WandererApp.Map.Server.SystemsImpl do
position_y: updated_system.position_y
})
:telemetry.execute(
[:wanderer_app, :map, :system_addition, :complete],
%{system_time: System.system_time()},
%{
map_id: map_id,
solar_system_id: updated_system.solar_system_id,
system_id: updated_system.id,
operation: :update_existing
}
)
:ok
_ ->
{:ok, solar_system_info} =
WandererApp.CachedInfo.get_system_static_info(location.solar_system_id)
WandererApp.MapSystemRepo.create(%{
map_id: map_id,
solar_system_id: location.solar_system_id,
name: solar_system_info.solar_system_name,
position_x: position.x,
position_y: position.y
})
WandererApp.CachedInfo.get_system_static_info(location.solar_system_id)
|> case do
{:ok, new_system} ->
@ddrt.insert(
{new_system.solar_system_id,
WandererApp.Map.PositionCalculator.get_system_bounding_rect(new_system)},
rtree_name
)
WandererApp.Cache.put(
"map_#{map_id}:system_#{new_system.id}:last_activity",
DateTime.utc_now(),
ttl: @system_inactive_timeout
)
WandererApp.Map.add_system(map_id, new_system)
Impl.broadcast!(map_id, :add_system, new_system)
# ADDITIVE: Also broadcast to external event system (webhooks/WebSocket)
WandererApp.ExternalEvents.broadcast(map_id, :add_system, %{
solar_system_id: new_system.solar_system_id,
name: new_system.name,
position_x: new_system.position_x,
position_y: new_system.position_y
{:ok, solar_system_info} ->
# Use upsert instead of create - handles race conditions gracefully
WandererApp.MapSystemRepo.upsert(%{
map_id: map_id,
solar_system_id: location.solar_system_id,
name: solar_system_info.solar_system_name,
position_x: position.x,
position_y: position.y
})
|> case do
{:ok, system} ->
# System was either created or updated - both cases are success
@ddrt.insert(
{system.solar_system_id,
WandererApp.Map.PositionCalculator.get_system_bounding_rect(system)},
rtree_name
)
:ok
WandererApp.Cache.put(
"map_#{map_id}:system_#{system.id}:last_activity",
DateTime.utc_now(),
ttl: @system_inactive_timeout
)
WandererApp.Map.add_system(map_id, system)
Impl.broadcast!(map_id, :add_system, system)
# ADDITIVE: Also broadcast to external event system (webhooks/WebSocket)
WandererApp.ExternalEvents.broadcast(map_id, :add_system, %{
solar_system_id: system.solar_system_id,
name: system.name,
position_x: system.position_x,
position_y: system.position_y
})
:telemetry.execute(
[:wanderer_app, :map, :system_addition, :complete],
%{system_time: System.system_time()},
%{
map_id: map_id,
solar_system_id: system.solar_system_id,
system_id: system.id,
operation: :upsert
}
)
:ok
{:error, error} = result ->
Logger.warning(
"[CharacterTracking] Failed to upsert system #{location.solar_system_id} on map #{map_id}: #{inspect(error, pretty: true)}"
)
:telemetry.execute(
[:wanderer_app, :map, :system_addition, :error],
%{system_time: System.system_time()},
%{
map_id: map_id,
solar_system_id: location.solar_system_id,
error: error,
reason: :db_upsert_failed
}
)
result
error ->
Logger.warning(
"[CharacterTracking] Failed to upsert system #{location.solar_system_id} on map #{map_id}: #{inspect(error, pretty: true)}"
)
:telemetry.execute(
[:wanderer_app, :map, :system_addition, :error],
%{system_time: System.system_time()},
%{
map_id: map_id,
solar_system_id: location.solar_system_id,
error: error,
reason: :db_upsert_failed_unexpected
}
)
{:error, error}
end
{:error, error} = result ->
Logger.warning(
"[CharacterTracking] Failed to add system #{inspect(location.solar_system_id)} on map #{map_id}: #{inspect(error, pretty: true)}"
)
:telemetry.execute(
[:wanderer_app, :map, :system_addition, :error],
%{system_time: System.system_time()},
%{
map_id: map_id,
solar_system_id: location.solar_system_id,
error: error,
reason: :db_upsert_failed
}
)
result
error ->
Logger.warning("Failed to create system: #{inspect(error, pretty: true)}")
:ok
Logger.warning(
"[CharacterTracking] Failed to add system #{inspect(location.solar_system_id)} on map #{map_id}: #{inspect(error, pretty: true)}"
)
:telemetry.execute(
[:wanderer_app, :map, :system_addition, :error],
%{system_time: System.system_time()},
%{
map_id: map_id,
solar_system_id: location.solar_system_id,
error: error,
reason: :db_upsert_failed_unexpected
}
)
{:error, error}
end
end
@@ -536,10 +636,10 @@ defmodule WandererApp.Map.Server.SystemsImpl do
end
end
def maybe_add_system(_map_id, _location, _old_location, _rtree_name, _map_opts), do: :ok
def maybe_add_system(_map_id, _location, _old_location, _map_opts), do: :ok
defp _add_system(
%{map_id: map_id, map_opts: map_opts, rtree_name: rtree_name} = state,
defp do_add_system(
map_id,
%{
solar_system_id: solar_system_id,
coordinates: coordinates
@@ -548,6 +648,8 @@ defmodule WandererApp.Map.Server.SystemsImpl do
character_id
) do
extra_info = system_info |> Map.get(:extra_info)
rtree_name = "rtree_#{map_id}"
{:ok, %{map_opts: map_opts}} = WandererApp.Map.get_map_state(map_id)
%{"x" => x, "y" => y} =
coordinates
@@ -621,7 +723,7 @@ defmodule WandererApp.Map.Server.SystemsImpl do
})
end
:ok = map_id |> WandererApp.Map.add_system(system)
:ok = WandererApp.Map.add_system(map_id, system)
WandererApp.Cache.put(
"map_#{map_id}:system_#{system.id}:last_activity",
@@ -633,7 +735,7 @@ defmodule WandererApp.Map.Server.SystemsImpl do
# ADDITIVE: Also broadcast to external event system (webhooks/WebSocket)
Logger.debug(fn ->
"SystemsImpl._add_system calling ExternalEvents.broadcast for map #{map_id}, system: #{solar_system_id}"
"SystemsImpl.do_add_system calling ExternalEvents.broadcast for map #{map_id}, system: #{solar_system_id}"
end)
WandererApp.ExternalEvents.broadcast(map_id, :add_system, %{
@@ -643,15 +745,12 @@ defmodule WandererApp.Map.Server.SystemsImpl do
position_y: system.position_y
})
{:ok, _} =
WandererApp.User.ActivityTracker.track_map_event(:system_added, %{
character_id: character_id,
user_id: user_id,
map_id: map_id,
solar_system_id: solar_system_id
})
state
WandererApp.User.ActivityTracker.track_map_event(:system_added, %{
character_id: character_id,
user_id: user_id,
map_id: map_id,
solar_system_id: solar_system_id
})
end
defp maybe_update_extra_info(system, nil), do: system
@@ -783,7 +882,7 @@ defmodule WandererApp.Map.Server.SystemsImpl do
|> WandererApp.Map.PositionCalculator.get_new_system_position(rtree_name, opts)}
defp update_system(
%{map_id: map_id} = state,
map_id,
update_method,
attributes,
update,
@@ -807,12 +906,14 @@ defmodule WandererApp.Map.Server.SystemsImpl do
end
update_map_system_last_activity(map_id, updated_system)
state
else
{:error, error} ->
Logger.error("Failed to update system: #{inspect(error, pretty: true)}")
:ok
error ->
Logger.error("Failed to update system: #{inspect(error, pretty: true)}")
state
:ok
end
end
@@ -832,13 +933,9 @@ defmodule WandererApp.Map.Server.SystemsImpl do
WandererApp.ExternalEvents.broadcast(map_id, :system_metadata_changed, %{
solar_system_id: updated_system.solar_system_id,
name: updated_system.name,
# ADD
temporary_name: updated_system.temporary_name,
# ADD
labels: updated_system.labels,
# ADD
description: updated_system.description,
# ADD
status: updated_system.status
})
end

View File

@@ -0,0 +1,429 @@
defmodule WandererApp.Map.SlugRecovery do
  @moduledoc """
  Handles automatic recovery from duplicate map slug scenarios.

  This module provides functions to:
  - Detect duplicate slugs in the database (including deleted maps)
  - Automatically fix duplicates by renaming newer maps
  - Verify and recreate unique indexes (enforced on all maps, including deleted)
  - Safely handle race conditions during recovery

  ## Slug Uniqueness Policy

  All map slugs must be unique across the entire maps_v1 table, including
  deleted maps. This prevents confusion and ensures that a slug can always
  unambiguously identify a specific map in the system's history.

  The recovery process is designed to be:
  - Idempotent (safe to run multiple times)
  - Production-safe (minimal locking, fast execution)
  - Observable (telemetry events for monitoring)
  """

  require Logger

  alias WandererApp.Repo

  # Upper bound on `<base>-<n>` probe attempts in `generate_unique_slug/3`.
  # The previous implementation recursed without bound, so a persistent DB
  # error (or an adversarial number of taken suffixes) would loop forever.
  @max_slug_probe_attempts 1_000

  @doc """
  Recovers from a duplicate slug scenario for a specific slug.

  This function:
  1. Finds all maps with the given slug (including deleted)
  2. Keeps the oldest map with the original slug
  3. Renames newer duplicates with numeric suffixes
  4. Verifies the unique index exists

  Returns:
  - `{:ok, result}` - Recovery successful
  - `{:error, reason}` - Recovery failed

  Note: `result.fixed_count` counts only renames that actually succeeded;
  failed renames are still listed in `result.renamed_maps` (with an `:error`
  key) so callers can inspect them.

  ## Examples

      iex> recover_duplicate_slug("home-2")
      {:ok, %{fixed_count: 1, kept_map_id: "...", renamed_maps: [...]}}
  """
  def recover_duplicate_slug(slug) do
    start_time = System.monotonic_time(:millisecond)

    Logger.warning("Starting slug recovery for '#{slug}'",
      slug: slug,
      operation: :recover_duplicate_slug
    )

    :telemetry.execute(
      [:wanderer_app, :map, :slug_recovery, :start],
      %{system_time: System.system_time()},
      %{slug: slug, operation: :recover_duplicate_slug}
    )

    result =
      Repo.transaction(fn ->
        # Find all maps with this slug (including deleted), ordered by insertion time
        duplicates = find_duplicate_maps(slug)

        case duplicates do
          [] ->
            Logger.info("No maps found with slug '#{slug}' during recovery")
            %{fixed_count: 0, kept_map_id: nil, renamed_maps: []}

          [_single_map] ->
            Logger.info("Only one map found with slug '#{slug}', no recovery needed")
            %{fixed_count: 0, kept_map_id: nil, renamed_maps: []}

          [kept_map | maps_to_rename] ->
            # The oldest map (first by inserted_at ASC) keeps the original slug.
            # Convert binary UUID to string for consistency.
            kept_map_id_str =
              if is_binary(kept_map.id), do: Ecto.UUID.load!(kept_map.id), else: kept_map.id

            Logger.warning(
              "Found #{length(maps_to_rename)} duplicate maps for slug '#{slug}', fixing...",
              slug: slug,
              kept_map_id: kept_map_id_str,
              duplicate_count: length(maps_to_rename)
            )

            # Rename the duplicate maps; suffixes start at "-2".
            renamed_maps =
              maps_to_rename
              |> Enum.with_index(2)
              |> Enum.map(fn {map, index} ->
                new_slug = generate_unique_slug(slug, index)
                rename_map(map, new_slug)
              end)

            # BUGFIX: previously every attempted rename was counted as fixed,
            # even when `rename_map/2` reported a failure. Count successes only.
            fixed_count = Enum.count(renamed_maps, &is_nil(Map.get(&1, :error)))

            %{
              fixed_count: fixed_count,
              kept_map_id: kept_map_id_str,
              renamed_maps: renamed_maps
            }
        end
      end)

    case result do
      {:ok, recovery_result} ->
        duration = System.monotonic_time(:millisecond) - start_time

        :telemetry.execute(
          [:wanderer_app, :map, :slug_recovery, :complete],
          %{
            duration_ms: duration,
            fixed_count: recovery_result.fixed_count,
            system_time: System.system_time()
          },
          %{slug: slug, result: recovery_result}
        )

        Logger.info("Slug recovery completed successfully",
          slug: slug,
          fixed_count: recovery_result.fixed_count,
          duration_ms: duration
        )

        {:ok, recovery_result}

      {:error, reason} = error ->
        duration = System.monotonic_time(:millisecond) - start_time

        :telemetry.execute(
          [:wanderer_app, :map, :slug_recovery, :error],
          %{duration_ms: duration, system_time: System.system_time()},
          %{slug: slug, error: inspect(reason)}
        )

        Logger.error("Slug recovery failed",
          slug: slug,
          error: inspect(reason),
          duration_ms: duration
        )

        error
    end
  end

  @doc """
  Verifies that the unique index on map slugs exists.

  If missing, attempts to create it (after fixing any duplicates).

  NOTE(review): index creation uses `CREATE INDEX CONCURRENTLY`, which cannot
  run inside a transaction block — do not call this from within
  `Repo.transaction/1`.

  Returns:
  - `{:ok, :exists}` - Index already exists
  - `{:ok, :created}` - Index was created
  - `{:error, reason}` - Failed to create index
  """
  def verify_unique_index do
    Logger.debug("Verifying unique index on maps_v1.slug")

    # Check if the index exists
    index_query = """
    SELECT 1
    FROM pg_indexes
    WHERE tablename = 'maps_v1'
    AND indexname = 'maps_v1_unique_slug_index'
    LIMIT 1
    """

    case Repo.query(index_query, []) do
      {:ok, %{rows: [[1]]}} ->
        Logger.debug("Unique index exists")
        {:ok, :exists}

      {:ok, %{rows: []}} ->
        Logger.warning("Unique index missing, attempting to create")
        create_unique_index()

      {:error, reason} ->
        Logger.error("Failed to check for unique index", error: inspect(reason))
        {:error, reason}
    end
  end

  @doc """
  Performs a full recovery scan of all maps, fixing any duplicates found.
  Processes both deleted and non-deleted maps.

  This function will:
  1. Drop the unique index if it exists (to allow fixing duplicates)
  2. Find and fix all duplicate slugs
  3. Return statistics about the recovery

  Note: This function does NOT recreate the index. Call `verify_unique_index/0`
  after this function completes to ensure the index is recreated.

  This is a more expensive operation and should be run:
  - During maintenance windows
  - After detecting multiple duplicate slug errors
  - As part of deployment verification

  Returns:
  - `{:ok, stats}` - Recovery completed with statistics
  - `{:error, reason}` - Recovery failed
  """
  def recover_all_duplicates do
    Logger.info("Starting full duplicate slug recovery (including deleted maps)")
    start_time = System.monotonic_time(:millisecond)

    :telemetry.execute(
      [:wanderer_app, :map, :full_recovery, :start],
      %{system_time: System.system_time()},
      %{}
    )

    # Drop the unique index if it exists to allow fixing duplicates
    drop_unique_index_if_exists()

    # Find all slugs that have duplicates (including deleted maps)
    duplicate_slugs_query = """
    SELECT slug, COUNT(*) as count
    FROM maps_v1
    GROUP BY slug
    HAVING COUNT(*) > 1
    """

    case Repo.query(duplicate_slugs_query, []) do
      {:ok, %{rows: []}} ->
        Logger.info("No duplicate slugs found")
        {:ok, %{total_slugs_fixed: 0, total_maps_renamed: 0}}

      {:ok, %{rows: duplicate_rows}} ->
        Logger.warning("Found #{length(duplicate_rows)} slugs with duplicates",
          duplicate_count: length(duplicate_rows)
        )

        # Fix each duplicate slug; individual failures degrade to zero-counts
        # rather than aborting the whole scan.
        results =
          Enum.map(duplicate_rows, fn [slug, _count] ->
            case recover_duplicate_slug(slug) do
              {:ok, result} -> result
              {:error, _} -> %{fixed_count: 0, kept_map_id: nil, renamed_maps: []}
            end
          end)

        stats = %{
          total_slugs_fixed: length(results),
          total_maps_renamed: Enum.sum(Enum.map(results, & &1.fixed_count))
        }

        duration = System.monotonic_time(:millisecond) - start_time

        :telemetry.execute(
          [:wanderer_app, :map, :full_recovery, :complete],
          %{
            duration_ms: duration,
            slugs_fixed: stats.total_slugs_fixed,
            maps_renamed: stats.total_maps_renamed,
            system_time: System.system_time()
          },
          %{stats: stats}
        )

        Logger.info("Full recovery completed",
          stats: stats,
          duration_ms: duration
        )

        {:ok, stats}

      {:error, reason} = error ->
        Logger.error("Failed to query for duplicates", error: inspect(reason))
        error
    end
  end

  # Private functions

  # Returns all maps (including deleted) with this slug as plain maps,
  # ordered by inserted_at ASC (oldest first). Returns [] on query error.
  defp find_duplicate_maps(slug) do
    query = """
    SELECT id, name, slug, deleted, inserted_at
    FROM maps_v1
    WHERE slug = $1
    ORDER BY inserted_at ASC
    """

    case Repo.query(query, [slug]) do
      {:ok, %{rows: rows}} ->
        Enum.map(rows, fn [id, name, slug, deleted, inserted_at] ->
          %{id: id, name: name, slug: slug, deleted: deleted, inserted_at: inserted_at}
        end)

      {:error, reason} ->
        Logger.error("Failed to query for duplicate maps",
          slug: slug,
          error: inspect(reason)
        )

        []
    end
  end

  # Renames a single map row via raw SQL. Returns a summary map; on failure
  # the summary carries `new_slug: nil` and an `:error` key instead of
  # raising, so one failed rename does not abort the enclosing transaction.
  defp rename_map(map, new_slug) do
    # Convert binary UUID to string for logging
    map_id_str = if is_binary(map.id), do: Ecto.UUID.load!(map.id), else: map.id

    Logger.info("Renaming map #{map_id_str} from '#{map.slug}' to '#{new_slug}'",
      map_id: map_id_str,
      old_slug: map.slug,
      new_slug: new_slug,
      deleted: map.deleted
    )

    update_query = """
    UPDATE maps_v1
    SET slug = $1, updated_at = NOW()
    WHERE id = $2
    """

    case Repo.query(update_query, [new_slug, map.id]) do
      {:ok, _} ->
        Logger.info("Successfully renamed map #{map_id_str} to '#{new_slug}'")

        %{
          map_id: map_id_str,
          old_slug: map.slug,
          new_slug: new_slug,
          map_name: map.name,
          deleted: map.deleted
        }

      {:error, reason} ->
        # CLEANUP: the original recomputed `map_id_str` here (a shadowing
        # no-op); the value from above is reused instead.
        Logger.error("Failed to rename map #{map_id_str}",
          map_id: map_id_str,
          old_slug: map.slug,
          new_slug: new_slug,
          error: inspect(reason)
        )

        %{
          map_id: map_id_str,
          old_slug: map.slug,
          new_slug: nil,
          error: reason
        }
    end
  end

  # Probes "<base_slug>-<index>", "<base_slug>-<index+1>", ... until a slug
  # not present in maps_v1 (including deleted maps) is found.
  #
  # BUGFIX: the original recursed without bound — on a persistent DB error it
  # would never terminate. After @max_slug_probe_attempts probes we fall back
  # to a VM-unique numeric suffix instead of looping forever.
  defp generate_unique_slug(base_slug, index, attempts \\ 0)

  defp generate_unique_slug(base_slug, _index, attempts)
       when attempts >= @max_slug_probe_attempts do
    fallback = "#{base_slug}-#{System.unique_integer([:positive, :monotonic])}"

    Logger.warning(
      "Exceeded #{@max_slug_probe_attempts} slug probe attempts for '#{base_slug}', " <>
        "falling back to '#{fallback}'"
    )

    fallback
  end

  defp generate_unique_slug(base_slug, index, attempts) do
    candidate = "#{base_slug}-#{index}"

    # Verify this slug is actually unique (check all maps, including deleted)
    query = "SELECT 1 FROM maps_v1 WHERE slug = $1 LIMIT 1"

    case Repo.query(query, [candidate]) do
      {:ok, %{rows: []}} ->
        candidate

      {:ok, %{rows: [[1]]}} ->
        # This slug is taken, try the next one
        generate_unique_slug(base_slug, index + 1, attempts + 1)

      {:error, _} ->
        # On error, be conservative and try the next number (bounded by the
        # attempts cap above).
        generate_unique_slug(base_slug, index + 1, attempts + 1)
    end
  end

  # Creates the unique slug index across all maps (including deleted ones).
  # Uses CONCURRENTLY to avoid long table locks; must run outside a transaction.
  defp create_unique_index do
    Logger.warning("Creating unique index on maps_v1.slug")

    # Create index on all maps (including deleted ones)
    # This enforces slug uniqueness across all maps regardless of deletion status
    create_index_query = """
    CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS maps_v1_unique_slug_index
    ON maps_v1 (slug)
    """

    case Repo.query(create_index_query, []) do
      {:ok, _} ->
        Logger.info("Successfully created unique index (includes deleted maps)")

        :telemetry.execute(
          [:wanderer_app, :map, :index_created],
          %{system_time: System.system_time()},
          %{index_name: "maps_v1_unique_slug_index"}
        )

        {:ok, :created}

      {:error, reason} ->
        Logger.error("Failed to create unique index", error: inspect(reason))
        {:error, reason}
    end
  end

  # Drops the unique index (if present) so duplicate rows can be renamed.
  # Always returns :ok — failures are logged but never abort recovery.
  defp drop_unique_index_if_exists do
    Logger.debug("Checking if unique index exists before recovery")

    check_query = """
    SELECT 1
    FROM pg_indexes
    WHERE tablename = 'maps_v1'
    AND indexname = 'maps_v1_unique_slug_index'
    LIMIT 1
    """

    case Repo.query(check_query, []) do
      {:ok, %{rows: [[1]]}} ->
        Logger.info("Dropping unique index to allow duplicate recovery")

        drop_query = "DROP INDEX IF EXISTS maps_v1_unique_slug_index"

        case Repo.query(drop_query, []) do
          {:ok, _} ->
            Logger.info("Successfully dropped unique index")
            :ok

          {:error, reason} ->
            Logger.warning("Failed to drop unique index", error: inspect(reason))
            :ok
        end

      {:ok, %{rows: []}} ->
        Logger.debug("Unique index does not exist, no need to drop")
        :ok

      {:error, reason} ->
        Logger.warning("Failed to check for unique index", error: inspect(reason))
        :ok
    end
  end
end

View File

@@ -6,65 +6,6 @@ defmodule WandererApp.Maps do
import Ecto.Query
require Logger
@minimum_route_attrs [
:system_class,
:class_title,
:security,
:triglavian_invasion_status,
:solar_system_id,
:solar_system_name,
:region_name,
:is_shattered
]
# Resolves routes via ESI and enriches every traversed system with the
# minimal static attributes in `@minimum_route_attrs`. On ESI failure this
# degrades gracefully to an empty (but still `{:ok, _}`) result.
def find_routes(map_id, hubs, origin, routes_settings, false) do
  map_id
  |> WandererApp.Esi.find_routes(origin, hubs, routes_settings)
  |> case do
    {:ok, routes} ->
      systems_static_data =
        routes
        |> Enum.flat_map(& &1.systems)
        |> Enum.uniq()
        |> Task.async_stream(
          fn system_id ->
            case WandererApp.CachedInfo.get_system_static_info(system_id) do
              {:ok, nil} ->
                nil

              {:ok, system} ->
                Map.take(system, @minimum_route_attrs)
            end
          end,
          max_concurrency: System.schedulers_online() * 4
        )
        |> Enum.map(fn {:ok, val} -> val end)
        # BUGFIX: systems with no static info produced `nil` placeholders
        # that leaked into the returned list; drop them.
        |> Enum.reject(&is_nil/1)

      {:ok, %{routes: routes, systems_static_data: systems_static_data}}

    error ->
      # BUGFIX: the failure was swallowed silently and `error` was an
      # unused-variable warning; keep the graceful empty result but log it.
      Logger.warning("find_routes failed: #{inspect(error)}")
      {:ok, %{routes: [], systems_static_data: []}}
  end
end
# Stub clause: route computation is disabled, so each hub gets a placeholder
# route (unsuccessful, no systems, no connection) and no static data is built.
# `origin` and `hubs` arrive as strings and are parsed to integer system ids.
def find_routes(_map_id, hubs, origin, _routes_settings, true) do
  origin_id = String.to_integer(origin)

  routes =
    for hub <- hubs do
      %{
        origin: origin_id,
        destination: String.to_integer(hub),
        success: false,
        systems: [],
        has_connection: false
      }
    end

  {:ok, %{routes: routes, systems_static_data: []}}
end
def get_available_maps() do
case WandererApp.Api.Map.available() do
{:ok, maps} -> {:ok, maps}

View File

@@ -23,10 +23,12 @@ defmodule WandererApp.Release do
IO.puts("Run migrations..")
prepare()
for repo <- repos() do
for repo <- repos do
{:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
end
run_post_migration_tasks()
:init.stop()
end
@@ -76,6 +78,8 @@ defmodule WandererApp.Release do
Enum.each(streaks, fn {repo, up_to_version} ->
{:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, to: up_to_version))
end)
run_post_migration_tasks()
end
defp migration_streaks(pending_migrations) do
@@ -215,4 +219,40 @@ defmodule WandererApp.Release do
IO.puts("Starting repos..")
Enum.each(repos(), & &1.start_link(pool_size: 2))
end
# Post-migration maintenance: repair duplicate map slugs, then make sure the
# unique slug index exists. All steps are best-effort — failures are reported
# on stdout but never abort the release task.
defp run_post_migration_tasks do
  IO.puts("Running post-migration tasks..")

  recover_duplicate_map_slugs()
  ensure_unique_slug_index()

  IO.puts("Post-migration tasks completed.")
end

# Dedup map slugs; reports counts, tolerates failure.
defp recover_duplicate_map_slugs do
  IO.puts("Checking for duplicate map slugs..")

  case WandererApp.Map.SlugRecovery.recover_all_duplicates() do
    {:ok, %{total_slugs_fixed: 0}} ->
      IO.puts("No duplicate slugs found.")

    {:ok, %{total_slugs_fixed: count, total_maps_renamed: renamed}} ->
      IO.puts("Successfully fixed #{count} duplicate slug(s), renamed #{renamed} map(s).")

    {:error, reason} ->
      IO.puts("Warning: Failed to recover duplicate slugs: #{inspect(reason)}")
      IO.puts("Application will continue, but you may need to manually fix duplicate slugs.")
  end
end

# Verify (or create) the unique index on map slugs; tolerates failure.
defp ensure_unique_slug_index do
  IO.puts("Verifying unique index on map slugs..")

  case WandererApp.Map.SlugRecovery.verify_unique_index() do
    {:ok, :exists} ->
      IO.puts("Unique index already exists.")

    {:ok, :created} ->
      IO.puts("Successfully created unique index.")

    {:error, reason} ->
      IO.puts("Warning: Failed to verify/create unique index: #{inspect(reason)}")
      IO.puts("You may need to manually create the index.")
  end
end
end

Some files were not shown because too many files have changed in this diff. Show More