Mirror of https://github.com/henrygd/beszel.git (synced 2025-11-24 14:06:19 +00:00)

Compare commits: v0.7.0...extra-disk (752 commits)

.dockerignore (new file, 48 lines)
@@ -0,0 +1,48 @@
# Node.js dependencies
node_modules
internalsite/node_modules

# Go build artifacts and binaries
build
dist
*.exe
beszel-agent
beszel_data*
pb_data
data
temp

# Development and IDE files
.vscode
.idea*
*.swc
__debug_*

# Git and version control
.git
.gitignore

# Documentation and supplemental files
*.md
supplemental
freebsd-port

# Test files (exclude from production builds)
*_test.go
coverage

# Docker files
dockerfile_*

# Temporary files
*.tmp
*.bak
*.log

# OS specific files
.DS_Store
Thumbs.db

# .NET build artifacts
agent/lhm/obj
agent/lhm/bin

.github/DISCUSSION_TEMPLATE/support.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
body:
  - type: markdown
    attributes:
      value: |
        ### Before opening a discussion:

        - Check the [common issues guide](https://beszel.dev/guide/common-issues).
        - Search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).

  - type: textarea
    id: description
    attributes:
      label: Description
      description: A clear and concise description of the issue or question. If applicable, add screenshots to help explain your problem.
    validations:
      required: true
  - type: input
    id: system
    attributes:
      label: OS / Architecture
      placeholder: linux/amd64 (agent), freebsd/arm64 (hub)
    validations:
      required: true
  - type: input
    id: version
    attributes:
      label: Beszel version
      placeholder: 0.9.1
    validations:
      required: true
  - type: dropdown
    id: install-method
    attributes:
      label: Installation method
      options:
        - Docker
        - Binary
        - Nix
        - Unraid
        - Coolify
        - Other (please describe above)
    validations:
      required: true
  - type: textarea
    id: config
    attributes:
      label: Configuration
      description: Please provide any relevant service configuration
      render: yaml
  - type: textarea
    id: hub-logs
    attributes:
      label: Hub Logs
      description: Check the logs page in PocketBase (`/_/#/logs`) for relevant errors (copy JSON).
      render: json
  - type: textarea
    id: agent-logs
    attributes:
      label: Agent Logs
      description: Please provide any logs from the agent, if relevant. Use `LOG_LEVEL=debug` for more info.
      render: shell

.github/ISSUE_TEMPLATE/bug_report.yml (new file, 134 lines)
@@ -0,0 +1,134 @@
name: 🐛 Bug report
description: Report a new bug or issue.
title: '[Bug]: '
labels: ['bug', "needs confirmation"]
body:
  - type: dropdown
    id: component
    attributes:
      label: Component
      description: Which part of Beszel is this about?
      options:
        - Hub
        - Agent
        - Hub & Agent
    validations:
      required: true
  - type: markdown
    attributes:
      value: |
        ### Thanks for taking the time to fill out this bug report!

        - For more general support, please [start a support thread](https://github.com/henrygd/beszel/discussions/new?category=support).
        - To request a change or feature, use the [feature request form](https://github.com/henrygd/beszel/issues/new?template=feature_request.yml).
        - Please do not submit bugs that are specific to ZFS. We plan to add integration with ZFS utilities in the near future.

        ### Before submitting a bug report:

        - Check the [common issues guide](https://beszel.dev/guide/common-issues).
        - Search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Explain the issue you experienced clearly and concisely.
      placeholder: I went to the coffee pot and it was empty.
    validations:
      required: true
  - type: textarea
    id: expected-behavior
    attributes:
      label: Expected Behavior
      description: In a perfect world, what should have happened?
      placeholder: When I got to the coffee pot, it should have been full.
    validations:
      required: true
  - type: textarea
    id: steps-to-reproduce
    attributes:
      label: Steps to Reproduce
      description: Describe how to reproduce the issue in repeatable steps.
      placeholder: |
        1. Go to the coffee pot.
        2. Make more coffee.
        3. Pour it into a cup.
    validations:
      required: true
  - type: dropdown
    id: category
    attributes:
      label: Category
      description: Which category does this relate to most?
      options:
        - Metrics
        - Charts & Visualization
        - Settings & Configuration
        - Notifications & Alerts
        - Authentication
        - Installation
        - Performance
        - UI / UX
        - Other
    validations:
      required: true
  - type: dropdown
    id: metrics
    attributes:
      label: Affected Metrics
      description: If applicable, which specific metric does this relate to most?
      options:
        - CPU
        - Memory
        - Storage
        - Network
        - Containers
        - GPU
        - Sensors
        - Other
    validations:
      required: true
  - type: input
    id: system
    attributes:
      label: OS / Architecture
      placeholder: linux/amd64 (agent), freebsd/arm64 (hub)
    validations:
      required: true
  - type: input
    id: version
    attributes:
      label: Beszel version
      placeholder: 0.9.1
    validations:
      required: true
  - type: dropdown
    id: install-method
    attributes:
      label: Installation method
      options:
        - Docker
        - Binary
        - Nix
        - Unraid
        - Coolify
        - Other (please describe above)
    validations:
      required: true
  - type: textarea
    id: config
    attributes:
      label: Configuration
      description: Please provide any relevant service configuration
      render: yaml
  - type: textarea
    id: hub-logs
    attributes:
      label: Hub Logs
      description: Check the logs page in PocketBase (`/_/#/logs`) for relevant errors (copy JSON).
      render: json
  - type: textarea
    id: agent-logs
    attributes:
      label: Agent Logs
      description: Please provide any logs from the agent, if relevant. Use `LOG_LEVEL=debug` for more info.
      render: shell

.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: 💬 Support and questions
    url: https://github.com/henrygd/beszel/discussions
    about: Ask and answer questions here.
  - name: ℹ️ View the Common Issues page
    url: https://beszel.dev/guide/common-issues
    about: Find information about commonly encountered problems.

.github/ISSUE_TEMPLATE/feature_request.yml (new file, 76 lines)
@@ -0,0 +1,76 @@
name: 🚀 Feature request
description: Request a new feature or change.
title: "[Feature]: "
labels: ["enhancement", "needs review"]
body:
  - type: dropdown
    id: component
    attributes:
      label: Component
      description: Which part of Beszel is this about?
      options:
        - Hub
        - Agent
        - Hub & Agent
    validations:
      required: true
  - type: markdown
    attributes:
      value: Before submitting, please search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).
  - type: textarea
    attributes:
      label: Describe the feature you would like to see
    validations:
      required: true
  - type: textarea
    id: motivation
    attributes:
      label: Motivation / Use Case
      description: Why do you want this feature? What problem does it solve?
    validations:
      required: true
  - type: textarea
    attributes:
      label: Describe how you would like to see this feature implemented
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Screenshots
      description: Please attach any relevant screenshots, such as images from your current solution or similar implementations.
    validations:
      required: false
  - type: dropdown
    id: category
    attributes:
      label: Category
      description: Which category does this relate to most?
      options:
        - Metrics
        - Charts & Visualization
        - Settings & Configuration
        - Notifications & Alerts
        - Authentication
        - Installation
        - Performance
        - UI / UX
        - Other
    validations:
      required: true
  - type: dropdown
    id: metrics
    attributes:
      label: Affected Metrics
      description: If applicable, which specific metric does this relate to most?
      options:
        - CPU
        - Memory
        - Storage
        - Network
        - Containers
        - GPU
        - Sensors
        - Other
    validations:
      required: true

.github/funding.yml (new file, 1 line)
@@ -0,0 +1 @@
buy_me_a_coffee: henrygd

.github/pull_request_template.md (new file, 33 lines)
@@ -0,0 +1,33 @@
## 📃 Description

A short description of the pull request changes should go here and the sections below should list in detail all changes. You can remove the sections you don't need.

## 📖 Documentation

Add a link to the PR for [documentation](https://github.com/henrygd/beszel-docs) changes.

## 🪵 Changelog

### ➕ Added

- one
- two

### ✏️ Changed

- one
- two

### 🔧 Fixed

- one
- two

### 🗑️ Removed

- one
- two

## 📷 Screenshots

If this PR has any UI/UX changes it's strongly suggested you add screenshots here.

.github/workflows/docker-images.yml (161 changed lines)
@@ -3,7 +3,7 @@ name: Make docker images
on:
  push:
    tags:
      - 'v*'
      - "v*"

jobs:
  build:
@@ -12,12 +12,138 @@ jobs:
      fail-fast: false
      matrix:
        include:
          # henrygd/beszel
          - image: henrygd/beszel
            context: ./beszel
            dockerfile: ./beszel/dockerfile_Hub
            dockerfile: ./internal/dockerfile_hub
            registry: docker.io
            username_secret: DOCKERHUB_USERNAME
            password_secret: DOCKERHUB_TOKEN
            tags: |
              type=raw,value=edge
              type=semver,pattern={{version}}
              type=semver,pattern={{major}}.{{minor}}
              type=semver,pattern={{major}}
              type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

          # henrygd/beszel-agent
          - image: henrygd/beszel-agent
            context: ./beszel
            dockerfile: ./beszel/dockerfile_Agent
            dockerfile: ./internal/dockerfile_agent
            registry: docker.io
            username_secret: DOCKERHUB_USERNAME
            password_secret: DOCKERHUB_TOKEN
            tags: |
              type=raw,value=edge
              type=semver,pattern={{version}}
              type=semver,pattern={{major}}.{{minor}}
              type=semver,pattern={{major}}
              type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

          # henrygd/beszel-agent-nvidia
          - image: henrygd/beszel-agent-nvidia
            dockerfile: ./internal/dockerfile_agent_nvidia
            platforms: linux/amd64
            registry: docker.io
            username_secret: DOCKERHUB_USERNAME
            password_secret: DOCKERHUB_TOKEN
            tags: |
              type=raw,value=edge
              type=semver,pattern={{version}}
              type=semver,pattern={{major}}.{{minor}}
              type=semver,pattern={{major}}
              type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

          # henrygd/beszel-agent-intel
          - image: henrygd/beszel-agent-intel
            dockerfile: ./internal/dockerfile_agent_intel
            platforms: linux/amd64
            registry: docker.io
            username_secret: DOCKERHUB_USERNAME
            password_secret: DOCKERHUB_TOKEN
            tags: |
              type=raw,value=edge
              type=semver,pattern={{version}}
              type=semver,pattern={{major}}.{{minor}}
              type=semver,pattern={{major}}
              type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

          # henrygd/beszel-agent:alpine
          - image: henrygd/beszel-agent
            dockerfile: ./internal/dockerfile_agent_alpine
            registry: docker.io
            username_secret: DOCKERHUB_USERNAME
            password_secret: DOCKERHUB_TOKEN
            tags: |
              type=raw,value=alpine
              type=semver,pattern={{version}}-alpine
              type=semver,pattern={{major}}.{{minor}}-alpine
              type=semver,pattern={{major}}-alpine

          # ghcr.io/henrygd/beszel
          - image: ghcr.io/${{ github.repository }}/beszel
            dockerfile: ./internal/dockerfile_hub
            registry: ghcr.io
            username: ${{ github.actor }}
            password_secret: GITHUB_TOKEN
            tags: |
              type=raw,value=edge
              type=semver,pattern={{version}}
              type=semver,pattern={{major}}.{{minor}}
              type=semver,pattern={{major}}
              type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

          # ghcr.io/henrygd/beszel-agent
          - image: ghcr.io/${{ github.repository }}/beszel-agent
            dockerfile: ./internal/dockerfile_agent
            registry: ghcr.io
            username: ${{ github.actor }}
            password_secret: GITHUB_TOKEN
            tags: |
              type=raw,value=edge
              type=semver,pattern={{version}}
              type=semver,pattern={{major}}.{{minor}}
              type=semver,pattern={{major}}
              type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

          # ghcr.io/henrygd/beszel-agent-nvidia
          - image: ghcr.io/${{ github.repository }}/beszel-agent-nvidia
            dockerfile: ./internal/dockerfile_agent_nvidia
            platforms: linux/amd64
            registry: ghcr.io
            username: ${{ github.actor }}
            password_secret: GITHUB_TOKEN
            tags: |
              type=raw,value=edge
              type=semver,pattern={{version}}
              type=semver,pattern={{major}}.{{minor}}
              type=semver,pattern={{major}}
              type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

          # ghcr.io/henrygd/beszel-agent-intel
          - image: ghcr.io/${{ github.repository }}/beszel-agent-intel
            dockerfile: ./internal/dockerfile_agent_intel
            platforms: linux/amd64
            registry: ghcr.io
            username: ${{ github.actor }}
            password_secret: GITHUB_TOKEN
            tags: |
              type=raw,value=edge
              type=semver,pattern={{version}}
              type=semver,pattern={{major}}.{{minor}}
              type=semver,pattern={{major}}
              type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

          # ghcr.io/henrygd/beszel-agent:alpine
          - image: ghcr.io/${{ github.repository }}/beszel-agent
            dockerfile: ./internal/dockerfile_agent_alpine
            registry: ghcr.io
            username: ${{ github.actor }}
            password_secret: GITHUB_TOKEN
            tags: |
              type=raw,value=alpine
              type=semver,pattern={{version}}-alpine
              type=semver,pattern={{major}}.{{minor}}-alpine
              type=semver,pattern={{major}}-alpine

    permissions:
      contents: read
      packages: write
@@ -30,10 +156,10 @@ jobs:
        uses: oven-sh/setup-bun@v2

      - name: Install dependencies
        run: bun install --no-save --cwd ./beszel/site
        run: bun install --no-save --cwd ./internal/site

      - name: Build site
        run: bun run --cwd ./beszel/site build
        run: bun run --cwd ./internal/site build

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
@@ -46,28 +172,27 @@ jobs:
        uses: docker/metadata-action@v5
        with:
          images: ${{ matrix.image }}
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
          tags: ${{ matrix.tags }}

      # https://github.com/docker/login-action
      - name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        env:
          password_secret_exists: ${{ secrets[matrix.password_secret] != '' && 'true' || 'false' }}
        if: github.event_name != 'pull_request' && env.password_secret_exists == 'true'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
          username: ${{ matrix.username || secrets[matrix.username_secret] }}
          password: ${{ secrets[matrix.password_secret] }}
          registry: ${{ matrix.registry }}

      # Build and push Docker image with Buildx (don't push on PR)
      # https://github.com/docker/build-push-action
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: '${{ matrix.context }}'
          context: ./
          file: ${{ matrix.dockerfile }}
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: ${{ github.ref_type == 'tag' }}
          platforms: ${{ matrix.platforms || 'linux/amd64,linux/arm64,linux/arm/v7' }}
          push: ${{ github.ref_type == 'tag' && secrets[matrix.password_secret] != '' }}
          tags: ${{ steps.metadata.outputs.tags }}
          labels: ${{ steps.metadata.outputs.labels }}

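The tags blocks in the matrix are docker/metadata-action patterns: on a version tag each image gets an edge tag plus full, minor, and major semver tags (or -alpine variants). As a rough illustration of how those patterns expand, here is a small Go sketch; the image name and version are made-up inputs, not values from the workflow.

```go
package main

import (
	"fmt"
	"strings"
)

// expandTags mimics the semver patterns used in the workflow matrix:
// edge, {{version}}, {{major}}.{{minor}}, and {{major}}.
func expandTags(image, version string) []string {
	parts := strings.Split(version, ".") // e.g. "0.9.1" -> ["0", "9", "1"]
	return []string{
		image + ":edge",
		image + ":" + version,
		image + ":" + parts[0] + "." + parts[1],
		image + ":" + parts[0],
	}
}

func main() {
	// Hypothetical release tag v0.9.1 pushed to Docker Hub.
	for _, t := range expandTags("henrygd/beszel-agent", "0.9.1") {
		fmt.Println(t) // :edge, :0.9.1, :0.9, :0
	}
}
```

Under that expansion, a v0.9.1 tag would publish henrygd/beszel-agent:edge, :0.9.1, :0.9, and :0, while the alpine matrix entry produces the matching -alpine tags.
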
.github/workflows/inactivity-actions.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
name: 'Issue and PR Maintenance'

on:
  schedule:
    - cron: '0 0 * * *' # runs at midnight UTC
  workflow_dispatch:

permissions:
  issues: write
  pull-requests: write

jobs:
  close-stale:
    name: Close Stale Issues
    runs-on: ubuntu-24.04
    steps:
      - name: Close Stale Issues
        uses: actions/stale@v9
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}

          # Messaging
          stale-issue-message: >
            👋 This issue has been automatically marked as stale due to inactivity.
            If this issue is still relevant, please comment to keep it open.
            Without activity, it will be closed in 7 days.

          close-issue-message: >
            🔒 This issue has been automatically closed due to prolonged inactivity.
            Feel free to open a new issue if you have further questions or concerns.

          # Timing
          days-before-issue-stale: 14
          days-before-issue-close: 7

          # Labels
          stale-issue-label: 'stale'
          remove-stale-when-updated: true
          only-issue-labels: 'awaiting-requester'

          # Exemptions
          exempt-assignees: true
          exempt-milestones: true

.github/workflows/label-from-dropdown.yml (new file, 82 lines)
@@ -0,0 +1,82 @@
name: Label issues from dropdowns

on:
  issues:
    types: [opened]

jobs:
  label_from_dropdown:
    runs-on: ubuntu-latest
    permissions:
      issues: write
    steps:
      - name: Apply labels based on dropdown choices
        uses: actions/github-script@v7
        with:
          script: |

            const issueNumber = context.issue.number;
            const owner = context.repo.owner;
            const repo = context.repo.repo;

            // Get the issue body
            const body = context.payload.issue.body;

            // Helper to find dropdown value in the body (assuming markdown format)
            function extractSectionValue(heading) {
              const regex = new RegExp(`### ${heading}\\s+([\\s\\S]*?)(?:\\n###|$)`, 'i');
              const match = body.match(regex);
              if (match) {
                // Get the first non-empty line after the heading
                const lines = match[1].split('\n').map(l => l.trim()).filter(Boolean);
                return lines[0] || null;
              }
              return null;
            }

            // Extract dropdown selections
            const category = extractSectionValue('Category');
            const metrics = extractSectionValue('Affected Metrics');
            const component = extractSectionValue('Component');

            // Build labels to add
            let labelsToAdd = [];
            if (category) labelsToAdd.push(category);
            if (metrics) labelsToAdd.push(metrics);
            if (component) labelsToAdd.push(component);

            // Get existing labels in the repo
            const { data: existingLabels } = await github.rest.issues.listLabelsForRepo({
              owner,
              repo,
              per_page: 100
            });
            const existingLabelNames = existingLabels.map(l => l.name);

            // Find labels that need to be created
            const labelsToCreate = labelsToAdd.filter(label => !existingLabelNames.includes(label));

            // Create missing labels (with a default color)
            for (const label of labelsToCreate) {
              try {
                await github.rest.issues.createLabel({
                  owner,
                  repo,
                  name: label,
                  color: 'ededed' // light gray, you can pick any hex color
                });
              } catch (e) {
                // Ignore if label already exists (race condition), otherwise rethrow
                if (!e || e.status !== 422) throw e;
              }
            }

            // Now apply all labels (they all exist now)
            if (labelsToAdd.length > 0) {
              await github.rest.issues.addLabels({
                owner,
                repo,
                issue_number: issueNumber,
                labels: labelsToAdd
              });
            }

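The script keys off the "### Heading" / first-non-empty-line structure that GitHub produces when it renders the dropdowns from bug_report.yml and feature_request.yml. A hedged Go sketch of the same extraction, useful for sanity-checking the regex against a sample issue body (the sample body and its values are illustrative, not taken from a real issue):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// extractSectionValue mirrors the workflow's JavaScript helper: find the
// "### <heading>" section and return the first non-empty line under it.
func extractSectionValue(body, heading string) string {
	re := regexp.MustCompile(`(?is)### ` + regexp.QuoteMeta(heading) + `\s+(.*?)(?:\n###|$)`)
	m := re.FindStringSubmatch(body)
	if m == nil {
		return ""
	}
	for _, line := range strings.Split(m[1], "\n") {
		if line = strings.TrimSpace(line); line != "" {
			return line
		}
	}
	return ""
}

func main() {
	// Hypothetical issue body as GitHub renders a bug_report.yml submission.
	body := "### Component\n\nAgent\n\n### Category\n\nMetrics\n\n### Affected Metrics\n\nCPU\n"
	fmt.Println(extractSectionValue(body, "Category"))         // Metrics
	fmt.Println(extractSectionValue(body, "Affected Metrics")) // CPU
	fmt.Println(extractSectionValue(body, "Component"))        // Agent
}
```

For that body the workflow would add the labels Metrics, CPU, and Agent, creating any that do not yet exist in the repository.
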
.github/workflows/release.yml (24 changed lines)
@@ -3,7 +3,7 @@ name: Make release and binaries
on:
  push:
    tags:
      - '*'
      - "v*"

permissions:
  contents: write
@@ -21,22 +21,34 @@ jobs:
        uses: oven-sh/setup-bun@v2

      - name: Install dependencies
        run: bun install --no-save --cwd ./beszel/site
        run: bun install --no-save --cwd ./internal/site

      - name: Build site
        run: bun run --cwd ./beszel/site build
        run: bun run --cwd ./internal/site build

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '^1.22.1'
          go-version: "^1.22.1"

      - name: Set up .NET
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: "9.0.x"

      - name: Build .NET LHM executable for Windows sensors
        run: |
          dotnet build -c Release ./agent/lhm/beszel_lhm.csproj
        shell: bash

      - name: GoReleaser beszel
        uses: goreleaser/goreleaser-action@v6
        with:
          workdir: ./beszel
          workdir: ./
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.TOKEN }}
          GITHUB_TOKEN: ${{ secrets.TOKEN || secrets.GITHUB_TOKEN }}
          WINGET_TOKEN: ${{ secrets.WINGET_TOKEN }}
          IS_FORK: ${{ github.repository_owner != 'henrygd' }}

.github/workflows/vulncheck.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
# https://github.com/minio/minio/blob/master/.github/workflows/vulncheck.yml

name: VulnCheck
on:
  pull_request:
    branches:
      - main

  push:
    branches:
      - main

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  vulncheck:
    name: VulnCheck
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.25.x
          # cached: false
      - name: Get official govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
        shell: bash
      - name: Run govulncheck
        run: govulncheck -show verbose ./...
        shell: bash

.gitignore (13 changed lines)
@@ -8,8 +8,15 @@ beszel_data
beszel_data*
dist
*.exe
beszel/cmd/hub/hub
beszel/cmd/agent/agent
internal/cmd/hub/hub
internal/cmd/agent/agent
node_modules
beszel/build
build
*timestamp*
.swc
internal/site/src/locales/**/*.ts
*.bak
__debug_*
agent/lhm/obj
agent/lhm/bin
dockerfile_agent_dev

.goreleaser.yml (new file, 238 lines)
@@ -0,0 +1,238 @@
version: 2

project_name: beszel

before:
  hooks:
    - go mod tidy
    - go generate -run fetchsmartctl ./agent

builds:
  - id: beszel
    binary: beszel
    main: internal/cmd/hub/hub.go
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - darwin
    goarch:
      - amd64
      - arm64
      - arm

  - id: beszel-agent
    binary: beszel-agent
    main: internal/cmd/agent/agent.go
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - darwin
      - freebsd
      - openbsd
      - windows
    goarch:
      - amd64
      - arm64
      - arm
      - mips64
      - riscv64
      - mipsle
      - mips
      - ppc64le
    gomips:
      - hardfloat
      - softfloat
    ignore:
      - goos: freebsd
        goarch: arm
      - goos: openbsd
        goarch: arm
      - goos: linux
        goarch: mips64
        gomips: softfloat
      - goos: linux
        goarch: mipsle
        gomips: hardfloat
      - goos: linux
        goarch: mips
        gomips: hardfloat
      - goos: windows
        goarch: arm
      - goos: darwin
        goarch: riscv64
      - goos: windows
        goarch: riscv64

archives:
  - id: beszel-agent
    formats: [tar.gz]
    ids:
      - beszel-agent
    name_template: >-
      {{ .Binary }}_
      {{- .Os }}_
      {{- .Arch }}
    format_overrides:
      - goos: windows
        formats: [zip]

  - id: beszel
    formats: [tar.gz]
    ids:
      - beszel
    name_template: >-
      {{ .Binary }}_
      {{- .Os }}_
      {{- .Arch }}

nfpms:
  - id: beszel-agent
    package_name: beszel-agent
    description: |-
      Agent for Beszel
      Beszel is a lightweight server monitoring platform that includes Docker
      statistics, historical data, and alert functions. It has a friendly web
      interface, simple configuration, and is ready to use out of the box.
      It supports automatic backup, multi-user, OAuth authentication, and
      API access.
    maintainer: henrygd <hank@henrygd.me>
    section: net
    ids:
      - beszel-agent
    formats:
      - deb
    contents:
      - src: ./supplemental/debian/beszel-agent.service
        dst: lib/systemd/system/beszel-agent.service
        packager: deb
      - src: ./supplemental/debian/copyright
        dst: usr/share/doc/beszel-agent/copyright
        packager: deb
      - src: ./supplemental/debian/lintian-overrides
        dst: usr/share/lintian/overrides/beszel-agent
        packager: deb
    scripts:
      postinstall: ./supplemental/debian/postinstall.sh
      preremove: ./supplemental/debian/prerm.sh
      postremove: ./supplemental/debian/postrm.sh
    deb:
      predepends:
        - adduser
        - debconf
      scripts:
        templates: ./supplemental/debian/templates
        # Currently broken due to a bug in goreleaser
        # https://github.com/goreleaser/goreleaser/issues/5487
        #config: ./supplemental/debian/config.sh

scoops:
  - ids: [beszel-agent]
    name: beszel-agent
    repository:
      owner: henrygd
      name: beszel-scoops
    homepage: "https://beszel.dev"
    description: "Agent for Beszel, a lightweight server monitoring platform."
    license: MIT
    skip_upload: '{{ if eq (tolower .Env.IS_FORK) "true" }}true{{ else }}auto{{ end }}'

# # Needs choco installed, so doesn't build on linux / default gh workflow :(
# chocolateys:
#   - title: Beszel Agent
#     ids: [beszel-agent]
#     package_source_url: https://github.com/henrygd/beszel-chocolatey
#     owners: henrygd
#     authors: henrygd
#     summary: 'Agent for Beszel, a lightweight server monitoring platform.'
#     description: |
#       Beszel is a lightweight server monitoring platform that includes Docker statistics, historical data, and alert functions.

#       It has a friendly web interface, simple configuration, and is ready to use out of the box. It supports automatic backup, multi-user, OAuth authentication, and API access.
#     license_url: https://github.com/henrygd/beszel/blob/main/LICENSE
#     project_url: https://beszel.dev
#     project_source_url: https://github.com/henrygd/beszel
#     docs_url: https://beszel.dev/guide/getting-started
#     icon_url: https://cdn.jsdelivr.net/gh/selfhst/icons/png/beszel.png
#     bug_tracker_url: https://github.com/henrygd/beszel/issues
#     copyright: 2025 henrygd
#     tags: foss cross-platform admin monitoring
#     require_license_acceptance: false
#     release_notes: 'https://github.com/henrygd/beszel/releases/tag/v{{ .Version }}'

brews:
  - ids: [beszel-agent]
    name: beszel-agent
    repository:
      owner: henrygd
      name: homebrew-beszel
    homepage: "https://beszel.dev"
    description: "Agent for Beszel, a lightweight server monitoring platform."
    license: MIT
    skip_upload: '{{ if eq (tolower .Env.IS_FORK) "true" }}true{{ else }}auto{{ end }}'
    extra_install: |
      (bin/"beszel-agent-launcher").write <<~EOS
        #!/bin/bash
        set -a
        if [ -f "$HOME/.config/beszel/beszel-agent.env" ]; then
          source "$HOME/.config/beszel/beszel-agent.env"
        fi
        set +a
        exec #{bin}/beszel-agent "$@"
      EOS
      (bin/"beszel-agent-launcher").chmod 0755
    service: |
      run ["#{bin}/beszel-agent-launcher"]
      log_path "#{Dir.home}/.cache/beszel/beszel-agent.log"
      error_log_path "#{Dir.home}/.cache/beszel/beszel-agent.log"
      keep_alive true
      restart_delay 5
      process_type :background

winget:
  - ids: [beszel-agent]
    name: beszel-agent
    package_identifier: henrygd.beszel-agent
    publisher: henrygd
    license: MIT
    license_url: "https://github.com/henrygd/beszel/blob/main/LICENSE"
    copyright: "2025 henrygd"
    homepage: "https://beszel.dev"
    release_notes_url: "https://github.com/henrygd/beszel/releases/tag/v{{ .Version }}"
    publisher_support_url: "https://github.com/henrygd/beszel/issues"
    short_description: "Agent for Beszel, a lightweight server monitoring platform."
    skip_upload: '{{ if eq (tolower .Env.IS_FORK) "true" }}true{{ else }}auto{{ end }}'
    description: |
      Beszel is a lightweight server monitoring platform that includes Docker
      statistics, historical data, and alert functions. It has a friendly web
      interface, simple configuration, and is ready to use out of the box.
      It supports automatic backup, multi-user, OAuth authentication, and
      API access.
    tags:
      - homelab
      - monitoring
      - self-hosted
    repository:
      owner: henrygd
      name: beszel-winget
      branch: henrygd.beszel-agent-{{ .Version }}
      token: "{{ .Env.WINGET_TOKEN }}"
      # pull_request:
      #   enabled: true
      #   draft: false
      #   base:
      #     owner: microsoft
      #     name: winget-pkgs
      #     branch: master

release:
  draft: true

changelog:
  disable: true
  sort: asc
  filters:
    exclude:
      - "^docs:"
      - "^test:"

Makefile (new file, 108 lines)
@@ -0,0 +1,108 @@
# Default OS/ARCH values
OS ?= $(shell go env GOOS)
ARCH ?= $(shell go env GOARCH)
# Skip building the web UI if true
SKIP_WEB ?= false

# Set executable extension based on target OS
EXE_EXT := $(if $(filter windows,$(OS)),.exe,)

.PHONY: tidy build-agent build-hub build-hub-dev build clean lint dev-server dev-agent dev-hub dev generate-locales fetch-smartctl-conditional
.DEFAULT_GOAL := build

clean:
	go clean
	rm -rf ./build

lint:
	golangci-lint run

test: export GOEXPERIMENT=synctest
test:
	go test -tags=testing ./...

tidy:
	go mod tidy

build-web-ui:
	@if command -v bun >/dev/null 2>&1; then \
		bun install --cwd ./internal/site && \
		bun run --cwd ./internal/site build; \
	else \
		npm install --prefix ./internal/site && \
		npm run --prefix ./internal/site build; \
	fi

# Conditional .NET build - only for Windows
build-dotnet-conditional:
	@if [ "$(OS)" = "windows" ]; then \
		echo "Building .NET executable for Windows..."; \
		if command -v dotnet >/dev/null 2>&1; then \
			rm -rf ./agent/lhm/bin; \
			dotnet build -c Release ./agent/lhm/beszel_lhm.csproj; \
		else \
			echo "Error: dotnet not found. Install .NET SDK to build Windows agent."; \
			exit 1; \
		fi; \
	fi

# Download smartctl.exe at build time for Windows (skips if already present)
fetch-smartctl-conditional:
	@if [ "$(OS)" = "windows" ]; then \
		go generate -run fetchsmartctl ./agent; \
	fi

# Update build-agent to include conditional .NET build
build-agent: tidy build-dotnet-conditional fetch-smartctl-conditional
	GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel-agent_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/agent

build-hub: tidy $(if $(filter false,$(SKIP_WEB)),build-web-ui)
	GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/hub

build-hub-dev: tidy
	mkdir -p ./internal/site/dist && touch ./internal/site/dist/index.html
	GOOS=$(OS) GOARCH=$(ARCH) go build -tags development -o ./build/beszel-dev_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/hub

build: build-agent build-hub

generate-locales:
	@if [ ! -f ./internal/site/src/locales/en/en.ts ]; then \
		echo "Generating locales..."; \
		command -v bun >/dev/null 2>&1 && cd ./internal/site && bun install && bun run sync || cd ./internal/site && npm install && npm run sync; \
	fi

dev-server: generate-locales
	cd ./internal/site
	@if command -v bun >/dev/null 2>&1; then \
		cd ./internal/site && bun run dev --host 0.0.0.0; \
	else \
		cd ./internal/site && npm run dev --host 0.0.0.0; \
	fi

dev-hub: export ENV=dev
dev-hub:
	mkdir -p ./internal/site/dist && touch ./internal/site/dist/index.html
	@if command -v entr >/dev/null 2>&1; then \
		find ./internal -type f -name '*.go' | entr -r -s "cd ./internal/cmd/hub && go run -tags development . serve --http 0.0.0.0:8090"; \
	else \
		cd ./internal/cmd/hub && go run -tags development . serve --http 0.0.0.0:8090; \
	fi

dev-agent:
	@if command -v entr >/dev/null 2>&1; then \
		find ./internal/cmd/agent/*.go ./agent/*.go | entr -r go run github.com/henrygd/beszel/internal/cmd/agent; \
	else \
		go run github.com/henrygd/beszel/internal/cmd/agent; \
	fi

build-dotnet:
	@if command -v dotnet >/dev/null 2>&1; then \
		rm -rf ./agent/lhm/bin; \
		dotnet build -c Release ./agent/lhm/beszel_lhm.csproj; \
	else \
		echo "dotnet not found"; \
	fi


# KEY="..." make -j dev
dev: dev-server dev-hub dev-agent

@@ -2,8 +2,6 @@

## Reporting a Vulnerability

If you find a vulnerability in the latest version, please email me directly at hank@henrygd.me, or [submit a private advisory](https://github.com/henrygd/beszel/security/advisories/new).
If you find a vulnerability in the latest version, please [submit a private advisory](https://github.com/henrygd/beszel/security/advisories/new).

If you submit an advisory, open an empty issue as well to let me know that you did (or email me), as I'm not sure if I get notifications for that.

If the issue is low severity (use best judgement) you may open an issue for it instead of contacting me directly.
If it's low severity (use best judgement) you may open an issue instead of an advisory.

agent/agent.go (new file, 224 lines)
@@ -0,0 +1,224 @@
// Package agent implements the Beszel monitoring agent that collects and serves system metrics.
//
// The agent runs on monitored systems and communicates collected data
// to the Beszel hub for centralized monitoring and alerting.
package agent

import (
	"crypto/sha256"
	"encoding/hex"
	"log/slog"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/gliderlabs/ssh"
	"github.com/henrygd/beszel"
	"github.com/henrygd/beszel/agent/deltatracker"
	"github.com/henrygd/beszel/internal/entities/system"
	"github.com/shirou/gopsutil/v4/host"
	gossh "golang.org/x/crypto/ssh"
)

type Agent struct {
	sync.Mutex                                                                      // Used to lock agent while collecting data
	debug                     bool                                                  // true if LOG_LEVEL is set to debug
	zfs                       bool                                                  // true if system has arcstats
	memCalc                   string                                                // Memory calculation formula
	fsNames                   []string                                              // List of filesystem device names being monitored
	fsStats                   map[string]*system.FsStats                            // Keeps track of disk stats for each filesystem
	diskPrev                  map[uint16]map[string]prevDisk                        // Previous disk I/O counters per cache interval
	netInterfaces             map[string]struct{}                                   // Stores all valid network interfaces
	netIoStats                map[uint16]system.NetIoStats                          // Keeps track of bandwidth usage per cache interval
	netInterfaceDeltaTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64] // Per-cache-time NIC delta trackers
	dockerManager             *dockerManager                                        // Manages Docker API requests
	sensorConfig              *SensorConfig                                         // Sensors config
	systemInfo                system.Info                                           // Host system info
	gpuManager                *GPUManager                                           // Manages GPU data
	cache                     *systemDataCache                                      // Cache for system stats based on cache time
	connectionManager         *ConnectionManager                                    // Channel to signal connection events
	handlerRegistry           *HandlerRegistry                                      // Registry for routing incoming messages
	server                    *ssh.Server                                           // SSH server
	dataDir                   string                                                // Directory for persisting data
	keys                      []gossh.PublicKey                                     // SSH public keys
	smartManager              *SmartManager                                         // Manages SMART data
	systemdManager            *systemdManager                                       // Manages systemd services
}

// NewAgent creates a new agent with the given data directory for persisting data.
// If the data directory is not set, it will attempt to find the optimal directory.
func NewAgent(dataDir ...string) (agent *Agent, err error) {
	agent = &Agent{
		fsStats: make(map[string]*system.FsStats),
		cache:   NewSystemDataCache(),
	}

	// Initialize disk I/O previous counters storage
	agent.diskPrev = make(map[uint16]map[string]prevDisk)
	// Initialize per-cache-time network tracking structures
	agent.netIoStats = make(map[uint16]system.NetIoStats)
	agent.netInterfaceDeltaTrackers = make(map[uint16]*deltatracker.DeltaTracker[string, uint64])

	agent.dataDir, err = getDataDir(dataDir...)
	if err != nil {
		slog.Warn("Data directory not found")
	} else {
		slog.Info("Data directory", "path", agent.dataDir)
	}

	agent.memCalc, _ = GetEnv("MEM_CALC")
	agent.sensorConfig = agent.newSensorConfig()
	// Set up slog with a log level determined by the LOG_LEVEL env var
	if logLevelStr, exists := GetEnv("LOG_LEVEL"); exists {
		switch strings.ToLower(logLevelStr) {
		case "debug":
			agent.debug = true
			slog.SetLogLoggerLevel(slog.LevelDebug)
		case "warn":
			slog.SetLogLoggerLevel(slog.LevelWarn)
		case "error":
			slog.SetLogLoggerLevel(slog.LevelError)
		}
	}

	slog.Debug(beszel.Version)

	// initialize system info
	agent.initializeSystemInfo()

	// initialize connection manager
	agent.connectionManager = newConnectionManager(agent)

	// initialize handler registry
	agent.handlerRegistry = NewHandlerRegistry()

	// initialize disk info
	agent.initializeDiskInfo()

	// initialize net io stats
	agent.initializeNetIoStats()

	// initialize docker manager
	agent.dockerManager = newDockerManager(agent)

	agent.systemdManager, err = newSystemdManager()
	if err != nil {
		slog.Debug("Systemd", "err", err)
	}

	agent.smartManager, err = NewSmartManager()
	if err != nil {
		slog.Debug("SMART", "err", err)
	}

	// initialize GPU manager
	agent.gpuManager, err = NewGPUManager()
	if err != nil {
		slog.Debug("GPU", "err", err)
	}

	// if debugging, print stats
	if agent.debug {
		slog.Debug("Stats", "data", agent.gatherStats(0))
	}

	return agent, nil
}

// GetEnv retrieves an environment variable with a "BESZEL_AGENT_" prefix, or falls back to the unprefixed key.
func GetEnv(key string) (value string, exists bool) {
	if value, exists = os.LookupEnv("BESZEL_AGENT_" + key); exists {
		return value, exists
	}
	// Fallback to the old unprefixed key
	return os.LookupEnv(key)
}

func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
	a.Lock()
	defer a.Unlock()

	data, isCached := a.cache.Get(cacheTimeMs)
	if isCached {
		slog.Debug("Cached data", "cacheTimeMs", cacheTimeMs)
		return data
	}

	*data = system.CombinedData{
		Stats: a.getSystemStats(cacheTimeMs),
		Info:  a.systemInfo,
	}
	// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)

	if a.dockerManager != nil {
		if containerStats, err := a.dockerManager.getDockerStats(cacheTimeMs); err == nil {
			data.Containers = containerStats
			slog.Debug("Containers", "data", data.Containers)
		} else {
			slog.Debug("Containers", "err", err)
		}
	}

	// skip updating systemd services if cache time is not the default 60sec interval
	if a.systemdManager != nil && cacheTimeMs == 60_000 && a.systemdManager.hasFreshStats {
		data.SystemdServices = a.systemdManager.getServiceStats(nil, false)
	}

	data.Stats.ExtraFs = make(map[string]*system.FsStats)
	data.Info.ExtraFsPct = make(map[string]float64)
	for name, stats := range a.fsStats {
		if !stats.Root && stats.DiskTotal > 0 {
			// Use custom name if available, otherwise use device name
			key := name
			if stats.Name != "" {
				key = stats.Name
			}
			data.Stats.ExtraFs[key] = stats
			// Add percentage info to Info struct for dashboard
			if stats.DiskTotal > 0 {
				pct := twoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
				data.Info.ExtraFsPct[key] = pct

|
||||
}
|
||||
}
|
||||
}
|
||||
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
||||
|
||||
a.cache.Set(data, cacheTimeMs)
|
||||
return data
|
||||
}
|
||||
|
||||
// StartAgent initializes and starts the agent with optional WebSocket connection
|
||||
func (a *Agent) Start(serverOptions ServerOptions) error {
|
||||
a.keys = serverOptions.Keys
|
||||
return a.connectionManager.Start(serverOptions)
|
||||
}
|
||||
|
||||
func (a *Agent) getFingerprint() string {
|
||||
// first look for a fingerprint in the data directory
|
||||
if a.dataDir != "" {
|
||||
if fp, err := os.ReadFile(filepath.Join(a.dataDir, "fingerprint")); err == nil {
|
||||
return string(fp)
|
||||
}
|
||||
}
|
||||
|
||||
// if no fingerprint is found, generate one
|
||||
fingerprint, err := host.HostID()
|
||||
if err != nil || fingerprint == "" {
|
||||
fingerprint = a.systemInfo.Hostname + a.systemInfo.CpuModel
|
||||
}
|
||||
|
||||
// hash fingerprint
|
||||
sum := sha256.Sum256([]byte(fingerprint))
|
||||
fingerprint = hex.EncodeToString(sum[:24])
|
||||
|
||||
// save fingerprint to data directory
|
||||
if a.dataDir != "" {
|
||||
err = os.WriteFile(filepath.Join(a.dataDir, "fingerprint"), []byte(fingerprint), 0644)
|
||||
if err != nil {
|
||||
slog.Warn("Failed to save fingerprint", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
return fingerprint
|
||||
}
|
||||
55
agent/agent_cache.go
Normal file
55
agent/agent_cache.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
)
|
||||
|
||||
type systemDataCache struct {
|
||||
sync.RWMutex
|
||||
cache map[uint16]*cacheNode
|
||||
}
|
||||
|
||||
type cacheNode struct {
|
||||
data *system.CombinedData
|
||||
lastUpdate time.Time
|
||||
}
|
||||
|
||||
// NewSystemDataCache creates a cache keyed by the polling interval in milliseconds.
|
||||
func NewSystemDataCache() *systemDataCache {
|
||||
return &systemDataCache{
|
||||
cache: make(map[uint16]*cacheNode),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns cached combined data when the entry is still considered fresh.
|
||||
func (c *systemDataCache) Get(cacheTimeMs uint16) (stats *system.CombinedData, isCached bool) {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
node, ok := c.cache[cacheTimeMs]
|
||||
if !ok {
|
||||
return &system.CombinedData{}, false
|
||||
}
|
||||
// allowedSkew := time.Second
|
||||
// isFresh := time.Since(node.lastUpdate) < time.Duration(cacheTimeMs)*time.Millisecond-allowedSkew
|
||||
// allow a 50% skew of the cache time
|
||||
isFresh := time.Since(node.lastUpdate) < time.Duration(cacheTimeMs/2)*time.Millisecond
|
||||
return node.data, isFresh
|
||||
}
|
||||
|
||||
// Set stores the latest combined data snapshot for the given interval.
|
||||
func (c *systemDataCache) Set(data *system.CombinedData, cacheTimeMs uint16) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
node, ok := c.cache[cacheTimeMs]
|
||||
if !ok {
|
||||
node = &cacheNode{}
|
||||
c.cache[cacheTimeMs] = node
|
||||
}
|
||||
node.data = data
|
||||
node.lastUpdate = time.Now()
|
||||
}
|
||||
246
agent/agent_cache_test.go
Normal file
246
agent/agent_cache_test.go
Normal file
@@ -0,0 +1,246 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func createTestCacheData() *system.CombinedData {
|
||||
return &system.CombinedData{
|
||||
Stats: system.Stats{
|
||||
Cpu: 50.5,
|
||||
Mem: 8192,
|
||||
DiskTotal: 100000,
|
||||
},
|
||||
Info: system.Info{
|
||||
Hostname: "test-host",
|
||||
},
|
||||
Containers: []*container.Stats{
|
||||
{
|
||||
Name: "test-container",
|
||||
Cpu: 25.0,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewSystemDataCache(t *testing.T) {
|
||||
cache := NewSystemDataCache()
|
||||
require.NotNil(t, cache)
|
||||
assert.NotNil(t, cache.cache)
|
||||
assert.Empty(t, cache.cache)
|
||||
}
|
||||
|
||||
func TestCacheGetSet(t *testing.T) {
|
||||
cache := NewSystemDataCache()
|
||||
data := createTestCacheData()
|
||||
|
||||
// Test setting data
|
||||
cache.Set(data, 1000) // 1 second cache
|
||||
|
||||
// Test getting fresh data
|
||||
retrieved, isCached := cache.Get(1000)
|
||||
assert.True(t, isCached)
|
||||
assert.Equal(t, data, retrieved)
|
||||
|
||||
// Test getting non-existent cache key
|
||||
_, isCached = cache.Get(2000)
|
||||
assert.False(t, isCached)
|
||||
}
|
||||
|
||||
func TestCacheFreshness(t *testing.T) {
|
||||
cache := NewSystemDataCache()
|
||||
data := createTestCacheData()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
cacheTimeMs uint16
|
||||
sleepMs time.Duration
|
||||
expectFresh bool
|
||||
}{
|
||||
{
|
||||
name: "fresh data - well within cache time",
|
||||
cacheTimeMs: 1000, // 1 second
|
||||
sleepMs: 100, // 100ms
|
||||
expectFresh: true,
|
||||
},
|
||||
{
|
||||
name: "fresh data - at 50% of cache time boundary",
|
||||
cacheTimeMs: 1000, // 1 second, 50% = 500ms
|
||||
sleepMs: 499, // just under 500ms
|
||||
expectFresh: true,
|
||||
},
|
||||
{
|
||||
name: "stale data - exactly at 50% cache time",
|
||||
cacheTimeMs: 1000, // 1 second, 50% = 500ms
|
||||
sleepMs: 500, // exactly 500ms
|
||||
expectFresh: false,
|
||||
},
|
||||
{
|
||||
name: "stale data - well beyond cache time",
|
||||
cacheTimeMs: 1000, // 1 second
|
||||
sleepMs: 800, // 800ms
|
||||
expectFresh: false,
|
||||
},
|
||||
{
|
||||
name: "short cache time",
|
||||
cacheTimeMs: 200, // 200ms, 50% = 100ms
|
||||
sleepMs: 150, // 150ms > 100ms
|
||||
expectFresh: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
// Set data
|
||||
cache.Set(data, tc.cacheTimeMs)
|
||||
|
||||
// Wait for the specified duration
|
||||
if tc.sleepMs > 0 {
|
||||
time.Sleep(tc.sleepMs * time.Millisecond)
|
||||
}
|
||||
|
||||
// Check freshness
|
||||
_, isCached := cache.Get(tc.cacheTimeMs)
|
||||
assert.Equal(t, tc.expectFresh, isCached)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheMultipleIntervals(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
cache := NewSystemDataCache()
|
||||
data1 := createTestCacheData()
|
||||
data2 := &system.CombinedData{
|
||||
Stats: system.Stats{
|
||||
Cpu: 75.0,
|
||||
Mem: 16384,
|
||||
},
|
||||
Info: system.Info{
|
||||
Hostname: "test-host-2",
|
||||
},
|
||||
Containers: []*container.Stats{},
|
||||
}
|
||||
|
||||
// Set data for different intervals
|
||||
cache.Set(data1, 500) // 500ms cache
|
||||
cache.Set(data2, 1000) // 1000ms cache
|
||||
|
||||
// Both should be fresh immediately
|
||||
retrieved1, isCached1 := cache.Get(500)
|
||||
assert.True(t, isCached1)
|
||||
assert.Equal(t, data1, retrieved1)
|
||||
|
||||
retrieved2, isCached2 := cache.Get(1000)
|
||||
assert.True(t, isCached2)
|
||||
assert.Equal(t, data2, retrieved2)
|
||||
|
||||
// Wait 300ms - 500ms cache should be stale (250ms threshold), 1000ms should still be fresh (500ms threshold)
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
|
||||
_, isCached1 = cache.Get(500)
|
||||
assert.False(t, isCached1)
|
||||
|
||||
_, isCached2 = cache.Get(1000)
|
||||
assert.True(t, isCached2)
|
||||
|
||||
// Wait another 300ms (total 600ms) - now 1000ms cache should also be stale
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
_, isCached2 = cache.Get(1000)
|
||||
assert.False(t, isCached2)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCacheOverwrite(t *testing.T) {
|
||||
cache := NewSystemDataCache()
|
||||
data1 := createTestCacheData()
|
||||
data2 := &system.CombinedData{
|
||||
Stats: system.Stats{
|
||||
Cpu: 90.0,
|
||||
Mem: 32768,
|
||||
},
|
||||
Info: system.Info{
|
||||
Hostname: "updated-host",
|
||||
},
|
||||
Containers: []*container.Stats{},
|
||||
}
|
||||
|
||||
// Set initial data
|
||||
cache.Set(data1, 1000)
|
||||
retrieved, isCached := cache.Get(1000)
|
||||
assert.True(t, isCached)
|
||||
assert.Equal(t, data1, retrieved)
|
||||
|
||||
// Overwrite with new data
|
||||
cache.Set(data2, 1000)
|
||||
retrieved, isCached = cache.Get(1000)
|
||||
assert.True(t, isCached)
|
||||
assert.Equal(t, data2, retrieved)
|
||||
assert.NotEqual(t, data1, retrieved)
|
||||
}
|
||||
|
||||
func TestCacheMiss(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
cache := NewSystemDataCache()
|
||||
|
||||
// Test getting from empty cache
|
||||
_, isCached := cache.Get(1000)
|
||||
assert.False(t, isCached)
|
||||
|
||||
// Set data for one interval
|
||||
data := createTestCacheData()
|
||||
cache.Set(data, 1000)
|
||||
|
||||
// Test getting different interval
|
||||
_, isCached = cache.Get(2000)
|
||||
assert.False(t, isCached)
|
||||
|
||||
// Test getting after data has expired
|
||||
time.Sleep(600 * time.Millisecond) // 600ms > 500ms (50% of 1000ms)
|
||||
_, isCached = cache.Get(1000)
|
||||
assert.False(t, isCached)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCacheZeroInterval(t *testing.T) {
|
||||
cache := NewSystemDataCache()
|
||||
data := createTestCacheData()
|
||||
|
||||
// Set with zero interval - should allow immediate cache
|
||||
cache.Set(data, 0)
|
||||
|
||||
// With 0 interval, 50% is 0, so it should never be considered fresh
|
||||
// (time.Since(lastUpdate) >= 0, which is not < 0)
|
||||
_, isCached := cache.Get(0)
|
||||
assert.False(t, isCached)
|
||||
}
|
||||
|
||||
func TestCacheLargeInterval(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
cache := NewSystemDataCache()
|
||||
data := createTestCacheData()
|
||||
|
||||
// Test with maximum uint16 value
|
||||
cache.Set(data, 65535) // ~65 seconds
|
||||
|
||||
// Should be fresh immediately
|
||||
_, isCached := cache.Get(65535)
|
||||
assert.True(t, isCached)
|
||||
|
||||
// Should still be fresh after a short time
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
_, isCached = cache.Get(65535)
|
||||
assert.True(t, isCached)
|
||||
})
|
||||
}
|
||||
9
agent/agent_test_helpers.go
Normal file
9
agent/agent_test_helpers.go
Normal file
@@ -0,0 +1,9 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package agent
|
||||
|
||||
// TESTING ONLY: GetConnectionManager is a helper function to get the connection manager for testing.
|
||||
func (a *Agent) GetConnectionManager() *ConnectionManager {
|
||||
return a.connectionManager
|
||||
}
|
||||
84
agent/battery/battery.go
Normal file
84
agent/battery/battery.go
Normal file
@@ -0,0 +1,84 @@
|
||||
//go:build !freebsd
|
||||
|
||||
// Package battery provides functions to check if the system has a battery and to get the battery stats.
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
|
||||
"github.com/distatus/battery"
|
||||
)
|
||||
|
||||
var (
|
||||
systemHasBattery = false
|
||||
haveCheckedBattery = false
|
||||
)
|
||||
|
||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||
func HasReadableBattery() bool {
|
||||
if haveCheckedBattery {
|
||||
return systemHasBattery
|
||||
}
|
||||
haveCheckedBattery = true
|
||||
batteries, err := battery.GetAll()
|
||||
for _, bat := range batteries {
|
||||
if bat != nil && (bat.Full > 0 || bat.Design > 0) {
|
||||
systemHasBattery = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !systemHasBattery {
|
||||
slog.Debug("No battery found", "err", err)
|
||||
}
|
||||
return systemHasBattery
|
||||
}
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state
|
||||
// percent = (current charge of all batteries) / (sum of designed/full capacity of all batteries)
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
batteries, err := battery.GetAll()
|
||||
// we'll handle errors later by skipping batteries with errors, rather
|
||||
// than skipping everything because of the presence of some errors.
|
||||
if len(batteries) == 0 {
|
||||
return batteryPercent, batteryState, errors.New("no batteries")
|
||||
}
|
||||
|
||||
totalCapacity := float64(0)
|
||||
totalCharge := float64(0)
|
||||
errs, partialErrs := err.(battery.Errors)
|
||||
|
||||
batteryState = math.MaxUint8
|
||||
|
||||
for i, bat := range batteries {
|
||||
if partialErrs && errs[i] != nil {
|
||||
// if there were some errors, like missing data, skip it
|
||||
continue
|
||||
}
|
||||
if bat == nil || bat.Full == 0 {
|
||||
// skip batteries with no capacity. Charge is unlikely to ever be zero, but
|
||||
// we can't guarantee that, so don't skip based on charge.
|
||||
continue
|
||||
}
|
||||
totalCapacity += bat.Full
|
||||
totalCharge += bat.Current
|
||||
if bat.State.Raw >= 0 {
|
||||
batteryState = uint8(bat.State.Raw)
|
||||
}
|
||||
}
|
||||
|
||||
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||
// for macs there's sometimes a ghost battery with 0 capacity
|
||||
// https://github.com/distatus/battery/issues/34
|
||||
// Instead of skipping over those batteries, we'll check for total 0 capacity
|
||||
// and return an error. This also prevents a divide by zero.
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(totalCharge / totalCapacity * 100)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
13
agent/battery/battery_freebsd.go
Normal file
13
agent/battery/battery_freebsd.go
Normal file
@@ -0,0 +1,13 @@
|
||||
//go:build freebsd
|
||||
|
||||
package battery
|
||||
|
||||
import "errors"
|
||||
|
||||
func HasReadableBattery() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func GetBatteryStats() (uint8, uint8, error) {
|
||||
return 0, 0, errors.ErrUnsupported
|
||||
}
|
||||
310
agent/client.go
Normal file
310
agent/client.go
Normal file
@@ -0,0 +1,310 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/lxzan/gws"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
const (
|
||||
wsDeadline = 70 * time.Second
|
||||
)
|
||||
|
||||
// WebSocketClient manages the WebSocket connection between the agent and hub.
|
||||
// It handles authentication, message routing, and connection lifecycle management.
|
||||
type WebSocketClient struct {
|
||||
gws.BuiltinEventHandler
|
||||
options *gws.ClientOption // WebSocket client configuration options
|
||||
agent *Agent // Reference to the parent agent
|
||||
Conn *gws.Conn // Active WebSocket connection
|
||||
hubURL *url.URL // Parsed hub URL for connection
|
||||
token string // Authentication token for hub registration
|
||||
fingerprint string // System fingerprint for identification
|
||||
hubRequest *common.HubRequest[cbor.RawMessage] // Reusable request structure for message parsing
|
||||
lastConnectAttempt time.Time // Timestamp of last connection attempt
|
||||
hubVerified bool // Whether the hub has been cryptographically verified
|
||||
}
|
||||
|
||||
// newWebSocketClient creates a new WebSocket client for the given agent.
|
||||
// It reads configuration from environment variables and validates the hub URL.
|
||||
func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
|
||||
hubURLStr, exists := GetEnv("HUB_URL")
|
||||
if !exists {
|
||||
return nil, errors.New("HUB_URL environment variable not set")
|
||||
}
|
||||
|
||||
client = &WebSocketClient{}
|
||||
|
||||
client.hubURL, err = url.Parse(hubURLStr)
|
||||
if err != nil {
|
||||
return nil, errors.New("invalid hub URL")
|
||||
}
|
||||
// get registration token
|
||||
client.token, err = getToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client.agent = agent
|
||||
client.hubRequest = &common.HubRequest[cbor.RawMessage]{}
|
||||
client.fingerprint = agent.getFingerprint()
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// getToken returns the token for the WebSocket client.
|
||||
// It first checks the TOKEN environment variable, then the TOKEN_FILE environment variable.
|
||||
// If neither is set, it returns an error.
|
||||
func getToken() (string, error) {
|
||||
// get token from env var
|
||||
token, _ := GetEnv("TOKEN")
|
||||
if token != "" {
|
||||
return token, nil
|
||||
}
|
||||
// get token from file
|
||||
tokenFile, _ := GetEnv("TOKEN_FILE")
|
||||
if tokenFile == "" {
|
||||
return "", errors.New("must set TOKEN or TOKEN_FILE")
|
||||
}
|
||||
tokenBytes, err := os.ReadFile(tokenFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(tokenBytes)), nil
|
||||
}
|
||||
|
||||
// getOptions returns the WebSocket client options, creating them if necessary.
|
||||
// It configures the connection URL, TLS settings, and authentication headers.
|
||||
func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
||||
if client.options != nil {
|
||||
return client.options
|
||||
}
|
||||
|
||||
// update the hub url to use websocket scheme and api path
|
||||
if client.hubURL.Scheme == "https" {
|
||||
client.hubURL.Scheme = "wss"
|
||||
} else {
|
||||
client.hubURL.Scheme = "ws"
|
||||
}
|
||||
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
||||
|
||||
client.options = &gws.ClientOption{
|
||||
Addr: client.hubURL.String(),
|
||||
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
RequestHeader: http.Header{
|
||||
"User-Agent": []string{getUserAgent()},
|
||||
"X-Token": []string{client.token},
|
||||
"X-Beszel": []string{beszel.Version},
|
||||
},
|
||||
}
|
||||
return client.options
|
||||
}
|
||||
|
||||
// Connect establishes a WebSocket connection to the hub.
|
||||
// It closes any existing connection before attempting to reconnect.
|
||||
func (client *WebSocketClient) Connect() (err error) {
|
||||
client.lastConnectAttempt = time.Now()
|
||||
|
||||
// make sure previous connection is closed
|
||||
client.Close()
|
||||
|
||||
client.Conn, _, err = gws.NewClient(client, client.getOptions())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go client.Conn.ReadLoop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnOpen handles WebSocket connection establishment.
|
||||
// It sets a deadline for the connection to prevent hanging.
|
||||
func (client *WebSocketClient) OnOpen(conn *gws.Conn) {
|
||||
conn.SetDeadline(time.Now().Add(wsDeadline))
|
||||
}
|
||||
|
||||
// OnClose handles WebSocket connection closure.
|
||||
// It logs the closure reason and notifies the connection manager.
|
||||
func (client *WebSocketClient) OnClose(conn *gws.Conn, err error) {
|
||||
if err != nil {
|
||||
slog.Warn("Connection closed", "err", strings.TrimPrefix(err.Error(), "gws: "))
|
||||
}
|
||||
client.agent.connectionManager.eventChan <- WebSocketDisconnect
|
||||
}
|
||||
|
||||
// OnMessage handles incoming WebSocket messages from the hub.
|
||||
// It decodes CBOR messages and routes them to appropriate handlers.
|
||||
func (client *WebSocketClient) OnMessage(conn *gws.Conn, message *gws.Message) {
|
||||
defer message.Close()
|
||||
conn.SetDeadline(time.Now().Add(wsDeadline))
|
||||
|
||||
if message.Opcode != gws.OpcodeBinary {
|
||||
return
|
||||
}
|
||||
|
||||
var HubRequest common.HubRequest[cbor.RawMessage]
|
||||
|
||||
err := cbor.Unmarshal(message.Data.Bytes(), &HubRequest)
|
||||
if err != nil {
|
||||
slog.Error("Error parsing message", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := client.handleHubRequest(&HubRequest, HubRequest.Id); err != nil {
|
||||
slog.Error("Error handling message", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// OnPing handles WebSocket ping frames.
|
||||
// It responds with a pong and updates the connection deadline.
|
||||
func (client *WebSocketClient) OnPing(conn *gws.Conn, message []byte) {
|
||||
conn.SetDeadline(time.Now().Add(wsDeadline))
|
||||
conn.WritePong(message)
|
||||
}
|
||||
|
||||
// handleAuthChallenge verifies the authenticity of the hub and returns the system's fingerprint.
|
||||
func (client *WebSocketClient) handleAuthChallenge(msg *common.HubRequest[cbor.RawMessage], requestID *uint32) (err error) {
|
||||
var authRequest common.FingerprintRequest
|
||||
if err := cbor.Unmarshal(msg.Data, &authRequest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := client.verifySignature(authRequest.Signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client.hubVerified = true
|
||||
client.agent.connectionManager.eventChan <- WebSocketConnect
|
||||
|
||||
response := &common.FingerprintResponse{
|
||||
Fingerprint: client.fingerprint,
|
||||
}
|
||||
|
||||
if authRequest.NeedSysInfo {
|
||||
response.Name, _ = GetEnv("SYSTEM_NAME")
|
||||
response.Hostname = client.agent.systemInfo.Hostname
|
||||
serverAddr := client.agent.connectionManager.serverOptions.Addr
|
||||
_, response.Port, _ = net.SplitHostPort(serverAddr)
|
||||
}
|
||||
|
||||
return client.sendResponse(response, requestID)
|
||||
}
|
||||
|
||||
// verifySignature verifies the signature of the token using the public keys.
|
||||
func (client *WebSocketClient) verifySignature(signature []byte) (err error) {
|
||||
for _, pubKey := range client.agent.keys {
|
||||
sig := ssh.Signature{
|
||||
Format: pubKey.Type(),
|
||||
Blob: signature,
|
||||
}
|
||||
if err = pubKey.Verify([]byte(client.token), &sig); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.New("invalid signature - check KEY value")
|
||||
}
|
||||
|
||||
// Close closes the WebSocket connection gracefully.
|
||||
// This method is safe to call multiple times.
|
||||
func (client *WebSocketClient) Close() {
|
||||
if client.Conn != nil {
|
||||
_ = client.Conn.WriteClose(1000, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// handleHubRequest routes the request to the appropriate handler using the handler registry.
|
||||
func (client *WebSocketClient) handleHubRequest(msg *common.HubRequest[cbor.RawMessage], requestID *uint32) error {
|
||||
ctx := &HandlerContext{
|
||||
Client: client,
|
||||
Agent: client.agent,
|
||||
Request: msg,
|
||||
RequestID: requestID,
|
||||
HubVerified: client.hubVerified,
|
||||
SendResponse: client.sendResponse,
|
||||
}
|
||||
return client.agent.handlerRegistry.Handle(ctx)
|
||||
}
|
||||
|
||||
// sendMessage encodes the given data to CBOR and sends it as a binary message over the WebSocket connection to the hub.
|
||||
func (client *WebSocketClient) sendMessage(data any) error {
|
||||
bytes, err := cbor.Marshal(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = client.Conn.WriteMessage(gws.OpcodeBinary, bytes)
|
||||
if err != nil {
|
||||
// If writing fails (e.g., broken pipe due to network issues),
|
||||
// close the connection to trigger reconnection logic (#1263)
|
||||
client.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// sendResponse sends a response with optional request ID for the new protocol
|
||||
func (client *WebSocketClient) sendResponse(data any, requestID *uint32) error {
|
||||
if requestID != nil {
|
||||
// New format with ID - use typed fields
|
||||
response := common.AgentResponse{
|
||||
Id: requestID,
|
||||
}
|
||||
|
||||
// Set the appropriate typed field based on data type
|
||||
switch v := data.(type) {
|
||||
case *system.CombinedData:
|
||||
response.SystemData = v
|
||||
case *common.FingerprintResponse:
|
||||
response.Fingerprint = v
|
||||
case string:
|
||||
response.String = &v
|
||||
case map[string]smart.SmartData:
|
||||
response.SmartData = v
|
||||
case systemd.ServiceDetails:
|
||||
response.ServiceInfo = v
|
||||
// case []byte:
|
||||
// response.RawBytes = v
|
||||
// case string:
|
||||
// response.RawBytes = []byte(v)
|
||||
default:
|
||||
// For any other type, convert to error
|
||||
response.Error = fmt.Sprintf("unsupported response type: %T", data)
|
||||
}
|
||||
|
||||
return client.sendMessage(response)
|
||||
} else {
|
||||
// Legacy format - send data directly
|
||||
return client.sendMessage(data)
|
||||
}
|
||||
}
|
||||
|
||||
// getUserAgent returns one of two User-Agent strings based on current time.
|
||||
// This is used to avoid being blocked by Cloudflare or other anti-bot measures.
|
||||
func getUserAgent() string {
|
||||
const (
|
||||
uaBase = "Mozilla/5.0 (%s) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
|
||||
uaWindows = "Windows NT 11.0; Win64; x64"
|
||||
uaMac = "Macintosh; Intel Mac OS X 14_0_0"
|
||||
)
|
||||
if time.Now().UnixNano()%2 == 0 {
|
||||
return fmt.Sprintf(uaBase, uaWindows)
|
||||
}
|
||||
return fmt.Sprintf(uaBase, uaMac)
|
||||
}
|
||||
561
agent/client_test.go
Normal file
561
agent/client_test.go
Normal file
@@ -0,0 +1,561 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// TestNewWebSocketClient tests WebSocket client creation
|
||||
func TestNewWebSocketClient(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
hubURL string
|
||||
token string
|
||||
expectError bool
|
||||
errorMsg string
|
||||
}{
|
||||
{
|
||||
name: "valid configuration",
|
||||
hubURL: "http://localhost:8080",
|
||||
token: "test-token-123",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid https URL",
|
||||
hubURL: "https://hub.example.com",
|
||||
token: "secure-token",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "missing hub URL",
|
||||
hubURL: "",
|
||||
token: "test-token",
|
||||
expectError: true,
|
||||
errorMsg: "HUB_URL environment variable not set",
|
||||
},
|
||||
{
|
||||
name: "invalid URL",
|
||||
hubURL: "ht\ttp://invalid",
|
||||
token: "test-token",
|
||||
expectError: true,
|
||||
errorMsg: "invalid hub URL",
|
||||
},
|
||||
{
|
||||
name: "missing token",
|
||||
hubURL: "http://localhost:8080",
|
||||
token: "",
|
||||
expectError: true,
|
||||
errorMsg: "must set TOKEN or TOKEN_FILE",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set up environment
|
||||
if tc.hubURL != "" {
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", tc.hubURL)
|
||||
} else {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
}
|
||||
if tc.token != "" {
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", tc.token)
|
||||
} else {
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
client, err := newWebSocketClient(agent)
|
||||
|
||||
if tc.expectError {
|
||||
assert.Error(t, err)
|
||||
if err != nil && tc.errorMsg != "" {
|
||||
assert.Contains(t, err.Error(), tc.errorMsg)
|
||||
}
|
||||
assert.Nil(t, client)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, client)
|
||||
assert.Equal(t, agent, client.agent)
|
||||
assert.Equal(t, tc.token, client.token)
|
||||
assert.Equal(t, tc.hubURL, client.hubURL.String())
|
||||
assert.NotEmpty(t, client.fingerprint)
|
||||
assert.NotNil(t, client.hubRequest)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebSocketClient_GetOptions tests WebSocket client options configuration
|
||||
func TestWebSocketClient_GetOptions(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
inputURL string
|
||||
expectedScheme string
|
||||
expectedPath string
|
||||
}{
|
||||
{
|
||||
name: "http to ws conversion",
|
||||
inputURL: "http://localhost:8080",
|
||||
expectedScheme: "ws",
|
||||
expectedPath: "/api/beszel/agent-connect",
|
||||
},
|
||||
{
|
||||
name: "https to wss conversion",
|
||||
inputURL: "https://hub.example.com",
|
||||
expectedScheme: "wss",
|
||||
expectedPath: "/api/beszel/agent-connect",
|
||||
},
|
||||
{
|
||||
name: "existing path preservation",
|
||||
inputURL: "http://localhost:8080/custom/path",
|
||||
expectedScheme: "ws",
|
||||
expectedPath: "/custom/path/api/beszel/agent-connect",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set up environment
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", tc.inputURL)
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
client, err := newWebSocketClient(agent)
|
||||
require.NoError(t, err)
|
||||
|
||||
options := client.getOptions()
|
||||
|
||||
// Parse the WebSocket URL
|
||||
wsURL, err := url.Parse(options.Addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, tc.expectedScheme, wsURL.Scheme)
|
||||
assert.Equal(t, tc.expectedPath, wsURL.Path)
|
||||
|
||||
// Check headers
|
||||
assert.Equal(t, "test-token", options.RequestHeader.Get("X-Token"))
|
||||
assert.Equal(t, beszel.Version, options.RequestHeader.Get("X-Beszel"))
|
||||
assert.Contains(t, options.RequestHeader.Get("User-Agent"), "Mozilla/5.0")
|
||||
|
||||
// Test options caching
|
||||
options2 := client.getOptions()
|
||||
assert.Same(t, options, options2, "Options should be cached")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebSocketClient_VerifySignature tests signature verification
|
||||
func TestWebSocketClient_VerifySignature(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
|
||||
// Generate test key pairs
|
||||
_, goodPrivKey, err := ed25519.GenerateKey(nil)
|
||||
require.NoError(t, err)
|
||||
goodPubKey, err := ssh.NewPublicKey(goodPrivKey.Public().(ed25519.PublicKey))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, badPrivKey, err := ed25519.GenerateKey(nil)
|
||||
require.NoError(t, err)
|
||||
badPubKey, err := ssh.NewPublicKey(badPrivKey.Public().(ed25519.PublicKey))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up environment
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
client, err := newWebSocketClient(agent)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
keys []ssh.PublicKey
|
||||
token string
|
||||
signWith ed25519.PrivateKey
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid signature with correct key",
|
||||
keys: []ssh.PublicKey{goodPubKey},
|
||||
token: "test-token",
|
||||
signWith: goodPrivKey,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid signature with wrong key",
|
||||
keys: []ssh.PublicKey{goodPubKey},
|
||||
token: "test-token",
|
||||
signWith: badPrivKey,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "valid signature with multiple keys",
|
||||
keys: []ssh.PublicKey{badPubKey, goodPubKey},
|
||||
token: "test-token",
|
||||
signWith: goodPrivKey,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "no valid keys",
|
||||
keys: []ssh.PublicKey{badPubKey},
|
||||
token: "test-token",
|
||||
signWith: goodPrivKey,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set up agent with test keys
|
||||
agent.keys = tc.keys
|
||||
client.token = tc.token
|
||||
|
||||
// Create signature
|
||||
signature := ed25519.Sign(tc.signWith, []byte(tc.token))
|
||||
|
||||
err := client.verifySignature(signature)
|
||||
|
||||
if tc.expectError {
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid signature")
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebSocketClient_HandleHubRequest tests hub request routing (basic verification logic)
|
||||
func TestWebSocketClient_HandleHubRequest(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
|
||||
// Set up environment
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
client, err := newWebSocketClient(agent)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
action common.WebSocketAction
|
||||
hubVerified bool
|
||||
expectError bool
|
||||
errorMsg string
|
||||
}{
|
||||
{
|
||||
name: "CheckFingerprint without verification",
|
||||
action: common.CheckFingerprint,
|
||||
hubVerified: false,
|
||||
expectError: false, // CheckFingerprint is allowed without verification
|
||||
},
|
||||
{
|
||||
name: "GetData without verification",
|
||||
action: common.GetData,
|
||||
hubVerified: false,
|
||||
expectError: true,
|
||||
errorMsg: "hub not verified",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
client.hubVerified = tc.hubVerified
|
||||
|
||||
// Create minimal request
|
||||
hubRequest := &common.HubRequest[cbor.RawMessage]{
|
||||
Action: tc.action,
|
||||
Data: cbor.RawMessage{},
|
||||
}
|
||||
|
||||
err := client.handleHubRequest(hubRequest, nil)
|
||||
|
||||
if tc.expectError {
|
||||
assert.Error(t, err)
|
||||
if tc.errorMsg != "" {
|
||||
assert.Contains(t, err.Error(), tc.errorMsg)
|
||||
}
|
||||
} else {
|
||||
// For CheckFingerprint, we expect a decode error since we're not providing valid data,
|
||||
// but it shouldn't be the "hub not verified" error
|
||||
if err != nil && tc.errorMsg != "" {
|
||||
assert.NotContains(t, err.Error(), tc.errorMsg)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebSocketClient_GetUserAgent tests user agent generation
|
||||
func TestGetUserAgent(t *testing.T) {
|
||||
// Run multiple times to check both variants
|
||||
userAgents := make(map[string]bool)
|
||||
|
||||
for range 20 {
|
||||
ua := getUserAgent()
|
||||
userAgents[ua] = true
|
||||
|
||||
// Check that it's a valid Mozilla user agent
|
||||
assert.Contains(t, ua, "Mozilla/5.0")
|
||||
assert.Contains(t, ua, "AppleWebKit/537.36")
|
||||
assert.Contains(t, ua, "Chrome/124.0.0.0")
|
||||
assert.Contains(t, ua, "Safari/537.36")
|
||||
|
||||
// Should contain either Windows or Mac
|
||||
isWindows := strings.Contains(ua, "Windows NT 11.0")
|
||||
isMac := strings.Contains(ua, "Macintosh; Intel Mac OS X 14_0_0")
|
||||
assert.True(t, isWindows || isMac, "User agent should contain either Windows or Mac identifier")
|
||||
}
|
||||
|
||||
// With enough iterations, we should see both variants
|
||||
// though this might occasionally fail
|
||||
if len(userAgents) == 1 {
|
||||
t.Log("Note: Only one user agent variant was generated in this test run")
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebSocketClient_Close tests connection closing
|
||||
func TestWebSocketClient_Close(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
|
||||
// Set up environment
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
client, err := newWebSocketClient(agent)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test closing with nil connection (should not panic)
|
||||
assert.NotPanics(t, func() {
|
||||
client.Close()
|
||||
})
|
||||
}
|
||||
|
||||
// TestWebSocketClient_ConnectRateLimit tests connection rate limiting
|
||||
func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
|
||||
// Set up environment
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
client, err := newWebSocketClient(agent)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set recent connection attempt
|
||||
client.lastConnectAttempt = time.Now()
|
||||
|
||||
// Test that connection fails quickly due to rate limiting
|
||||
// This won't actually connect but should fail fast
|
||||
err = client.Connect()
|
||||
assert.Error(t, err, "Connection should fail but not hang")
|
||||
}
|
||||
|
||||
// TestGetToken tests the getToken function with various scenarios
|
||||
func TestGetToken(t *testing.T) {
|
||||
unsetEnvVars := func() {
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
os.Unsetenv("TOKEN")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN_FILE")
|
||||
os.Unsetenv("TOKEN_FILE")
|
||||
}
|
||||
|
||||
t.Run("token from TOKEN environment variable", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
// Set TOKEN env var
|
||||
expectedToken := "test-token-from-env"
|
||||
os.Setenv("TOKEN", expectedToken)
|
||||
defer os.Unsetenv("TOKEN")
|
||||
|
||||
token, err := getToken()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedToken, token)
|
||||
})
|
||||
|
||||
t.Run("token from BESZEL_AGENT_TOKEN environment variable", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
// Set BESZEL_AGENT_TOKEN env var (should take precedence)
|
||||
expectedToken := "test-token-from-beszel-env"
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", expectedToken)
|
||||
defer os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
|
||||
token, err := getToken()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedToken, token)
|
||||
})
|
||||
|
||||
t.Run("token from TOKEN_FILE", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
// Create a temporary token file
|
||||
expectedToken := "test-token-from-file"
|
||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(tokenFile.Name())
|
||||
|
||||
_, err = tokenFile.WriteString(expectedToken)
|
||||
require.NoError(t, err)
|
||||
tokenFile.Close()
|
||||
|
||||
// Set TOKEN_FILE env var
|
||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||
defer os.Unsetenv("TOKEN_FILE")
|
||||
|
||||
token, err := getToken()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedToken, token)
|
||||
})
|
||||
|
||||
t.Run("token from BESZEL_AGENT_TOKEN_FILE", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
// Create a temporary token file
|
||||
expectedToken := "test-token-from-beszel-file"
|
||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(tokenFile.Name())
|
||||
|
||||
_, err = tokenFile.WriteString(expectedToken)
|
||||
require.NoError(t, err)
|
||||
tokenFile.Close()
|
||||
|
||||
// Set BESZEL_AGENT_TOKEN_FILE env var (should take precedence)
|
||||
os.Setenv("BESZEL_AGENT_TOKEN_FILE", tokenFile.Name())
|
||||
defer os.Unsetenv("BESZEL_AGENT_TOKEN_FILE")
|
||||
|
||||
token, err := getToken()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedToken, token)
|
||||
})
|
||||
|
||||
t.Run("TOKEN takes precedence over TOKEN_FILE", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
// Create a temporary token file
|
||||
fileToken := "token-from-file"
|
||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(tokenFile.Name())
|
||||
|
||||
_, err = tokenFile.WriteString(fileToken)
|
||||
require.NoError(t, err)
|
||||
tokenFile.Close()
|
||||
|
||||
// Set both TOKEN and TOKEN_FILE
|
||||
envToken := "token-from-env"
|
||||
os.Setenv("TOKEN", envToken)
|
||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||
defer func() {
|
||||
os.Unsetenv("TOKEN")
|
||||
os.Unsetenv("TOKEN_FILE")
|
||||
}()
|
||||
|
||||
token, err := getToken()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, envToken, token, "TOKEN should take precedence over TOKEN_FILE")
|
||||
})
|
||||
|
||||
t.Run("error when neither TOKEN nor TOKEN_FILE is set", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
token, err := getToken()
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "", token)
|
||||
assert.Contains(t, err.Error(), "must set TOKEN or TOKEN_FILE")
|
||||
})
|
||||
|
||||
t.Run("error when TOKEN_FILE points to non-existent file", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
// Set TOKEN_FILE to a non-existent file
|
||||
os.Setenv("TOKEN_FILE", "/non/existent/file.txt")
|
||||
defer os.Unsetenv("TOKEN_FILE")
|
||||
|
||||
token, err := getToken()
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "", token)
|
||||
assert.Contains(t, err.Error(), "no such file or directory")
|
||||
})
|
||||
|
||||
t.Run("handles empty token file", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
// Create an empty token file
|
||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(tokenFile.Name())
|
||||
tokenFile.Close()
|
||||
|
||||
// Set TOKEN_FILE env var
|
||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||
defer os.Unsetenv("TOKEN_FILE")
|
||||
|
||||
token, err := getToken()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "", token, "Empty file should return empty string")
|
||||
})
|
||||
|
||||
t.Run("strips whitespace from TOKEN_FILE", func(t *testing.T) {
|
||||
unsetEnvVars()
|
||||
|
||||
tokenWithWhitespace := " test-token-with-whitespace \n\t"
|
||||
expectedToken := "test-token-with-whitespace"
|
||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(tokenFile.Name())
|
||||
|
||||
_, err = tokenFile.WriteString(tokenWithWhitespace)
|
||||
require.NoError(t, err)
|
||||
tokenFile.Close()
|
||||
|
||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||
defer os.Unsetenv("TOKEN_FILE")
|
||||
|
||||
token, err := getToken()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedToken, token, "Whitespace should be stripped from token file content")
|
||||
})
|
||||
}
|
||||
226
agent/connection_manager.go
Normal file
226
agent/connection_manager.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/agent/health"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
)
|
||||
|
||||
// ConnectionManager manages the connection state and events for the agent.
|
||||
// It handles both WebSocket and SSH connections, automatically switching between
|
||||
// them based on availability and managing reconnection attempts.
|
||||
type ConnectionManager struct {
|
||||
agent *Agent // Reference to the parent agent
|
||||
State ConnectionState // Current connection state
|
||||
eventChan chan ConnectionEvent // Channel for connection events
|
||||
wsClient *WebSocketClient // WebSocket client for hub communication
|
||||
serverOptions ServerOptions // Configuration for SSH server
|
||||
wsTicker *time.Ticker // Ticker for WebSocket connection attempts
|
||||
isConnecting bool // Prevents multiple simultaneous reconnection attempts
|
||||
ConnectionType system.ConnectionType
|
||||
}
|
||||
|
||||
// ConnectionState represents the current connection state of the agent.
|
||||
type ConnectionState uint8
|
||||
|
||||
// ConnectionEvent represents connection-related events that can occur.
|
||||
type ConnectionEvent uint8
|
||||
|
||||
// Connection states
|
||||
const (
|
||||
Disconnected ConnectionState = iota // No active connection
|
||||
WebSocketConnected // Connected via WebSocket
|
||||
SSHConnected // Connected via SSH
|
||||
)
|
||||
|
||||
// Connection events
|
||||
const (
|
||||
WebSocketConnect ConnectionEvent = iota // WebSocket connection established
|
||||
WebSocketDisconnect // WebSocket connection lost
|
||||
SSHConnect // SSH connection established
|
||||
SSHDisconnect // SSH connection lost
|
||||
)
|
||||
|
||||
const wsTickerInterval = 10 * time.Second
|
||||
|
||||
// newConnectionManager creates a new connection manager for the given agent.
|
||||
func newConnectionManager(agent *Agent) *ConnectionManager {
|
||||
cm := &ConnectionManager{
|
||||
agent: agent,
|
||||
State: Disconnected,
|
||||
}
|
||||
return cm
|
||||
}
|
||||
|
||||
// startWsTicker starts or resets the WebSocket connection attempt ticker.
|
||||
func (c *ConnectionManager) startWsTicker() {
|
||||
if c.wsTicker == nil {
|
||||
c.wsTicker = time.NewTicker(wsTickerInterval)
|
||||
} else {
|
||||
c.wsTicker.Reset(wsTickerInterval)
|
||||
}
|
||||
}
|
||||
|
||||
// stopWsTicker stops the WebSocket connection attempt ticker.
|
||||
func (c *ConnectionManager) stopWsTicker() {
|
||||
if c.wsTicker != nil {
|
||||
c.wsTicker.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins connection attempts and enters the main event loop.
|
||||
// It handles connection events, periodic health updates, and graceful shutdown.
|
||||
func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
||||
if c.eventChan != nil {
|
||||
return errors.New("already started")
|
||||
}
|
||||
|
||||
wsClient, err := newWebSocketClient(c.agent)
|
||||
if err != nil {
|
||||
slog.Warn("Error creating WebSocket client", "err", err)
|
||||
}
|
||||
c.wsClient = wsClient
|
||||
|
||||
c.serverOptions = serverOptions
|
||||
c.eventChan = make(chan ConnectionEvent, 1)
|
||||
|
||||
// signal handling for shutdown
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
c.startWsTicker()
|
||||
c.connect()
|
||||
|
||||
// update health status immediately and every 90 seconds
|
||||
_ = health.Update()
|
||||
healthTicker := time.Tick(90 * time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case connectionEvent := <-c.eventChan:
|
||||
c.handleEvent(connectionEvent)
|
||||
case <-c.wsTicker.C:
|
||||
_ = c.startWebSocketConnection()
|
||||
case <-healthTicker:
|
||||
_ = health.Update()
|
||||
case <-sigChan:
|
||||
slog.Info("Shutting down")
|
||||
_ = c.agent.StopServer()
|
||||
c.closeWebSocket()
|
||||
return health.CleanUp()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleEvent processes connection events and updates the connection state accordingly.
|
||||
func (c *ConnectionManager) handleEvent(event ConnectionEvent) {
|
||||
switch event {
|
||||
case WebSocketConnect:
|
||||
c.handleStateChange(WebSocketConnected)
|
||||
case SSHConnect:
|
||||
c.handleStateChange(SSHConnected)
|
||||
case WebSocketDisconnect:
|
||||
if c.State == WebSocketConnected {
|
||||
c.handleStateChange(Disconnected)
|
||||
}
|
||||
case SSHDisconnect:
|
||||
if c.State == SSHConnected {
|
||||
c.handleStateChange(Disconnected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleStateChange updates the connection state and performs necessary actions
|
||||
// based on the new state, including stopping services and initiating reconnections.
|
||||
func (c *ConnectionManager) handleStateChange(newState ConnectionState) {
|
||||
if c.State == newState {
|
||||
return
|
||||
}
|
||||
c.State = newState
|
||||
switch newState {
|
||||
case WebSocketConnected:
|
||||
slog.Info("WebSocket connected", "host", c.wsClient.hubURL.Host)
|
||||
c.ConnectionType = system.ConnectionTypeWebSocket
|
||||
c.stopWsTicker()
|
||||
_ = c.agent.StopServer()
|
||||
c.isConnecting = false
|
||||
case SSHConnected:
|
||||
// stop new ws connection attempts
|
||||
slog.Info("SSH connection established")
|
||||
c.ConnectionType = system.ConnectionTypeSSH
|
||||
c.stopWsTicker()
|
||||
c.isConnecting = false
|
||||
case Disconnected:
|
||||
c.ConnectionType = system.ConnectionTypeNone
|
||||
if c.isConnecting {
|
||||
// Already handling reconnection, avoid duplicate attempts
|
||||
return
|
||||
}
|
||||
c.isConnecting = true
|
||||
slog.Warn("Disconnected from hub")
|
||||
// make sure old ws connection is closed
|
||||
c.closeWebSocket()
|
||||
// reconnect
|
||||
go c.connect()
|
||||
}
|
||||
}
|
||||
|
||||
// connect handles the connection logic with proper delays and priority.
|
||||
// It attempts WebSocket connection first, falling back to SSH server if needed.
|
||||
func (c *ConnectionManager) connect() {
|
||||
c.isConnecting = true
|
||||
defer func() {
|
||||
c.isConnecting = false
|
||||
}()
|
||||
|
||||
if c.wsClient != nil && time.Since(c.wsClient.lastConnectAttempt) < 5*time.Second {
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
// Try WebSocket first, if it fails, start SSH server
|
||||
err := c.startWebSocketConnection()
|
||||
if err != nil && c.State == Disconnected {
|
||||
c.startSSHServer()
|
||||
c.startWsTicker()
|
||||
}
|
||||
}
|
||||
|
||||
// startWebSocketConnection attempts to establish a WebSocket connection to the hub.
|
||||
func (c *ConnectionManager) startWebSocketConnection() error {
|
||||
if c.State != Disconnected {
|
||||
return errors.New("already connected")
|
||||
}
|
||||
if c.wsClient == nil {
|
||||
return errors.New("WebSocket client not initialized")
|
||||
}
|
||||
if time.Since(c.wsClient.lastConnectAttempt) < 5*time.Second {
|
||||
return errors.New("already connecting")
|
||||
}
|
||||
|
||||
err := c.wsClient.Connect()
|
||||
if err != nil {
|
||||
slog.Warn("WebSocket connection failed", "err", err)
|
||||
c.closeWebSocket()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// startSSHServer starts the SSH server if the agent is currently disconnected.
|
||||
func (c *ConnectionManager) startSSHServer() {
|
||||
if c.State == Disconnected {
|
||||
go c.agent.StartServer(c.serverOptions)
|
||||
}
|
||||
}
|
||||
|
||||
// closeWebSocket closes the WebSocket connection if it exists.
|
||||
func (c *ConnectionManager) closeWebSocket() {
|
||||
if c.wsClient != nil {
|
||||
c.wsClient.Close()
|
||||
}
|
||||
}
|
||||
315
agent/connection_manager_test.go
Normal file
315
agent/connection_manager_test.go
Normal file
@@ -0,0 +1,315 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
func createTestAgent(t *testing.T) *Agent {
|
||||
dataDir := t.TempDir()
|
||||
agent, err := NewAgent(dataDir)
|
||||
require.NoError(t, err)
|
||||
return agent
|
||||
}
|
||||
|
||||
func createTestServerOptions(t *testing.T) ServerOptions {
|
||||
// Generate test key pair
|
||||
_, privKey, err := ed25519.GenerateKey(nil)
|
||||
require.NoError(t, err)
|
||||
sshPubKey, err := ssh.NewPublicKey(privKey.Public().(ed25519.PublicKey))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Find available port
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
port := listener.Addr().(*net.TCPAddr).Port
|
||||
listener.Close()
|
||||
|
||||
return ServerOptions{
|
||||
Network: "tcp",
|
||||
Addr: fmt.Sprintf("127.0.0.1:%d", port),
|
||||
Keys: []ssh.PublicKey{sshPubKey},
|
||||
}
|
||||
}
|
||||
|
||||
// TestConnectionManager_NewConnectionManager tests connection manager creation
|
||||
func TestConnectionManager_NewConnectionManager(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := newConnectionManager(agent)
|
||||
|
||||
assert.NotNil(t, cm, "Connection manager should not be nil")
|
||||
assert.Equal(t, agent, cm.agent, "Agent reference should be set")
|
||||
assert.Equal(t, Disconnected, cm.State, "Initial state should be Disconnected")
|
||||
assert.Nil(t, cm.eventChan, "Event channel should be nil initially")
|
||||
assert.Nil(t, cm.wsClient, "WebSocket client should be nil initially")
|
||||
assert.Nil(t, cm.wsTicker, "WebSocket ticker should be nil initially")
|
||||
assert.False(t, cm.isConnecting, "isConnecting should be false initially")
|
||||
}
|
||||
|
||||
// TestConnectionManager_StateTransitions tests basic state transitions
|
||||
func TestConnectionManager_StateTransitions(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
initialState := cm.State
|
||||
cm.wsClient = &WebSocketClient{
|
||||
hubURL: &url.URL{
|
||||
Host: "localhost:8080",
|
||||
},
|
||||
}
|
||||
assert.NotNil(t, cm, "Connection manager should not be nil")
|
||||
assert.Equal(t, Disconnected, initialState, "Initial state should be Disconnected")
|
||||
|
||||
// Test state transitions
|
||||
cm.handleStateChange(WebSocketConnected)
|
||||
assert.Equal(t, WebSocketConnected, cm.State, "State should change to WebSocketConnected")
|
||||
|
||||
cm.handleStateChange(SSHConnected)
|
||||
assert.Equal(t, SSHConnected, cm.State, "State should change to SSHConnected")
|
||||
|
||||
cm.handleStateChange(Disconnected)
|
||||
assert.Equal(t, Disconnected, cm.State, "State should change to Disconnected")
|
||||
|
||||
// Test that same state doesn't trigger changes
|
||||
cm.State = WebSocketConnected
|
||||
cm.handleStateChange(WebSocketConnected)
|
||||
assert.Equal(t, WebSocketConnected, cm.State, "Same state should not trigger change")
|
||||
}
|
||||
|
||||
// TestConnectionManager_EventHandling tests event handling logic
|
||||
func TestConnectionManager_EventHandling(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
cm.wsClient = &WebSocketClient{
|
||||
hubURL: &url.URL{
|
||||
Host: "localhost:8080",
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
initialState ConnectionState
|
||||
event ConnectionEvent
|
||||
expectedState ConnectionState
|
||||
}{
|
||||
{
|
||||
name: "WebSocket connect from disconnected",
|
||||
initialState: Disconnected,
|
||||
event: WebSocketConnect,
|
||||
expectedState: WebSocketConnected,
|
||||
},
|
||||
{
|
||||
name: "SSH connect from disconnected",
|
||||
initialState: Disconnected,
|
||||
event: SSHConnect,
|
||||
expectedState: SSHConnected,
|
||||
},
|
||||
{
|
||||
name: "WebSocket disconnect from connected",
|
||||
initialState: WebSocketConnected,
|
||||
event: WebSocketDisconnect,
|
||||
expectedState: Disconnected,
|
||||
},
|
||||
{
|
||||
name: "SSH disconnect from connected",
|
||||
initialState: SSHConnected,
|
||||
event: SSHDisconnect,
|
||||
expectedState: Disconnected,
|
||||
},
|
||||
{
|
||||
name: "WebSocket disconnect from SSH connected (no change)",
|
||||
initialState: SSHConnected,
|
||||
event: WebSocketDisconnect,
|
||||
expectedState: SSHConnected,
|
||||
},
|
||||
{
|
||||
name: "SSH disconnect from WebSocket connected (no change)",
|
||||
initialState: WebSocketConnected,
|
||||
event: SSHDisconnect,
|
||||
expectedState: WebSocketConnected,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cm.State = tc.initialState
|
||||
cm.handleEvent(tc.event)
|
||||
assert.Equal(t, tc.expectedState, cm.State, "State should match expected after event")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestConnectionManager_TickerManagement tests WebSocket ticker management
|
||||
func TestConnectionManager_TickerManagement(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
|
||||
// Test starting ticker
|
||||
cm.startWsTicker()
|
||||
assert.NotNil(t, cm.wsTicker, "Ticker should be created")
|
||||
|
||||
// Test stopping ticker (should not panic)
|
||||
assert.NotPanics(t, func() {
|
||||
cm.stopWsTicker()
|
||||
}, "Stopping ticker should not panic")
|
||||
|
||||
// Test stopping nil ticker (should not panic)
|
||||
cm.wsTicker = nil
|
||||
assert.NotPanics(t, func() {
|
||||
cm.stopWsTicker()
|
||||
}, "Stopping nil ticker should not panic")
|
||||
|
||||
// Test restarting ticker
|
||||
cm.startWsTicker()
|
||||
assert.NotNil(t, cm.wsTicker, "Ticker should be recreated")
|
||||
|
||||
// Test resetting existing ticker
|
||||
firstTicker := cm.wsTicker
|
||||
cm.startWsTicker()
|
||||
assert.Equal(t, firstTicker, cm.wsTicker, "Same ticker instance should be reused")
|
||||
|
||||
cm.stopWsTicker()
|
||||
}
|
||||
|
||||
// TestConnectionManager_WebSocketConnectionFlow tests WebSocket connection logic
|
||||
func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping WebSocket connection test in short mode")
|
||||
}
|
||||
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
|
||||
// Test WebSocket connection without proper environment
|
||||
err := cm.startWebSocketConnection()
|
||||
assert.Error(t, err, "WebSocket connection should fail without proper environment")
|
||||
assert.Equal(t, Disconnected, cm.State, "State should remain Disconnected after failed connection")
|
||||
|
||||
// Test with invalid URL
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", "invalid-url")
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
// Test with missing token
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
|
||||
_, err2 := newWebSocketClient(agent)
|
||||
assert.Error(t, err2, "WebSocket client creation should fail without token")
|
||||
}
|
||||
|
||||
// TestConnectionManager_ReconnectionLogic tests reconnection prevention logic
|
||||
func TestConnectionManager_ReconnectionLogic(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
cm.eventChan = make(chan ConnectionEvent, 1)
|
||||
|
||||
// Test that isConnecting flag prevents duplicate reconnection attempts
|
||||
// Start from connected state, then simulate disconnect
|
||||
cm.State = WebSocketConnected
|
||||
cm.isConnecting = false
|
||||
|
||||
// First disconnect should trigger reconnection logic
|
||||
cm.handleStateChange(Disconnected)
|
||||
assert.Equal(t, Disconnected, cm.State, "Should change to disconnected")
|
||||
assert.True(t, cm.isConnecting, "Should set isConnecting flag")
|
||||
}
|
||||
|
||||
// TestConnectionManager_ConnectWithRateLimit tests connection rate limiting
|
||||
func TestConnectionManager_ConnectWithRateLimit(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
|
||||
// Set up environment for WebSocket client creation
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
// Create WebSocket client
|
||||
wsClient, err := newWebSocketClient(agent)
|
||||
require.NoError(t, err)
|
||||
cm.wsClient = wsClient
|
||||
|
||||
// Set recent connection attempt
|
||||
cm.wsClient.lastConnectAttempt = time.Now()
|
||||
|
||||
// Test that connection is rate limited
|
||||
err = cm.startWebSocketConnection()
|
||||
assert.Error(t, err, "Should error due to rate limiting")
|
||||
assert.Contains(t, err.Error(), "already connecting", "Error should indicate rate limiting")
|
||||
|
||||
// Test connection after rate limit expires
|
||||
cm.wsClient.lastConnectAttempt = time.Now().Add(-10 * time.Second)
|
||||
err = cm.startWebSocketConnection()
|
||||
// This will fail due to no actual server, but should not be rate limited
|
||||
assert.Error(t, err, "Connection should fail but not due to rate limiting")
|
||||
assert.NotContains(t, err.Error(), "already connecting", "Error should not indicate rate limiting")
|
||||
}
|
||||
|
||||
// TestConnectionManager_StartWithInvalidConfig tests starting with invalid configuration
|
||||
func TestConnectionManager_StartWithInvalidConfig(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
serverOptions := createTestServerOptions(t)
|
||||
|
||||
// Test starting when already started
|
||||
cm.eventChan = make(chan ConnectionEvent, 5)
|
||||
err := cm.Start(serverOptions)
|
||||
assert.Error(t, err, "Should error when starting already started connection manager")
|
||||
}
|
||||
|
||||
// TestConnectionManager_CloseWebSocket tests WebSocket closing
|
||||
func TestConnectionManager_CloseWebSocket(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
|
||||
// Test closing when no WebSocket client exists
|
||||
assert.NotPanics(t, func() {
|
||||
cm.closeWebSocket()
|
||||
}, "Should not panic when closing nil WebSocket client")
|
||||
|
||||
// Set up environment and create WebSocket client
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
wsClient, err := newWebSocketClient(agent)
|
||||
require.NoError(t, err)
|
||||
cm.wsClient = wsClient
|
||||
|
||||
// Test closing when WebSocket client exists
|
||||
assert.NotPanics(t, func() {
|
||||
cm.closeWebSocket()
|
||||
}, "Should not panic when closing WebSocket client")
|
||||
}
|
||||
|
||||
// TestConnectionManager_ConnectFlow tests the connect method
|
||||
func TestConnectionManager_ConnectFlow(t *testing.T) {
|
||||
agent := createTestAgent(t)
|
||||
cm := agent.connectionManager
|
||||
|
||||
// Test connect without WebSocket client
|
||||
assert.NotPanics(t, func() {
|
||||
cm.connect()
|
||||
}, "Connect should not panic without WebSocket client")
|
||||
}
|
||||
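Note that the test files in this comparison (connection_manager_test.go above, and data_dir_test.go and disk_test.go below) carry the //go:build testing constraint, so they are only compiled when that tag is supplied, e.g. go test -tags testing ./agent/... (the exact package path may differ depending on where the module root sits).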
134
agent/cpu.go
Normal file
@@ -0,0 +1,134 @@
package agent

import (
	"math"
	"runtime"

	"github.com/henrygd/beszel/internal/entities/system"
	"github.com/shirou/gopsutil/v4/cpu"
)

var lastCpuTimes = make(map[uint16]cpu.TimesStat)
var lastPerCoreCpuTimes = make(map[uint16][]cpu.TimesStat)

// init initializes the CPU monitoring by storing the initial CPU times
// for the default 60-second cache interval.
func init() {
	if times, err := cpu.Times(false); err == nil {
		lastCpuTimes[60000] = times[0]
	}
	if perCoreTimes, err := cpu.Times(true); err == nil {
		lastPerCoreCpuTimes[60000] = perCoreTimes
	}
}

// CpuMetrics contains detailed CPU usage breakdown
type CpuMetrics struct {
	Total  float64
	User   float64
	System float64
	Iowait float64
	Steal  float64
	Idle   float64
}

// getCpuMetrics calculates detailed CPU usage metrics using cached previous measurements.
// It returns percentages for total, user, system, iowait, and steal time.
func getCpuMetrics(cacheTimeMs uint16) (CpuMetrics, error) {
	times, err := cpu.Times(false)
	if err != nil || len(times) == 0 {
		return CpuMetrics{}, err
	}
	// if cacheTimeMs is not in lastCpuTimes, use 60000 as fallback lastCpuTime
	if _, ok := lastCpuTimes[cacheTimeMs]; !ok {
		lastCpuTimes[cacheTimeMs] = lastCpuTimes[60000]
	}

	t1 := lastCpuTimes[cacheTimeMs]
	t2 := times[0]

	t1All, _ := getAllBusy(t1)
	t2All, _ := getAllBusy(t2)

	totalDelta := t2All - t1All
	if totalDelta <= 0 {
		return CpuMetrics{}, nil
	}

	metrics := CpuMetrics{
		Total:  calculateBusy(t1, t2),
		User:   clampPercent((t2.User - t1.User) / totalDelta * 100),
		System: clampPercent((t2.System - t1.System) / totalDelta * 100),
		Iowait: clampPercent((t2.Iowait - t1.Iowait) / totalDelta * 100),
		Steal:  clampPercent((t2.Steal - t1.Steal) / totalDelta * 100),
		Idle:   clampPercent((t2.Idle - t1.Idle) / totalDelta * 100),
	}

	lastCpuTimes[cacheTimeMs] = times[0]
	return metrics, nil
}

// clampPercent ensures the percentage is between 0 and 100
func clampPercent(value float64) float64 {
	return math.Min(100, math.Max(0, value))
}

// getPerCoreCpuUsage calculates per-core CPU busy usage as integer percentages (0-100).
// It uses cached previous measurements for the provided cache interval.
func getPerCoreCpuUsage(cacheTimeMs uint16) (system.Uint8Slice, error) {
	perCoreTimes, err := cpu.Times(true)
	if err != nil || len(perCoreTimes) == 0 {
		return nil, err
	}

	// Initialize cache if needed
	if _, ok := lastPerCoreCpuTimes[cacheTimeMs]; !ok {
		lastPerCoreCpuTimes[cacheTimeMs] = lastPerCoreCpuTimes[60000]
	}

	lastTimes := lastPerCoreCpuTimes[cacheTimeMs]

	// Limit to the number of cores available in both samples
	length := len(perCoreTimes)
	if len(lastTimes) < length {
		length = len(lastTimes)
	}

	usage := make([]uint8, length)
	for i := 0; i < length; i++ {
		t1 := lastTimes[i]
		t2 := perCoreTimes[i]
		usage[i] = uint8(math.Round(calculateBusy(t1, t2)))
	}

	lastPerCoreCpuTimes[cacheTimeMs] = perCoreTimes
	return usage, nil
}

// calculateBusy calculates the CPU busy percentage between two time points.
// It computes the ratio of busy time to total time elapsed between t1 and t2,
// returning a percentage clamped between 0 and 100.
func calculateBusy(t1, t2 cpu.TimesStat) float64 {
	t1All, t1Busy := getAllBusy(t1)
	t2All, t2Busy := getAllBusy(t2)

	if t2All <= t1All || t2Busy <= t1Busy {
		return 0
	}
	return clampPercent((t2Busy - t1Busy) / (t2All - t1All) * 100)
}

// getAllBusy calculates the total CPU time and busy CPU time from CPU times statistics.
// On Linux, it excludes guest and guest_nice time from the total to match kernel behavior.
// Returns total CPU time and busy CPU time (total minus idle and I/O wait time).
func getAllBusy(t cpu.TimesStat) (float64, float64) {
	tot := t.Total()
	if runtime.GOOS == "linux" {
		tot -= t.Guest     // Linux 2.6.24+
		tot -= t.GuestNice // Linux 3.2.0+
	}

	busy := tot - t.Idle - t.Iowait

	return tot, busy
}
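For reference, the busy-percentage calculation above can be reproduced standalone with gopsutil by taking two samples and applying the same total/busy delta. A minimal sketch (the one-second sampling interval and output format are arbitrary, and error handling is reduced to a length check):

package main

import (
	"fmt"
	"runtime"
	"time"

	"github.com/shirou/gopsutil/v4/cpu"
)

// allBusy mirrors getAllBusy: total CPU time and busy time (total minus idle and iowait).
func allBusy(t cpu.TimesStat) (float64, float64) {
	tot := t.Total()
	if runtime.GOOS == "linux" {
		tot -= t.Guest + t.GuestNice
	}
	return tot, tot - t.Idle - t.Iowait
}

func main() {
	s1, _ := cpu.Times(false)
	time.Sleep(time.Second)
	s2, _ := cpu.Times(false)
	if len(s1) == 0 || len(s2) == 0 {
		return
	}

	t1All, t1Busy := allBusy(s1[0])
	t2All, t2Busy := allBusy(s2[0])
	if t2All > t1All {
		fmt.Printf("busy: %.2f%%\n", (t2Busy-t1Busy)/(t2All-t1All)*100)
	}
}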
117
agent/data_dir.go
Normal file
@@ -0,0 +1,117 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// getDataDir returns the path to the data directory for the agent and an error
|
||||
// if the directory is not valid. Attempts to find the optimal data directory if
|
||||
// no data directories are provided.
|
||||
func getDataDir(dataDirs ...string) (string, error) {
|
||||
if len(dataDirs) > 0 {
|
||||
return testDataDirs(dataDirs)
|
||||
}
|
||||
|
||||
dataDir, _ := GetEnv("DATA_DIR")
|
||||
if dataDir != "" {
|
||||
dataDirs = append(dataDirs, dataDir)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
dataDirs = append(dataDirs,
|
||||
filepath.Join(os.Getenv("APPDATA"), "beszel-agent"),
|
||||
filepath.Join(os.Getenv("LOCALAPPDATA"), "beszel-agent"),
|
||||
)
|
||||
} else {
|
||||
dataDirs = append(dataDirs, "/var/lib/beszel-agent")
|
||||
if homeDir, err := os.UserHomeDir(); err == nil {
|
||||
dataDirs = append(dataDirs, filepath.Join(homeDir, ".config", "beszel"))
|
||||
}
|
||||
}
|
||||
return testDataDirs(dataDirs)
|
||||
}
|
||||
|
||||
func testDataDirs(paths []string) (string, error) {
|
||||
// first check if the directory exists and is writable
|
||||
for _, path := range paths {
|
||||
if valid, _ := isValidDataDir(path, false); valid {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
// if the directory doesn't exist, try to create it
|
||||
for _, path := range paths {
|
||||
exists, _ := directoryExists(path)
|
||||
if exists {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the created directory is actually writable
|
||||
writable, _ := directoryIsWritable(path)
|
||||
if !writable {
|
||||
continue
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
return "", errors.New("data directory not found")
|
||||
}
|
||||
|
||||
func isValidDataDir(path string, createIfNotExists bool) (bool, error) {
|
||||
exists, err := directoryExists(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
if !createIfNotExists {
|
||||
return false, nil
|
||||
}
|
||||
if err = os.MkdirAll(path, 0755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// Always check if the directory is writable
|
||||
writable, err := directoryIsWritable(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return writable, nil
|
||||
}
|
||||
|
||||
// directoryExists checks if a directory exists
|
||||
func directoryExists(path string) (bool, error) {
|
||||
// Check if directory exists
|
||||
stat, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
if !stat.IsDir() {
|
||||
return false, fmt.Errorf("%s is not a directory", path)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// directoryIsWritable tests if a directory is writable by creating and removing a temporary file
|
||||
func directoryIsWritable(path string) (bool, error) {
|
||||
testFile := filepath.Join(path, ".write-test")
|
||||
file, err := os.Create(testFile)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer file.Close()
|
||||
defer os.Remove(testFile)
|
||||
return true, nil
|
||||
}
|
||||
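The writability probe used by data_dir.go (create and remove a throwaway file) is the portable way to verify a candidate directory. A standalone sketch of the same idea, not part of the original file:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// isWritable reports whether a file can be created inside dir,
// the same probe directoryIsWritable performs.
func isWritable(dir string) bool {
	probe := filepath.Join(dir, ".write-test")
	f, err := os.Create(probe)
	if err != nil {
		return false
	}
	f.Close()
	os.Remove(probe)
	return true
}

func main() {
	fmt.Println(isWritable(os.TempDir())) // normally true
}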
263
agent/data_dir_test.go
Normal file
@@ -0,0 +1,263 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetDataDir(t *testing.T) {
|
||||
// Test with explicit dataDir parameter
|
||||
t.Run("explicit data dir", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
result, err := getDataDir(tempDir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tempDir, result)
|
||||
})
|
||||
|
||||
// Test with explicit non-existent dataDir that can be created
|
||||
t.Run("explicit data dir - create new", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
newDir := filepath.Join(tempDir, "new-data-dir")
|
||||
result, err := getDataDir(newDir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, newDir, result)
|
||||
|
||||
// Verify directory was created
|
||||
stat, err := os.Stat(newDir)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, stat.IsDir())
|
||||
})
|
||||
|
||||
// Test with DATA_DIR environment variable
|
||||
t.Run("DATA_DIR environment variable", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
|
||||
// Set environment variable
|
||||
oldValue := os.Getenv("BESZEL_AGENT_DATA_DIR")
|
||||
defer func() {
|
||||
if oldValue == "" {
|
||||
os.Unsetenv("BESZEL_AGENT_DATA_DIR")
|
||||
} else {
|
||||
os.Setenv("BESZEL_AGENT_DATA_DIR", oldValue)
|
||||
}
|
||||
}()
|
||||
|
||||
os.Setenv("BESZEL_AGENT_DATA_DIR", tempDir)
|
||||
|
||||
result, err := getDataDir()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tempDir, result)
|
||||
})
|
||||
|
||||
// Test with invalid explicit dataDir
|
||||
t.Run("invalid explicit data dir", func(t *testing.T) {
|
||||
invalidPath := "/invalid/path/that/cannot/be/created"
|
||||
_, err := getDataDir(invalidPath)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
// Test fallback behavior (empty dataDir, no env var)
|
||||
t.Run("fallback to default directories", func(t *testing.T) {
|
||||
// Clear DATA_DIR environment variable
|
||||
oldValue := os.Getenv("DATA_DIR")
|
||||
defer func() {
|
||||
if oldValue == "" {
|
||||
os.Unsetenv("DATA_DIR")
|
||||
} else {
|
||||
os.Setenv("DATA_DIR", oldValue)
|
||||
}
|
||||
}()
|
||||
os.Unsetenv("DATA_DIR")
|
||||
|
||||
// This will try platform-specific defaults, which may or may not work
|
||||
// We're mainly testing that it doesn't panic and returns some result
|
||||
result, err := getDataDir()
|
||||
// We don't assert success/failure here since it depends on system permissions
|
||||
// Just verify we get a string result if no error
|
||||
if err == nil {
|
||||
assert.NotEmpty(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestTestDataDirs(t *testing.T) {
|
||||
// Test with existing valid directory
|
||||
t.Run("existing valid directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
result, err := testDataDirs([]string{tempDir})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tempDir, result)
|
||||
})
|
||||
|
||||
// Test with multiple directories, first one valid
|
||||
t.Run("multiple dirs - first valid", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
invalidDir := "/invalid/path"
|
||||
result, err := testDataDirs([]string{tempDir, invalidDir})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tempDir, result)
|
||||
})
|
||||
|
||||
// Test with multiple directories, second one valid
|
||||
t.Run("multiple dirs - second valid", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
invalidDir := "/invalid/path"
|
||||
result, err := testDataDirs([]string{invalidDir, tempDir})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tempDir, result)
|
||||
})
|
||||
|
||||
// Test with non-existing directory that can be created
|
||||
t.Run("create new directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
newDir := filepath.Join(tempDir, "new-dir")
|
||||
result, err := testDataDirs([]string{newDir})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, newDir, result)
|
||||
|
||||
// Verify directory was created
|
||||
stat, err := os.Stat(newDir)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, stat.IsDir())
|
||||
})
|
||||
|
||||
// Test with no valid directories
|
||||
t.Run("no valid directories", func(t *testing.T) {
|
||||
invalidPaths := []string{"/invalid/path1", "/invalid/path2"}
|
||||
_, err := testDataDirs(invalidPaths)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "data directory not found")
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsValidDataDir(t *testing.T) {
|
||||
// Test with existing directory
|
||||
t.Run("existing directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
valid, err := isValidDataDir(tempDir, false)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, valid)
|
||||
})
|
||||
|
||||
// Test with non-existing directory, createIfNotExists=false
|
||||
t.Run("non-existing dir - no create", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
nonExistentDir := filepath.Join(tempDir, "does-not-exist")
|
||||
valid, err := isValidDataDir(nonExistentDir, false)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, valid)
|
||||
})
|
||||
|
||||
// Test with non-existing directory, createIfNotExists=true
|
||||
t.Run("non-existing dir - create", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
newDir := filepath.Join(tempDir, "new-dir")
|
||||
valid, err := isValidDataDir(newDir, true)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, valid)
|
||||
|
||||
// Verify directory was created
|
||||
stat, err := os.Stat(newDir)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, stat.IsDir())
|
||||
})
|
||||
|
||||
// Test with file instead of directory
|
||||
t.Run("file instead of directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
tempFile := filepath.Join(tempDir, "testfile")
|
||||
err := os.WriteFile(tempFile, []byte("test"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
valid, err := isValidDataDir(tempFile, false)
|
||||
assert.Error(t, err)
|
||||
assert.False(t, valid)
|
||||
assert.Contains(t, err.Error(), "is not a directory")
|
||||
})
|
||||
}
|
||||
|
||||
func TestDirectoryExists(t *testing.T) {
|
||||
// Test with existing directory
|
||||
t.Run("existing directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
exists, err := directoryExists(tempDir)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, exists)
|
||||
})
|
||||
|
||||
// Test with non-existing directory
|
||||
t.Run("non-existing directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
nonExistentDir := filepath.Join(tempDir, "does-not-exist")
|
||||
exists, err := directoryExists(nonExistentDir)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, exists)
|
||||
})
|
||||
|
||||
// Test with file instead of directory
|
||||
t.Run("file instead of directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
tempFile := filepath.Join(tempDir, "testfile")
|
||||
err := os.WriteFile(tempFile, []byte("test"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
exists, err := directoryExists(tempFile)
|
||||
assert.Error(t, err)
|
||||
assert.False(t, exists)
|
||||
assert.Contains(t, err.Error(), "is not a directory")
|
||||
})
|
||||
}
|
||||
|
||||
func TestDirectoryIsWritable(t *testing.T) {
|
||||
// Test with writable directory
|
||||
t.Run("writable directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
writable, err := directoryIsWritable(tempDir)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, writable)
|
||||
})
|
||||
|
||||
// Test with non-existing directory
|
||||
t.Run("non-existing directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
nonExistentDir := filepath.Join(tempDir, "does-not-exist")
|
||||
writable, err := directoryIsWritable(nonExistentDir)
|
||||
assert.Error(t, err)
|
||||
assert.False(t, writable)
|
||||
})
|
||||
|
||||
// Test with non-writable directory (Unix-like systems only)
|
||||
t.Run("non-writable directory", func(t *testing.T) {
|
||||
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
|
||||
t.Skip("Skipping non-writable directory test on", runtime.GOOS)
|
||||
}
|
||||
|
||||
tempDir := t.TempDir()
|
||||
readOnlyDir := filepath.Join(tempDir, "readonly")
|
||||
|
||||
// Create the directory
|
||||
err := os.Mkdir(readOnlyDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make it read-only
|
||||
err = os.Chmod(readOnlyDir, 0444)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Restore permissions after test for cleanup
|
||||
defer func() {
|
||||
os.Chmod(readOnlyDir, 0755)
|
||||
}()
|
||||
|
||||
writable, err := directoryIsWritable(readOnlyDir)
|
||||
assert.Error(t, err)
|
||||
assert.False(t, writable)
|
||||
})
|
||||
}
|
||||
100
agent/deltatracker/deltatracker.go
Normal file
@@ -0,0 +1,100 @@
// Package deltatracker provides a tracker for calculating differences in numeric values over time.
package deltatracker

import (
	"sync"

	"golang.org/x/exp/constraints"
)

// Numeric is a constraint that permits any integer or floating-point type.
type Numeric interface {
	constraints.Integer | constraints.Float
}

// DeltaTracker is a generic, thread-safe tracker for calculating differences
// in numeric values over time.
// K is the key type (e.g., int, string).
// V is the value type (e.g., int, int64, float32, float64).
type DeltaTracker[K comparable, V Numeric] struct {
	sync.RWMutex
	current  map[K]V
	previous map[K]V
}

// NewDeltaTracker creates a new generic tracker.
func NewDeltaTracker[K comparable, V Numeric]() *DeltaTracker[K, V] {
	return &DeltaTracker[K, V]{
		current:  make(map[K]V),
		previous: make(map[K]V),
	}
}

// Set records the current value for a given ID.
func (t *DeltaTracker[K, V]) Set(id K, value V) {
	t.Lock()
	defer t.Unlock()
	t.current[id] = value
}

// Snapshot returns a copy of the current map.
// func (t *DeltaTracker[K, V]) Snapshot() map[K]V {
// 	t.RLock()
// 	defer t.RUnlock()
//
// 	copyMap := make(map[K]V, len(t.current))
// 	maps.Copy(copyMap, t.current)
// 	return copyMap
// }

// Deltas returns a map of all calculated deltas for the current interval.
func (t *DeltaTracker[K, V]) Deltas() map[K]V {
	t.RLock()
	defer t.RUnlock()

	deltas := make(map[K]V)
	for id, currentVal := range t.current {
		if previousVal, ok := t.previous[id]; ok {
			deltas[id] = currentVal - previousVal
		} else {
			deltas[id] = 0
		}
	}
	return deltas
}

// Previous returns the previously recorded value for the given key, if it exists.
func (t *DeltaTracker[K, V]) Previous(id K) (V, bool) {
	t.RLock()
	defer t.RUnlock()

	value, ok := t.previous[id]
	return value, ok
}

// Delta returns the delta for a single key.
// Returns 0 if the key doesn't exist or has no previous value.
func (t *DeltaTracker[K, V]) Delta(id K) V {
	t.RLock()
	defer t.RUnlock()

	currentVal, currentOk := t.current[id]
	if !currentOk {
		return 0
	}

	previousVal, previousOk := t.previous[id]
	if !previousOk {
		return 0
	}

	return currentVal - previousVal
}

// Cycle prepares the tracker for the next interval.
func (t *DeltaTracker[K, V]) Cycle() {
	t.Lock()
	defer t.Unlock()
	t.previous = t.current
	t.current = make(map[K]V)
}
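A short usage sketch, not part of this commit, showing the tracker driven the way the docker collector below uses it for per-container network byte counters (the container IDs and byte values are made up):

package main

import (
	"fmt"

	"github.com/henrygd/beszel/agent/deltatracker"
)

func main() {
	sent := deltatracker.NewDeltaTracker[string, uint64]()

	// first collection interval
	sent.Set("container-a", 1_000)
	sent.Set("container-b", 5_000)
	sent.Cycle()

	// second collection interval
	sent.Set("container-a", 1_750)
	sent.Set("container-b", 5_200)

	fmt.Println(sent.Deltas()) // map[container-a:750 container-b:200]
}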
217
agent/deltatracker/deltatracker_test.go
Normal file
@@ -0,0 +1,217 @@
|
||||
package deltatracker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func ExampleDeltaTracker() {
|
||||
tracker := NewDeltaTracker[string, int]()
|
||||
tracker.Set("key1", 10)
|
||||
tracker.Set("key2", 20)
|
||||
tracker.Cycle()
|
||||
tracker.Set("key1", 15)
|
||||
tracker.Set("key2", 30)
|
||||
fmt.Println(tracker.Delta("key1"))
|
||||
fmt.Println(tracker.Delta("key2"))
|
||||
fmt.Println(tracker.Deltas())
|
||||
// Output: 5
|
||||
// 10
|
||||
// map[key1:5 key2:10]
|
||||
}
|
||||
|
||||
func TestNewDeltaTracker(t *testing.T) {
|
||||
tracker := NewDeltaTracker[string, int]()
|
||||
assert.NotNil(t, tracker)
|
||||
assert.Empty(t, tracker.current)
|
||||
assert.Empty(t, tracker.previous)
|
||||
}
|
||||
|
||||
func TestSet(t *testing.T) {
|
||||
tracker := NewDeltaTracker[string, int]()
|
||||
tracker.Set("key1", 10)
|
||||
|
||||
tracker.RLock()
|
||||
defer tracker.RUnlock()
|
||||
|
||||
assert.Equal(t, 10, tracker.current["key1"])
|
||||
}
|
||||
|
||||
func TestDeltas(t *testing.T) {
|
||||
tracker := NewDeltaTracker[string, int]()
|
||||
|
||||
// Test with no previous values
|
||||
tracker.Set("key1", 10)
|
||||
tracker.Set("key2", 20)
|
||||
|
||||
deltas := tracker.Deltas()
|
||||
assert.Equal(t, 0, deltas["key1"])
|
||||
assert.Equal(t, 0, deltas["key2"])
|
||||
|
||||
// Cycle to move current to previous
|
||||
tracker.Cycle()
|
||||
|
||||
// Set new values and check deltas
|
||||
tracker.Set("key1", 15) // Delta should be 5 (15-10)
|
||||
tracker.Set("key2", 25) // Delta should be 5 (25-20)
|
||||
tracker.Set("key3", 30) // New key, delta should be 0
|
||||
|
||||
deltas = tracker.Deltas()
|
||||
assert.Equal(t, 5, deltas["key1"])
|
||||
assert.Equal(t, 5, deltas["key2"])
|
||||
assert.Equal(t, 0, deltas["key3"])
|
||||
}
|
||||
|
||||
func TestCycle(t *testing.T) {
|
||||
tracker := NewDeltaTracker[string, int]()
|
||||
|
||||
tracker.Set("key1", 10)
|
||||
tracker.Set("key2", 20)
|
||||
|
||||
// Verify current has values
|
||||
tracker.RLock()
|
||||
assert.Equal(t, 10, tracker.current["key1"])
|
||||
assert.Equal(t, 20, tracker.current["key2"])
|
||||
assert.Empty(t, tracker.previous)
|
||||
tracker.RUnlock()
|
||||
|
||||
tracker.Cycle()
|
||||
|
||||
// After cycle, previous should have the old current values
|
||||
// and current should be empty
|
||||
tracker.RLock()
|
||||
assert.Empty(t, tracker.current)
|
||||
assert.Equal(t, 10, tracker.previous["key1"])
|
||||
assert.Equal(t, 20, tracker.previous["key2"])
|
||||
tracker.RUnlock()
|
||||
}
|
||||
|
||||
func TestCompleteWorkflow(t *testing.T) {
|
||||
tracker := NewDeltaTracker[string, int]()
|
||||
|
||||
// First interval
|
||||
tracker.Set("server1", 100)
|
||||
tracker.Set("server2", 200)
|
||||
|
||||
// Get deltas for first interval (should be zero)
|
||||
firstDeltas := tracker.Deltas()
|
||||
assert.Equal(t, 0, firstDeltas["server1"])
|
||||
assert.Equal(t, 0, firstDeltas["server2"])
|
||||
|
||||
// Cycle to next interval
|
||||
tracker.Cycle()
|
||||
|
||||
// Second interval
|
||||
tracker.Set("server1", 150) // Delta: 50
|
||||
tracker.Set("server2", 180) // Delta: -20
|
||||
tracker.Set("server3", 300) // New server, delta: 300
|
||||
|
||||
secondDeltas := tracker.Deltas()
|
||||
assert.Equal(t, 50, secondDeltas["server1"])
|
||||
assert.Equal(t, -20, secondDeltas["server2"])
|
||||
assert.Equal(t, 0, secondDeltas["server3"])
|
||||
}
|
||||
|
||||
func TestDeltaTrackerWithDifferentTypes(t *testing.T) {
|
||||
// Test with int64
|
||||
intTracker := NewDeltaTracker[string, int64]()
|
||||
intTracker.Set("pid1", 1000)
|
||||
intTracker.Cycle()
|
||||
intTracker.Set("pid1", 1200)
|
||||
intDeltas := intTracker.Deltas()
|
||||
assert.Equal(t, int64(200), intDeltas["pid1"])
|
||||
|
||||
// Test with float64
|
||||
floatTracker := NewDeltaTracker[string, float64]()
|
||||
floatTracker.Set("cpu1", 1.5)
|
||||
floatTracker.Cycle()
|
||||
floatTracker.Set("cpu1", 2.7)
|
||||
floatDeltas := floatTracker.Deltas()
|
||||
assert.InDelta(t, 1.2, floatDeltas["cpu1"], 0.0001)
|
||||
|
||||
// Test with int keys
|
||||
pidTracker := NewDeltaTracker[int, int64]()
|
||||
pidTracker.Set(101, 20000)
|
||||
pidTracker.Cycle()
|
||||
pidTracker.Set(101, 22500)
|
||||
pidDeltas := pidTracker.Deltas()
|
||||
assert.Equal(t, int64(2500), pidDeltas[101])
|
||||
}
|
||||
|
||||
func TestDelta(t *testing.T) {
|
||||
tracker := NewDeltaTracker[string, int]()
|
||||
|
||||
// Test getting delta for non-existent key
|
||||
result := tracker.Delta("nonexistent")
|
||||
assert.Equal(t, 0, result)
|
||||
|
||||
// Test getting delta for key with no previous value
|
||||
tracker.Set("key1", 10)
|
||||
result = tracker.Delta("key1")
|
||||
assert.Equal(t, 0, result)
|
||||
|
||||
// Cycle to move current to previous
|
||||
tracker.Cycle()
|
||||
|
||||
// Test getting delta for key with previous value
|
||||
tracker.Set("key1", 15)
|
||||
result = tracker.Delta("key1")
|
||||
assert.Equal(t, 5, result)
|
||||
|
||||
// Test getting delta for key that exists in previous but not current
|
||||
result = tracker.Delta("key1")
|
||||
assert.Equal(t, 5, result) // Should still return 5
|
||||
|
||||
// Test getting delta for key that exists in current but not previous
|
||||
tracker.Set("key2", 20)
|
||||
result = tracker.Delta("key2")
|
||||
assert.Equal(t, 0, result)
|
||||
}
|
||||
|
||||
func TestDeltaWithDifferentTypes(t *testing.T) {
|
||||
// Test with int64
|
||||
intTracker := NewDeltaTracker[string, int64]()
|
||||
intTracker.Set("pid1", 1000)
|
||||
intTracker.Cycle()
|
||||
intTracker.Set("pid1", 1200)
|
||||
result := intTracker.Delta("pid1")
|
||||
assert.Equal(t, int64(200), result)
|
||||
|
||||
// Test with float64
|
||||
floatTracker := NewDeltaTracker[string, float64]()
|
||||
floatTracker.Set("cpu1", 1.5)
|
||||
floatTracker.Cycle()
|
||||
floatTracker.Set("cpu1", 2.7)
|
||||
floatResult := floatTracker.Delta("cpu1")
|
||||
assert.InDelta(t, 1.2, floatResult, 0.0001)
|
||||
|
||||
// Test with int keys
|
||||
pidTracker := NewDeltaTracker[int, int64]()
|
||||
pidTracker.Set(101, 20000)
|
||||
pidTracker.Cycle()
|
||||
pidTracker.Set(101, 22500)
|
||||
pidResult := pidTracker.Delta(101)
|
||||
assert.Equal(t, int64(2500), pidResult)
|
||||
}
|
||||
|
||||
func TestDeltaConcurrentAccess(t *testing.T) {
|
||||
tracker := NewDeltaTracker[string, int]()
|
||||
|
||||
// Set initial values
|
||||
tracker.Set("key1", 10)
|
||||
tracker.Set("key2", 20)
|
||||
tracker.Cycle()
|
||||
|
||||
// Set new values
|
||||
tracker.Set("key1", 15)
|
||||
tracker.Set("key2", 25)
|
||||
|
||||
// Test concurrent access safety
|
||||
result1 := tracker.Delta("key1")
|
||||
result2 := tracker.Delta("key2")
|
||||
|
||||
assert.Equal(t, 5, result1)
|
||||
assert.Equal(t, 5, result2)
|
||||
}
|
||||
314
agent/disk.go
Normal file
@@ -0,0 +1,314 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"github.com/shirou/gopsutil/v4/disk"
|
||||
)
|
||||
|
||||
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
||||
// Returns the device/filesystem part and the custom name part
|
||||
func parseFilesystemEntry(entry string) (device, customName string) {
|
||||
entry = strings.TrimSpace(entry)
|
||||
if parts := strings.SplitN(entry, "__", 2); len(parts) == 2 {
|
||||
device = strings.TrimSpace(parts[0])
|
||||
customName = strings.TrimSpace(parts[1])
|
||||
} else {
|
||||
device = entry
|
||||
}
|
||||
return device, customName
|
||||
}
|
||||
|
||||
// Sets up the filesystems to monitor for disk usage and I/O.
|
||||
func (a *Agent) initializeDiskInfo() {
|
||||
filesystem, _ := GetEnv("FILESYSTEM")
|
||||
efPath := "/extra-filesystems"
|
||||
hasRoot := false
|
||||
isWindows := runtime.GOOS == "windows"
|
||||
|
||||
partitions, err := disk.Partitions(false)
|
||||
if err != nil {
|
||||
slog.Error("Error getting disk partitions", "err", err)
|
||||
}
|
||||
slog.Debug("Disk", "partitions", partitions)
|
||||
|
||||
// trim trailing backslash for Windows devices (#1361)
|
||||
if isWindows {
|
||||
for i, p := range partitions {
|
||||
partitions[i].Device = strings.TrimSuffix(p.Device, "\\")
|
||||
}
|
||||
}
|
||||
|
||||
// ioContext := context.WithValue(a.sensorsContext,
|
||||
// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
|
||||
// )
|
||||
// diskIoCounters, err := disk.IOCountersWithContext(ioContext)
|
||||
|
||||
diskIoCounters, err := disk.IOCounters()
|
||||
if err != nil {
|
||||
slog.Error("Error getting diskstats", "err", err)
|
||||
}
|
||||
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
||||
|
||||
// Helper function to add a filesystem to fsStats if it doesn't exist
|
||||
addFsStat := func(device, mountpoint string, root bool, customName ...string) {
|
||||
var key string
|
||||
if isWindows {
|
||||
key = device
|
||||
} else {
|
||||
key = filepath.Base(device)
|
||||
}
|
||||
var ioMatch bool
|
||||
if _, exists := a.fsStats[key]; !exists {
|
||||
if root {
|
||||
slog.Info("Detected root device", "name", key)
|
||||
// Check if root device is in /proc/diskstats, use fallback if not
|
||||
if _, ioMatch = diskIoCounters[key]; !ioMatch {
|
||||
key, ioMatch = findIoDevice(filesystem, diskIoCounters, a.fsStats)
|
||||
if !ioMatch {
|
||||
slog.Info("Using I/O fallback", "device", device, "mountpoint", mountpoint, "fallback", key)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Check if non-root has diskstats and fall back to folder name if not
|
||||
// Scenario: device is encrypted and named luks-2bcb02be-999d-4417-8d18-5c61e660fb6e - not in /proc/diskstats.
|
||||
// However, the device can be specified by mounting folder from luks device at /extra-filesystems/sda1
|
||||
if _, ioMatch = diskIoCounters[key]; !ioMatch {
|
||||
efBase := filepath.Base(mountpoint)
|
||||
if _, ioMatch = diskIoCounters[efBase]; ioMatch {
|
||||
key = efBase
|
||||
}
|
||||
}
|
||||
}
|
||||
fsStats := &system.FsStats{Root: root, Mountpoint: mountpoint}
|
||||
if len(customName) > 0 && customName[0] != "" {
|
||||
fsStats.Name = customName[0]
|
||||
}
|
||||
a.fsStats[key] = fsStats
|
||||
}
|
||||
}
|
||||
|
||||
// Use FILESYSTEM env var to find root filesystem
|
||||
if filesystem != "" {
|
||||
for _, p := range partitions {
|
||||
if strings.HasSuffix(p.Device, filesystem) || p.Mountpoint == filesystem {
|
||||
addFsStat(p.Device, p.Mountpoint, true)
|
||||
hasRoot = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasRoot {
|
||||
slog.Warn("Partition details not found", "filesystem", filesystem)
|
||||
}
|
||||
}
|
||||
|
||||
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
||||
if extraFilesystems, exists := GetEnv("EXTRA_FILESYSTEMS"); exists {
|
||||
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
||||
// Parse custom name from format: device__customname
|
||||
fs, customName := parseFilesystemEntry(fsEntry)
|
||||
|
||||
found := false
|
||||
for _, p := range partitions {
|
||||
if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
|
||||
addFsStat(p.Device, p.Mountpoint, false, customName)
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// if not in partitions, test if we can get disk usage
|
||||
if !found {
|
||||
if _, err := disk.Usage(fs); err == nil {
|
||||
addFsStat(filepath.Base(fs), fs, false, customName)
|
||||
} else {
|
||||
slog.Error("Invalid filesystem", "name", fs, "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process partitions for various mount points
|
||||
for _, p := range partitions {
|
||||
// fmt.Println(p.Device, p.Mountpoint)
|
||||
// Binary root fallback or docker root fallback
|
||||
if !hasRoot && (p.Mountpoint == "/" || (p.Mountpoint == "/etc/hosts" && strings.HasPrefix(p.Device, "/dev"))) {
|
||||
fs, match := findIoDevice(filepath.Base(p.Device), diskIoCounters, a.fsStats)
|
||||
if match {
|
||||
addFsStat(fs, p.Mountpoint, true)
|
||||
hasRoot = true
|
||||
}
|
||||
}
|
||||
|
||||
// Check if device is in /extra-filesystems
|
||||
if strings.HasPrefix(p.Mountpoint, efPath) {
|
||||
device, customName := parseFilesystemEntry(p.Mountpoint)
|
||||
addFsStat(device, p.Mountpoint, false, customName)
|
||||
}
|
||||
}
|
||||
|
||||
// Check all folders in /extra-filesystems and add them if not already present
|
||||
if folders, err := os.ReadDir(efPath); err == nil {
|
||||
existingMountpoints := make(map[string]bool)
|
||||
for _, stats := range a.fsStats {
|
||||
existingMountpoints[stats.Mountpoint] = true
|
||||
}
|
||||
for _, folder := range folders {
|
||||
if folder.IsDir() {
|
||||
mountpoint := filepath.Join(efPath, folder.Name())
|
||||
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
||||
if !existingMountpoints[mountpoint] {
|
||||
device, customName := parseFilesystemEntry(folder.Name())
|
||||
addFsStat(device, mountpoint, false, customName)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no root filesystem set, use fallback
|
||||
if !hasRoot {
|
||||
rootDevice, _ := findIoDevice(filepath.Base(filesystem), diskIoCounters, a.fsStats)
|
||||
slog.Info("Root disk", "mountpoint", "/", "io", rootDevice)
|
||||
a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: "/"}
|
||||
}
|
||||
|
||||
a.initializeDiskIoStats(diskIoCounters)
|
||||
}
|
||||
|
||||
// Returns matching device from /proc/diskstats,
|
||||
// or the device with the most reads if no match is found.
|
||||
// bool is true if a match was found.
|
||||
func findIoDevice(filesystem string, diskIoCounters map[string]disk.IOCountersStat, fsStats map[string]*system.FsStats) (string, bool) {
|
||||
var maxReadBytes uint64
|
||||
maxReadDevice := "/"
|
||||
for _, d := range diskIoCounters {
|
||||
if d.Name == filesystem || (d.Label != "" && d.Label == filesystem) {
|
||||
return d.Name, true
|
||||
}
|
||||
if d.ReadBytes > maxReadBytes {
|
||||
// don't use if device already exists in fsStats
|
||||
if _, exists := fsStats[d.Name]; !exists {
|
||||
maxReadBytes = d.ReadBytes
|
||||
maxReadDevice = d.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
return maxReadDevice, false
|
||||
}
|
||||
|
||||
// Sets start values for disk I/O stats.
|
||||
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
|
||||
for device, stats := range a.fsStats {
|
||||
// skip if not in diskIoCounters
|
||||
d, exists := diskIoCounters[device]
|
||||
if !exists {
|
||||
slog.Warn("Device not found in diskstats", "name", device)
|
||||
continue
|
||||
}
|
||||
// populate initial values
|
||||
stats.Time = time.Now()
|
||||
stats.TotalRead = d.ReadBytes
|
||||
stats.TotalWrite = d.WriteBytes
|
||||
// add to list of valid io device names
|
||||
a.fsNames = append(a.fsNames, device)
|
||||
}
|
||||
}
|
||||
|
||||
// Updates disk usage statistics for all monitored filesystems
|
||||
func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
|
||||
// disk usage
|
||||
for _, stats := range a.fsStats {
|
||||
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
||||
stats.DiskTotal = bytesToGigabytes(d.Total)
|
||||
stats.DiskUsed = bytesToGigabytes(d.Used)
|
||||
if stats.Root {
|
||||
systemStats.DiskTotal = bytesToGigabytes(d.Total)
|
||||
systemStats.DiskUsed = bytesToGigabytes(d.Used)
|
||||
systemStats.DiskPct = twoDecimals(d.UsedPercent)
|
||||
}
|
||||
} else {
|
||||
// reset stats if error (likely unmounted)
|
||||
slog.Error("Error getting disk stats", "name", stats.Mountpoint, "err", err)
|
||||
stats.DiskTotal = 0
|
||||
stats.DiskUsed = 0
|
||||
stats.TotalRead = 0
|
||||
stats.TotalWrite = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Updates disk I/O statistics for all monitored filesystems
|
||||
func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||
// disk i/o (cache-aware per interval)
|
||||
if ioCounters, err := disk.IOCounters(a.fsNames...); err == nil {
|
||||
// Ensure map for this interval exists
|
||||
if _, ok := a.diskPrev[cacheTimeMs]; !ok {
|
||||
a.diskPrev[cacheTimeMs] = make(map[string]prevDisk)
|
||||
}
|
||||
now := time.Now()
|
||||
for name, d := range ioCounters {
|
||||
stats := a.fsStats[d.Name]
|
||||
if stats == nil {
|
||||
// skip devices not tracked
|
||||
continue
|
||||
}
|
||||
|
||||
// Previous snapshot for this interval and device
|
||||
prev, hasPrev := a.diskPrev[cacheTimeMs][name]
|
||||
if !hasPrev {
|
||||
// Seed from agent-level fsStats if present, else seed from current
|
||||
prev = prevDisk{readBytes: stats.TotalRead, writeBytes: stats.TotalWrite, at: stats.Time}
|
||||
if prev.at.IsZero() {
|
||||
prev = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
}
|
||||
}
|
||||
|
||||
msElapsed := uint64(now.Sub(prev.at).Milliseconds())
|
||||
if msElapsed < 100 {
|
||||
// Avoid division by zero or clock issues; update snapshot and continue
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
continue
|
||||
}
|
||||
|
||||
diskIORead := (d.ReadBytes - prev.readBytes) * 1000 / msElapsed
|
||||
diskIOWrite := (d.WriteBytes - prev.writeBytes) * 1000 / msElapsed
|
||||
readMbPerSecond := bytesToMegabytes(float64(diskIORead))
|
||||
writeMbPerSecond := bytesToMegabytes(float64(diskIOWrite))
|
||||
|
||||
// validate values
|
||||
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
||||
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readMbPerSecond, "write", writeMbPerSecond)
|
||||
// Reset interval snapshot and seed from current
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
// also refresh agent baseline to avoid future negatives
|
||||
a.initializeDiskIoStats(ioCounters)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update per-interval snapshot
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
|
||||
// Update global fsStats baseline for cross-interval correctness
|
||||
stats.Time = now
|
||||
stats.TotalRead = d.ReadBytes
|
||||
stats.TotalWrite = d.WriteBytes
|
||||
stats.DiskReadPs = readMbPerSecond
|
||||
stats.DiskWritePs = writeMbPerSecond
|
||||
stats.DiskReadBytes = diskIORead
|
||||
stats.DiskWriteBytes = diskIOWrite
|
||||
|
||||
if stats.Root {
|
||||
systemStats.DiskReadPs = stats.DiskReadPs
|
||||
systemStats.DiskWritePs = stats.DiskWritePs
|
||||
systemStats.DiskIO[0] = diskIORead
|
||||
systemStats.DiskIO[1] = diskIOWrite
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
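The EXTRA_FILESYSTEMS entries handled above use the device__customname convention. A minimal standalone sketch of that parsing, mirroring parseFilesystemEntry (the env-style value in main is only an example):

package main

import (
	"fmt"
	"strings"
)

// parse splits "device__customname" into its two parts, trimming whitespace,
// the same way parseFilesystemEntry does.
func parse(entry string) (device, name string) {
	entry = strings.TrimSpace(entry)
	if parts := strings.SplitN(entry, "__", 2); len(parts) == 2 {
		return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
	}
	return entry, ""
}

func main() {
	for _, e := range strings.Split("sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2", ",") {
		device, name := parse(e)
		fmt.Printf("device=%q name=%q\n", device, name)
	}
}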
235
agent/disk_test.go
Normal file
@@ -0,0 +1,235 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/shirou/gopsutil/v4/disk"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseFilesystemEntry(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expectedFs string
|
||||
expectedName string
|
||||
}{
|
||||
{
|
||||
name: "simple device name",
|
||||
input: "sda1",
|
||||
expectedFs: "sda1",
|
||||
expectedName: "",
|
||||
},
|
||||
{
|
||||
name: "device with custom name",
|
||||
input: "sda1__my-storage",
|
||||
expectedFs: "sda1",
|
||||
expectedName: "my-storage",
|
||||
},
|
||||
{
|
||||
name: "full device path with custom name",
|
||||
input: "/dev/sdb1__backup-drive",
|
||||
expectedFs: "/dev/sdb1",
|
||||
expectedName: "backup-drive",
|
||||
},
|
||||
{
|
||||
name: "NVMe device with custom name",
|
||||
input: "nvme0n1p2__fast-ssd",
|
||||
expectedFs: "nvme0n1p2",
|
||||
expectedName: "fast-ssd",
|
||||
},
|
||||
{
|
||||
name: "whitespace trimmed",
|
||||
input: " sda2__trimmed-name ",
|
||||
expectedFs: "sda2",
|
||||
expectedName: "trimmed-name",
|
||||
},
|
||||
{
|
||||
name: "empty custom name",
|
||||
input: "sda3__",
|
||||
expectedFs: "sda3",
|
||||
expectedName: "",
|
||||
},
|
||||
{
|
||||
name: "empty device name",
|
||||
input: "__just-custom",
|
||||
expectedFs: "",
|
||||
expectedName: "just-custom",
|
||||
},
|
||||
{
|
||||
name: "multiple underscores in custom name",
|
||||
input: "sda1__my_custom_drive",
|
||||
expectedFs: "sda1",
|
||||
expectedName: "my_custom_drive",
|
||||
},
|
||||
{
|
||||
name: "custom name with spaces",
|
||||
input: "sda1__My Storage Drive",
|
||||
expectedFs: "sda1",
|
||||
expectedName: "My Storage Drive",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fsEntry := strings.TrimSpace(tt.input)
|
||||
var fs, customName string
|
||||
if parts := strings.SplitN(fsEntry, "__", 2); len(parts) == 2 {
|
||||
fs = strings.TrimSpace(parts[0])
|
||||
customName = strings.TrimSpace(parts[1])
|
||||
} else {
|
||||
fs = fsEntry
|
||||
}
|
||||
|
||||
assert.Equal(t, tt.expectedFs, fs)
|
||||
assert.Equal(t, tt.expectedName, customName)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
||||
// Set up environment variables
|
||||
oldEnv := os.Getenv("EXTRA_FILESYSTEMS")
|
||||
defer func() {
|
||||
if oldEnv != "" {
|
||||
os.Setenv("EXTRA_FILESYSTEMS", oldEnv)
|
||||
} else {
|
||||
os.Unsetenv("EXTRA_FILESYSTEMS")
|
||||
}
|
||||
}()
|
||||
|
||||
// Test with custom names
|
||||
os.Setenv("EXTRA_FILESYSTEMS", "sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2")
|
||||
|
||||
// Mock disk partitions (we'll just test the parsing logic)
|
||||
// Since the actual disk operations are system-dependent, we'll focus on the parsing
|
||||
testCases := []struct {
|
||||
envValue string
|
||||
expectedFs []string
|
||||
expectedNames map[string]string
|
||||
}{
|
||||
{
|
||||
envValue: "sda1__my-storage,sdb1__backup-drive",
|
||||
expectedFs: []string{"sda1", "sdb1"},
|
||||
expectedNames: map[string]string{
|
||||
"sda1": "my-storage",
|
||||
"sdb1": "backup-drive",
|
||||
},
|
||||
},
|
||||
{
|
||||
envValue: "sda1,nvme0n1p2__fast-ssd",
|
||||
expectedFs: []string{"sda1", "nvme0n1p2"},
|
||||
expectedNames: map[string]string{
|
||||
"nvme0n1p2": "fast-ssd",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run("env_"+tc.envValue, func(t *testing.T) {
|
||||
os.Setenv("EXTRA_FILESYSTEMS", tc.envValue)
|
||||
|
||||
// Create mock partitions that would match our test cases
|
||||
partitions := []disk.PartitionStat{}
|
||||
for _, fs := range tc.expectedFs {
|
||||
if strings.HasPrefix(fs, "/dev/") {
|
||||
partitions = append(partitions, disk.PartitionStat{
|
||||
Device: fs,
|
||||
Mountpoint: fs,
|
||||
})
|
||||
} else {
|
||||
partitions = append(partitions, disk.PartitionStat{
|
||||
Device: "/dev/" + fs,
|
||||
Mountpoint: "/" + fs,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test the parsing logic by calling the relevant part
|
||||
// We'll create a simplified version to test just the parsing
|
||||
extraFilesystems := tc.envValue
|
||||
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
||||
// Parse the entry
|
||||
fsEntry = strings.TrimSpace(fsEntry)
|
||||
var fs, customName string
|
||||
if parts := strings.SplitN(fsEntry, "__", 2); len(parts) == 2 {
|
||||
fs = strings.TrimSpace(parts[0])
|
||||
customName = strings.TrimSpace(parts[1])
|
||||
} else {
|
||||
fs = fsEntry
|
||||
}
|
||||
|
||||
// Verify the device is in our expected list
|
||||
assert.Contains(t, tc.expectedFs, fs, "parsed device should be in expected list")
|
||||
|
||||
// Check if custom name should exist
|
||||
if expectedName, exists := tc.expectedNames[fs]; exists {
|
||||
assert.Equal(t, expectedName, customName, "custom name should match expected")
|
||||
} else {
|
||||
assert.Empty(t, customName, "custom name should be empty when not expected")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFsStatsWithCustomNames(t *testing.T) {
|
||||
// Test that FsStats properly stores custom names
|
||||
fsStats := &system.FsStats{
|
||||
Mountpoint: "/mnt/storage",
|
||||
Name: "my-custom-storage",
|
||||
DiskTotal: 100.0,
|
||||
DiskUsed: 50.0,
|
||||
}
|
||||
|
||||
assert.Equal(t, "my-custom-storage", fsStats.Name)
|
||||
assert.Equal(t, "/mnt/storage", fsStats.Mountpoint)
|
||||
assert.Equal(t, 100.0, fsStats.DiskTotal)
|
||||
assert.Equal(t, 50.0, fsStats.DiskUsed)
|
||||
}
|
||||
|
||||
func TestExtraFsKeyGeneration(t *testing.T) {
|
||||
// Test the logic for generating ExtraFs keys with custom names
|
||||
testCases := []struct {
|
||||
name string
|
||||
deviceName string
|
||||
customName string
|
||||
expectedKey string
|
||||
}{
|
||||
{
|
||||
name: "with custom name",
|
||||
deviceName: "sda1",
|
||||
customName: "my-storage",
|
||||
expectedKey: "my-storage",
|
||||
},
|
||||
{
|
||||
name: "without custom name",
|
||||
deviceName: "sda1",
|
||||
customName: "",
|
||||
expectedKey: "sda1",
|
||||
},
|
||||
{
|
||||
name: "empty custom name falls back to device",
|
||||
deviceName: "nvme0n1p2",
|
||||
customName: "",
|
||||
expectedKey: "nvme0n1p2",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Simulate the key generation logic from agent.go
|
||||
key := tc.deviceName
|
||||
if tc.customName != "" {
|
||||
key = tc.customName
|
||||
}
|
||||
assert.Equal(t, tc.expectedKey, key)
|
||||
})
|
||||
}
|
||||
}
|
||||
757
agent/docker.go
Normal file
@@ -0,0 +1,757 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/agent/deltatracker"
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
|
||||
"github.com/blang/semver"
|
||||
)
|
||||
|
||||
const (
|
||||
// Docker API timeout in milliseconds
|
||||
dockerTimeoutMs = 2100
|
||||
// Maximum realistic network speed (5 GB/s) to detect bad deltas
|
||||
maxNetworkSpeedBps uint64 = 5e9
|
||||
// Maximum conceivable memory usage of a container (100TB) to detect bad memory stats
|
||||
maxMemoryUsage uint64 = 100 * 1024 * 1024 * 1024 * 1024
|
||||
// Number of log lines to request when fetching container logs
|
||||
dockerLogsTail = 200
|
||||
// Maximum size of a single log frame (1MB) to prevent memory exhaustion
|
||||
// A single log line larger than 1MB is likely an error or misconfiguration
|
||||
maxLogFrameSize = 1024 * 1024
|
||||
// Maximum total log content size (5MB) to prevent memory exhaustion
|
||||
// This provides a reasonable limit for network transfer and browser rendering
|
||||
maxTotalLogSize = 5 * 1024 * 1024
|
||||
)
|
||||
|
||||
type dockerManager struct {
|
||||
client *http.Client // Client to query Docker API
|
||||
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
||||
sem chan struct{} // Semaphore to limit concurrent container requests
|
||||
containerStatsMutex sync.RWMutex // Mutex to prevent concurrent access to containerStatsMap
|
||||
apiContainerList []*container.ApiInfo // List of containers from Docker API
|
||||
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
||||
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
||||
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
||||
isWindows bool // Whether the Docker Engine API is running on Windows
|
||||
buf *bytes.Buffer // Buffer to store and read response bodies
|
||||
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
||||
apiStats *container.ApiStats // Reusable API stats object
|
||||
excludeContainers []string // Patterns to exclude containers by name
|
||||
|
||||
// Cache-time-aware tracking for CPU stats (similar to cpu.go)
|
||||
// Maps cache time intervals to container-specific CPU usage tracking
|
||||
lastCpuContainer map[uint16]map[string]uint64 // cacheTimeMs -> containerId -> last cpu container usage
|
||||
lastCpuSystem map[uint16]map[string]uint64 // cacheTimeMs -> containerId -> last cpu system usage
|
||||
lastCpuReadTime map[uint16]map[string]time.Time // cacheTimeMs -> containerId -> last read time (Windows)
|
||||
|
||||
// Network delta trackers - one per cache time to avoid interference
|
||||
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
||||
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||
}
|
||||
|
||||
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
||||
type userAgentRoundTripper struct {
|
||||
rt http.RoundTripper
|
||||
userAgent string
|
||||
}
|
||||
|
||||
// RoundTrip implements the http.RoundTripper interface
|
||||
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Set("User-Agent", u.userAgent)
|
||||
return u.rt.RoundTrip(req)
|
||||
}
|
||||
|
||||
// Add goroutine to the queue
|
||||
func (d *dockerManager) queue() {
|
||||
d.wg.Add(1)
|
||||
if d.goodDockerVersion {
|
||||
d.sem <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove goroutine from the queue
|
||||
func (d *dockerManager) dequeue() {
|
||||
d.wg.Done()
|
||||
if d.goodDockerVersion {
|
||||
<-d.sem
|
||||
}
|
||||
}
|
||||
|
||||
// shouldExcludeContainer checks if a container name matches any exclusion pattern
|
||||
func (dm *dockerManager) shouldExcludeContainer(name string) bool {
|
||||
if len(dm.excludeContainers) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, pattern := range dm.excludeContainers {
|
||||
if match, _ := path.Match(pattern, name); match {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
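Since shouldExcludeContainer relies on path.Match, EXCLUDE_CONTAINERS takes shell-style glob patterns rather than regular expressions. A minimal sketch of how this behavior could be exercised from a test in the same package; the container names and patterns are invented for illustration:

package agent

import "testing"

func TestShouldExcludeContainerSketch(t *testing.T) {
	dm := &dockerManager{excludeContainers: []string{"test-*", "backup"}}
	cases := map[string]bool{
		"test-redis": true,  // matches the "test-*" glob
		"backup":     true,  // exact match
		"web-1":      false, // no pattern matches
	}
	for name, want := range cases {
		if got := dm.shouldExcludeContainer(name); got != want {
			t.Errorf("shouldExcludeContainer(%q) = %v, want %v", name, got, want)
		}
	}
}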
|
||||
|
||||
// Returns stats for all running containers with cache-time-aware delta tracking
|
||||
func (dm *dockerManager) getDockerStats(cacheTimeMs uint16) ([]*container.Stats, error) {
|
||||
resp, err := dm.client.Get("http://localhost/containers/json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dm.apiContainerList = dm.apiContainerList[:0]
|
||||
if err := dm.decode(resp, &dm.apiContainerList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dm.isWindows = strings.Contains(resp.Header.Get("Server"), "windows")
|
||||
|
||||
containersLength := len(dm.apiContainerList)
|
||||
|
||||
// store valid ids to clean up old container ids from map
|
||||
if dm.validIds == nil {
|
||||
dm.validIds = make(map[string]struct{}, containersLength)
|
||||
} else {
|
||||
clear(dm.validIds)
|
||||
}
|
||||
|
||||
var failedContainers []*container.ApiInfo
|
||||
|
||||
for _, ctr := range dm.apiContainerList {
|
||||
ctr.IdShort = ctr.Id[:12]
|
||||
|
||||
// Skip this container if it matches an exclusion pattern
|
||||
if dm.shouldExcludeContainer(ctr.Names[0][1:]) {
|
||||
slog.Debug("Excluding container", "name", ctr.Names[0][1:])
|
||||
continue
|
||||
}
|
||||
|
||||
dm.validIds[ctr.IdShort] = struct{}{}
|
||||
// check if container is less than 1 minute old (possible restart)
|
||||
// note: can't use Created field because it's not updated on restart
|
||||
if strings.Contains(ctr.Status, "second") {
|
||||
// if so, remove old container data
|
||||
dm.deleteContainerStatsSync(ctr.IdShort)
|
||||
}
|
||||
dm.queue()
|
||||
go func(ctr *container.ApiInfo) {
|
||||
defer dm.dequeue()
|
||||
err := dm.updateContainerStats(ctr, cacheTimeMs)
|
||||
// if error, delete from map and add to failed list to retry
|
||||
if err != nil {
|
||||
dm.containerStatsMutex.Lock()
|
||||
delete(dm.containerStatsMap, ctr.IdShort)
|
||||
failedContainers = append(failedContainers, ctr)
|
||||
dm.containerStatsMutex.Unlock()
|
||||
}
|
||||
}(ctr)
|
||||
}
|
||||
|
||||
dm.wg.Wait()
|
||||
|
||||
// retry failed containers separately so we can run them in parallel (docker 24 bug)
|
||||
if len(failedContainers) > 0 {
|
||||
slog.Debug("Retrying failed containers", "count", len(failedContainers))
|
||||
for i := range failedContainers {
|
||||
ctr := failedContainers[i]
|
||||
dm.queue()
|
||||
go func(ctr *container.ApiInfo) {
|
||||
defer dm.dequeue()
|
||||
if err2 := dm.updateContainerStats(ctr, cacheTimeMs); err2 != nil {
|
||||
slog.Error("Error getting container stats", "err", err2)
|
||||
}
|
||||
}(ctr)
|
||||
}
|
||||
dm.wg.Wait()
|
||||
}
|
||||
|
||||
// populate final stats and remove old / invalid container stats
|
||||
stats := make([]*container.Stats, 0, containersLength)
|
||||
for id, v := range dm.containerStatsMap {
|
||||
if _, exists := dm.validIds[id]; !exists {
|
||||
delete(dm.containerStatsMap, id)
|
||||
} else {
|
||||
stats = append(stats, v)
|
||||
}
|
||||
}
|
||||
|
||||
// prepare network trackers for next interval for this cache time
|
||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// initializeCpuTracking initializes CPU tracking maps for a specific cache time interval
|
||||
func (dm *dockerManager) initializeCpuTracking(cacheTimeMs uint16) {
|
||||
// Initialize cache time maps if they don't exist
|
||||
if dm.lastCpuContainer[cacheTimeMs] == nil {
|
||||
dm.lastCpuContainer[cacheTimeMs] = make(map[string]uint64)
|
||||
}
|
||||
if dm.lastCpuSystem[cacheTimeMs] == nil {
|
||||
dm.lastCpuSystem[cacheTimeMs] = make(map[string]uint64)
|
||||
}
|
||||
// Ensure the outer map exists before indexing
|
||||
if dm.lastCpuReadTime == nil {
|
||||
dm.lastCpuReadTime = make(map[uint16]map[string]time.Time)
|
||||
}
|
||||
if dm.lastCpuReadTime[cacheTimeMs] == nil {
|
||||
dm.lastCpuReadTime[cacheTimeMs] = make(map[string]time.Time)
|
||||
}
|
||||
}
|
||||
|
||||
// getCpuPreviousValues returns previous CPU values for a container and cache time interval
|
||||
func (dm *dockerManager) getCpuPreviousValues(cacheTimeMs uint16, containerId string) (uint64, uint64) {
|
||||
return dm.lastCpuContainer[cacheTimeMs][containerId], dm.lastCpuSystem[cacheTimeMs][containerId]
|
||||
}
|
||||
|
||||
// setCpuCurrentValues stores current CPU values for a container and cache time interval
|
||||
func (dm *dockerManager) setCpuCurrentValues(cacheTimeMs uint16, containerId string, cpuContainer, cpuSystem uint64) {
|
||||
dm.lastCpuContainer[cacheTimeMs][containerId] = cpuContainer
|
||||
dm.lastCpuSystem[cacheTimeMs][containerId] = cpuSystem
|
||||
}
|
||||
|
||||
// calculateMemoryUsage calculates memory usage from Docker API stats
|
||||
func calculateMemoryUsage(apiStats *container.ApiStats, isWindows bool) (uint64, error) {
|
||||
if isWindows {
|
||||
return apiStats.MemoryStats.PrivateWorkingSet, nil
|
||||
}
|
||||
|
||||
memCache := apiStats.MemoryStats.Stats.InactiveFile
|
||||
if memCache == 0 {
|
||||
memCache = apiStats.MemoryStats.Stats.Cache
|
||||
}
|
||||
|
||||
usedDelta := apiStats.MemoryStats.Usage - memCache
|
||||
if usedDelta <= 0 || usedDelta > maxMemoryUsage {
|
||||
return 0, fmt.Errorf("bad memory stats")
|
||||
}
|
||||
|
||||
return usedDelta, nil
|
||||
}
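The non-Windows branch mirrors docker stats: page-cache memory (inactive_file, falling back to the legacy cache counter) is subtracted from total usage. A small worked example as a sketch, assuming the nested MemoryStats fields are plain exported structs as the reads above suggest; the byte counts are made up:

package agent

import (
	"testing"

	"github.com/henrygd/beszel/internal/entities/container"
)

func TestCalculateMemoryUsageSketch(t *testing.T) {
	// 512 MiB total cgroup usage, 128 MiB of it inactive file cache -> 384 MiB "used".
	apiStats := &container.ApiStats{}
	apiStats.MemoryStats.Usage = 512 * 1024 * 1024
	apiStats.MemoryStats.Stats.InactiveFile = 128 * 1024 * 1024

	used, err := calculateMemoryUsage(apiStats, false)
	if err != nil {
		t.Fatal(err)
	}
	if used != 384*1024*1024 {
		t.Errorf("got %d bytes, want %d", used, 384*1024*1024)
	}
}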
|
||||
|
||||
// getNetworkTracker returns the DeltaTracker for a specific cache time, creating it if needed
|
||||
func (dm *dockerManager) getNetworkTracker(cacheTimeMs uint16, isSent bool) *deltatracker.DeltaTracker[string, uint64] {
|
||||
var trackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||
if isSent {
|
||||
trackers = dm.networkSentTrackers
|
||||
} else {
|
||||
trackers = dm.networkRecvTrackers
|
||||
}
|
||||
|
||||
if trackers[cacheTimeMs] == nil {
|
||||
trackers[cacheTimeMs] = deltatracker.NewDeltaTracker[string, uint64]()
|
||||
}
|
||||
|
||||
return trackers[cacheTimeMs]
|
||||
}
|
||||
|
||||
// cycleNetworkDeltasForCacheTime cycles the network delta trackers for a specific cache time
|
||||
func (dm *dockerManager) cycleNetworkDeltasForCacheTime(cacheTimeMs uint16) {
|
||||
if dm.networkSentTrackers[cacheTimeMs] != nil {
|
||||
dm.networkSentTrackers[cacheTimeMs].Cycle()
|
||||
}
|
||||
if dm.networkRecvTrackers[cacheTimeMs] != nil {
|
||||
dm.networkRecvTrackers[cacheTimeMs].Cycle()
|
||||
}
|
||||
}
|
||||
|
||||
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
||||
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, stats *container.Stats, initialized bool, name string, cacheTimeMs uint16) (uint64, uint64) {
|
||||
var total_sent, total_recv uint64
|
||||
for _, v := range apiStats.Networks {
|
||||
total_sent += v.TxBytes
|
||||
total_recv += v.RxBytes
|
||||
}
|
||||
|
||||
// Get the DeltaTracker for this specific cache time
|
||||
sentTracker := dm.getNetworkTracker(cacheTimeMs, true)
|
||||
recvTracker := dm.getNetworkTracker(cacheTimeMs, false)
|
||||
|
||||
// Set current values in the cache-time-specific DeltaTracker
|
||||
sentTracker.Set(ctr.IdShort, total_sent)
|
||||
recvTracker.Set(ctr.IdShort, total_recv)
|
||||
|
||||
// Get deltas (bytes since last measurement)
|
||||
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
||||
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
||||
|
||||
// Calculate bytes per second independently for Tx and Rx if we have previous data
|
||||
var sent_delta, recv_delta uint64
|
||||
if initialized {
|
||||
millisecondsElapsed := uint64(time.Since(stats.PrevReadTime).Milliseconds())
|
||||
if millisecondsElapsed > 0 {
|
||||
if sent_delta_raw > 0 {
|
||||
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
||||
if sent_delta > maxNetworkSpeedBps {
|
||||
slog.Warn("Bad network delta", "container", name)
|
||||
sent_delta = 0
|
||||
}
|
||||
}
|
||||
if recv_delta_raw > 0 {
|
||||
recv_delta = recv_delta_raw * 1000 / millisecondsElapsed
|
||||
if recv_delta > maxNetworkSpeedBps {
|
||||
slog.Warn("Bad network delta", "container", name)
|
||||
recv_delta = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sent_delta, recv_delta
|
||||
}
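The rate math in the initialized branch is just the raw byte delta scaled by elapsed wall time (delta * 1000 / elapsedMs gives bytes per second), then capped by the 5 GB/s sanity limit. A tiny sketch of the same arithmetic with arbitrary numbers:

package agent

import "testing"

func TestNetworkRateArithmeticSketch(t *testing.T) {
	// 30 MB transferred over a 15 s cache interval -> 2 MB/s.
	var deltaBytes uint64 = 30_000_000
	var elapsedMs uint64 = 15_000
	rate := deltaBytes * 1000 / elapsedMs
	if rate != 2_000_000 {
		t.Errorf("got %d B/s, want 2000000", rate)
	}
	if rate > maxNetworkSpeedBps {
		t.Error("rate should be well under the 5 GB/s sanity cap")
	}
}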
|
||||
|
||||
// validateCpuPercentage checks if CPU percentage is within valid range
|
||||
func validateCpuPercentage(cpuPct float64, containerName string) error {
|
||||
if cpuPct > 100 {
|
||||
return fmt.Errorf("%s cpu pct greater than 100: %+v", containerName, cpuPct)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateContainerStatsValues updates the final stats values
|
||||
func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemory uint64, sent_delta, recv_delta uint64, readTime time.Time) {
|
||||
stats.Cpu = twoDecimals(cpuPct)
|
||||
stats.Mem = bytesToMegabytes(float64(usedMemory))
|
||||
stats.NetworkSent = bytesToMegabytes(float64(sent_delta))
|
||||
stats.NetworkRecv = bytesToMegabytes(float64(recv_delta))
|
||||
stats.PrevReadTime = readTime
|
||||
}
|
||||
|
||||
func parseDockerStatus(status string) (string, container.DockerHealth) {
|
||||
trimmed := strings.TrimSpace(status)
|
||||
if trimmed == "" {
|
||||
return "", container.DockerHealthNone
|
||||
}
|
||||
|
||||
// Remove "About " from status
|
||||
trimmed = strings.Replace(trimmed, "About ", "", 1)
|
||||
|
||||
openIdx := strings.LastIndex(trimmed, "(")
|
||||
if openIdx == -1 || !strings.HasSuffix(trimmed, ")") {
|
||||
return trimmed, container.DockerHealthNone
|
||||
}
|
||||
|
||||
statusText := strings.TrimSpace(trimmed[:openIdx])
|
||||
if statusText == "" {
|
||||
statusText = trimmed
|
||||
}
|
||||
|
||||
healthText := strings.ToLower(strings.TrimSpace(strings.TrimSuffix(trimmed[openIdx+1:], ")")))
|
||||
// Some Docker statuses include a "health:" prefix inside the parentheses.
|
||||
// Strip it so it maps correctly to the known health states.
|
||||
if colonIdx := strings.IndexRune(healthText, ':'); colonIdx != -1 {
|
||||
prefix := strings.TrimSpace(healthText[:colonIdx])
|
||||
if prefix == "health" || prefix == "health status" {
|
||||
healthText = strings.TrimSpace(healthText[colonIdx+1:])
|
||||
}
|
||||
}
|
||||
if health, ok := container.DockerHealthStrings[healthText]; ok {
|
||||
return statusText, health
|
||||
}
|
||||
|
||||
return trimmed, container.DockerHealthNone
|
||||
}
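parseDockerStatus splits Docker's human-readable Status string into the uptime text and a health state. A hedged sketch of the expected round trip, assuming the usual Docker status strings and that "healthy" is a key of container.DockerHealthStrings as the lookup above implies:

package agent

import (
	"testing"

	"github.com/henrygd/beszel/internal/entities/container"
)

func TestParseDockerStatusSketch(t *testing.T) {
	status, health := parseDockerStatus("Up About an hour (healthy)")
	if status != "Up an hour" { // "About " is stripped, health segment removed
		t.Errorf("unexpected status text: %q", status)
	}
	if health != container.DockerHealthStrings["healthy"] {
		t.Errorf("unexpected health: %v", health)
	}

	// No parenthesised health segment -> DockerHealthNone.
	status, health = parseDockerStatus("Up 3 days")
	if status != "Up 3 days" || health != container.DockerHealthNone {
		t.Errorf("unexpected result: %q, %v", status, health)
	}
}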
|
||||
|
||||
// Updates stats for individual container with cache-time-aware delta tracking
|
||||
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeMs uint16) error {
|
||||
name := ctr.Names[0][1:]
|
||||
|
||||
resp, err := dm.client.Get(fmt.Sprintf("http://localhost/containers/%s/stats?stream=0&one-shot=1", ctr.IdShort))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dm.containerStatsMutex.Lock()
|
||||
defer dm.containerStatsMutex.Unlock()
|
||||
|
||||
// add empty values if they don't exist in the map
|
||||
stats, initialized := dm.containerStatsMap[ctr.IdShort]
|
||||
if !initialized {
|
||||
stats = &container.Stats{Name: name, Id: ctr.IdShort, Image: ctr.Image}
|
||||
dm.containerStatsMap[ctr.IdShort] = stats
|
||||
}
|
||||
|
||||
stats.Id = ctr.IdShort
|
||||
|
||||
statusText, health := parseDockerStatus(ctr.Status)
|
||||
stats.Status = statusText
|
||||
stats.Health = health
|
||||
|
||||
// reset current stats
|
||||
stats.Cpu = 0
|
||||
stats.Mem = 0
|
||||
stats.NetworkSent = 0
|
||||
stats.NetworkRecv = 0
|
||||
|
||||
res := dm.apiStats
|
||||
res.Networks = nil
|
||||
if err := dm.decode(resp, res); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Initialize CPU tracking for this cache time interval
|
||||
dm.initializeCpuTracking(cacheTimeMs)
|
||||
|
||||
// Get previous CPU values
|
||||
prevCpuContainer, prevCpuSystem := dm.getCpuPreviousValues(cacheTimeMs, ctr.IdShort)
|
||||
|
||||
// Calculate CPU percentage based on platform
|
||||
var cpuPct float64
|
||||
if dm.isWindows {
|
||||
prevRead := dm.lastCpuReadTime[cacheTimeMs][ctr.IdShort]
|
||||
cpuPct = res.CalculateCpuPercentWindows(prevCpuContainer, prevRead)
|
||||
} else {
|
||||
cpuPct = res.CalculateCpuPercentLinux(prevCpuContainer, prevCpuSystem)
|
||||
}
|
||||
|
||||
// Calculate memory usage
|
||||
usedMemory, err := calculateMemoryUsage(res, dm.isWindows)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s - %w - see https://github.com/henrygd/beszel/issues/144", name, err)
|
||||
}
|
||||
|
||||
// Store current CPU stats for next calculation
|
||||
currentCpuContainer := res.CPUStats.CPUUsage.TotalUsage
|
||||
currentCpuSystem := res.CPUStats.SystemUsage
|
||||
dm.setCpuCurrentValues(cacheTimeMs, ctr.IdShort, currentCpuContainer, currentCpuSystem)
|
||||
|
||||
// Validate CPU percentage
|
||||
if err := validateCpuPercentage(cpuPct, name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Calculate network stats using DeltaTracker
|
||||
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, stats, initialized, name, cacheTimeMs)
|
||||
|
||||
// Store current network values for legacy compatibility
|
||||
var total_sent, total_recv uint64
|
||||
for _, v := range res.Networks {
|
||||
total_sent += v.TxBytes
|
||||
total_recv += v.RxBytes
|
||||
}
|
||||
stats.PrevNet.Sent, stats.PrevNet.Recv = total_sent, total_recv
|
||||
|
||||
// Update final stats values
|
||||
updateContainerStatsValues(stats, cpuPct, usedMemory, sent_delta, recv_delta, res.Read)
|
||||
// store per-cache-time read time for Windows CPU percent calc
|
||||
dm.lastCpuReadTime[cacheTimeMs][ctr.IdShort] = res.Read
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete container stats from map using mutex
|
||||
func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
||||
dm.containerStatsMutex.Lock()
|
||||
defer dm.containerStatsMutex.Unlock()
|
||||
delete(dm.containerStatsMap, id)
|
||||
for ct := range dm.lastCpuContainer {
|
||||
delete(dm.lastCpuContainer[ct], id)
|
||||
}
|
||||
for ct := range dm.lastCpuSystem {
|
||||
delete(dm.lastCpuSystem[ct], id)
|
||||
}
|
||||
for ct := range dm.lastCpuReadTime {
|
||||
delete(dm.lastCpuReadTime[ct], id)
|
||||
}
|
||||
}
|
||||
|
||||
// Creates a new http client for Docker or Podman API
|
||||
func newDockerManager(a *Agent) *dockerManager {
|
||||
dockerHost, exists := GetEnv("DOCKER_HOST")
|
||||
if exists {
|
||||
// return nil if set to empty string
|
||||
if dockerHost == "" {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
dockerHost = getDockerHost()
|
||||
}
|
||||
|
||||
parsedURL, err := url.Parse(dockerHost)
|
||||
if err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
transport := &http.Transport{
|
||||
DisableCompression: true,
|
||||
MaxConnsPerHost: 0,
|
||||
}
|
||||
|
||||
switch parsedURL.Scheme {
|
||||
case "unix":
|
||||
transport.DialContext = func(ctx context.Context, proto, addr string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, "unix", parsedURL.Path)
|
||||
}
|
||||
case "tcp", "http", "https":
|
||||
transport.DialContext = func(ctx context.Context, proto, addr string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, "tcp", parsedURL.Host)
|
||||
}
|
||||
default:
|
||||
slog.Error("Invalid DOCKER_HOST", "scheme", parsedURL.Scheme)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// configurable timeout
|
||||
timeout := time.Millisecond * time.Duration(dockerTimeoutMs)
|
||||
if t, set := GetEnv("DOCKER_TIMEOUT"); set {
|
||||
timeout, err = time.ParseDuration(t)
|
||||
if err != nil {
|
||||
slog.Error(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
slog.Info("DOCKER_TIMEOUT", "timeout", timeout)
|
||||
}
|
||||
|
||||
// Custom user-agent to avoid docker bug: https://github.com/docker/for-mac/issues/7575
|
||||
userAgentTransport := &userAgentRoundTripper{
|
||||
rt: transport,
|
||||
userAgent: "Docker-Client/",
|
||||
}
|
||||
|
||||
// Read container exclusion patterns from environment variable
|
||||
var excludeContainers []string
|
||||
if excludeStr, set := GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
|
||||
parts := strings.SplitSeq(excludeStr, ",")
|
||||
for part := range parts {
|
||||
trimmed := strings.TrimSpace(part)
|
||||
if trimmed != "" {
|
||||
excludeContainers = append(excludeContainers, trimmed)
|
||||
}
|
||||
}
|
||||
slog.Info("EXCLUDE_CONTAINERS", "patterns", excludeContainers)
|
||||
}
|
||||
|
||||
manager := &dockerManager{
|
||||
client: &http.Client{
|
||||
Timeout: timeout,
|
||||
Transport: userAgentTransport,
|
||||
},
|
||||
containerStatsMap: make(map[string]*container.Stats),
|
||||
sem: make(chan struct{}, 5),
|
||||
apiContainerList: []*container.ApiInfo{},
|
||||
apiStats: &container.ApiStats{},
|
||||
excludeContainers: excludeContainers,
|
||||
|
||||
// Initialize cache-time-aware tracking structures
|
||||
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||
}
|
||||
|
||||
// If using podman, return client
|
||||
if strings.Contains(dockerHost, "podman") {
|
||||
a.systemInfo.Podman = true
|
||||
manager.goodDockerVersion = true
|
||||
return manager
|
||||
}
|
||||
|
||||
// this can take up to 5 seconds with retry, so run in goroutine
|
||||
go manager.checkDockerVersion()
|
||||
|
||||
// give version check a chance to complete before returning
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
return manager
|
||||
}
|
||||
|
||||
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
||||
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
||||
func (dm *dockerManager) checkDockerVersion() {
|
||||
var err error
|
||||
var resp *http.Response
|
||||
var versionInfo struct {
|
||||
Version string `json:"Version"`
|
||||
}
|
||||
const versionMaxTries = 2
|
||||
for i := 1; i <= versionMaxTries; i++ {
|
||||
resp, err = dm.client.Get("http://localhost/version")
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if resp != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
if i < versionMaxTries {
|
||||
slog.Debug("Failed to get Docker version; retrying", "attempt", i, "error", err)
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err := dm.decode(resp, &versionInfo); err != nil {
|
||||
return
|
||||
}
|
||||
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
||||
if dockerVersion, err := semver.Parse(versionInfo.Version); err == nil && dockerVersion.Major > 24 {
|
||||
dm.goodDockerVersion = true
|
||||
} else {
|
||||
slog.Info(fmt.Sprintf("Docker %s is outdated. Upgrade if possible. See https://github.com/henrygd/beszel/issues/58", versionInfo.Version))
|
||||
}
|
||||
}
|
||||
|
||||
// Decodes Docker API JSON response using a reusable buffer and decoder. Not thread safe.
|
||||
func (dm *dockerManager) decode(resp *http.Response, d any) error {
|
||||
if dm.buf == nil {
|
||||
// initialize buffer with 256kb starting size
|
||||
dm.buf = bytes.NewBuffer(make([]byte, 0, 1024*256))
|
||||
dm.decoder = json.NewDecoder(dm.buf)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
defer dm.buf.Reset()
|
||||
_, err := dm.buf.ReadFrom(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dm.decoder.Decode(d)
|
||||
}
|
||||
|
||||
// Check the Docker and Podman sockets and return the first one that exists
|
||||
func getDockerHost() string {
|
||||
scheme := "unix://"
|
||||
socks := []string{"/var/run/docker.sock", fmt.Sprintf("/run/user/%v/podman/podman.sock", os.Getuid())}
|
||||
for _, sock := range socks {
|
||||
if _, err := os.Stat(sock); err == nil {
|
||||
return scheme + sock
|
||||
}
|
||||
}
|
||||
return scheme + socks[0]
|
||||
}
|
||||
|
||||
// getContainerInfo fetches the inspection data for a container
|
||||
func (dm *dockerManager) getContainerInfo(ctx context.Context, containerID string) ([]byte, error) {
|
||||
endpoint := fmt.Sprintf("http://localhost/containers/%s/json", containerID)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := dm.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
return nil, fmt.Errorf("container info request failed: %s: %s", resp.Status, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
// Remove sensitive environment variables from Config.Env
|
||||
var containerInfo map[string]any
|
||||
if err := json.NewDecoder(resp.Body).Decode(&containerInfo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config, ok := containerInfo["Config"].(map[string]any); ok {
|
||||
delete(config, "Env")
|
||||
}
|
||||
|
||||
return json.Marshal(containerInfo)
|
||||
}
|
||||
|
||||
// getLogs fetches the logs for a container
|
||||
func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (string, error) {
|
||||
endpoint := fmt.Sprintf("http://localhost/containers/%s/logs?stdout=1&stderr=1&tail=%d", containerID, dockerLogsTail)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
resp, err := dm.client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
return "", fmt.Errorf("logs request failed: %s: %s", resp.Status, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
var builder strings.Builder
|
||||
if err := decodeDockerLogStream(resp.Body, &builder); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return builder.String(), nil
|
||||
}
|
||||
|
||||
func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
|
||||
const headerSize = 8
|
||||
var header [headerSize]byte
|
||||
buf := make([]byte, 0, dockerLogsTail*200)
|
||||
totalBytesRead := 0
|
||||
|
||||
for {
|
||||
if _, err := io.ReadFull(reader, header[:]); err != nil {
|
||||
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
frameLen := binary.BigEndian.Uint32(header[4:])
|
||||
if frameLen == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Prevent memory exhaustion from excessively large frames
|
||||
if frameLen > maxLogFrameSize {
|
||||
return fmt.Errorf("log frame size (%d) exceeds maximum (%d)", frameLen, maxLogFrameSize)
|
||||
}
|
||||
|
||||
// Check if reading this frame would exceed total log size limit
|
||||
if totalBytesRead+int(frameLen) > maxTotalLogSize {
|
||||
// Read and discard remaining data to avoid blocking
|
||||
_, _ = io.Copy(io.Discard, io.LimitReader(reader, int64(frameLen)))
|
||||
slog.Debug("Truncating logs: limit reached", "read", totalBytesRead, "limit", maxTotalLogSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
buf = allocateBuffer(buf, int(frameLen))
|
||||
if _, err := io.ReadFull(reader, buf[:frameLen]); err != nil {
|
||||
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
||||
if len(buf) > 0 {
|
||||
builder.Write(buf[:min(int(frameLen), len(buf))])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
builder.Write(buf[:frameLen])
|
||||
totalBytesRead += int(frameLen)
|
||||
}
|
||||
}
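decodeDockerLogStream consumes Docker's multiplexed log format: each frame carries an 8-byte header whose first byte is the stream type (1 = stdout, 2 = stderr) and whose last four bytes hold the big-endian payload length, which is what the binary.BigEndian.Uint32(header[4:]) call reads. A self-contained sketch that builds two frames and decodes them:

package agent

import (
	"bytes"
	"encoding/binary"
	"strings"
	"testing"
)

func TestDecodeDockerLogStreamSketch(t *testing.T) {
	frame := func(streamType byte, payload string) []byte {
		header := make([]byte, 8)
		header[0] = streamType // 1 = stdout, 2 = stderr
		binary.BigEndian.PutUint32(header[4:], uint32(len(payload)))
		return append(header, payload...)
	}

	var stream bytes.Buffer
	stream.Write(frame(1, "hello from stdout\n"))
	stream.Write(frame(2, "warning on stderr\n"))

	var builder strings.Builder
	if err := decodeDockerLogStream(&stream, &builder); err != nil {
		t.Fatal(err)
	}
	if builder.String() != "hello from stdout\nwarning on stderr\n" {
		t.Errorf("unexpected output: %q", builder.String())
	}
}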
|
||||
|
||||
func allocateBuffer(current []byte, needed int) []byte {
|
||||
if cap(current) >= needed {
|
||||
return current[:needed]
|
||||
}
|
||||
return make([]byte, needed)
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
1205
agent/docker_test.go
Normal file
File diff suppressed because it is too large
483
agent/gpu.go
Normal file
@@ -0,0 +1,483 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"golang.org/x/exp/slog"
|
||||
)
|
||||
|
||||
const (
|
||||
// Commands
|
||||
nvidiaSmiCmd string = "nvidia-smi"
|
||||
rocmSmiCmd string = "rocm-smi"
|
||||
tegraStatsCmd string = "tegrastats"
|
||||
|
||||
// Polling intervals
|
||||
nvidiaSmiInterval string = "4" // in seconds
|
||||
tegraStatsInterval string = "3700" // in milliseconds
|
||||
rocmSmiInterval time.Duration = 4300 * time.Millisecond
|
||||
// Command retry and timeout constants
|
||||
retryWaitTime time.Duration = 5 * time.Second
|
||||
maxFailureRetries int = 5
|
||||
|
||||
// Unit Conversions
|
||||
mebibytesInAMegabyte float64 = 1.024 // nvidia-smi reports memory in MiB
|
||||
milliwattsInAWatt float64 = 1000.0 // tegrastats reports power in mW
|
||||
)
|
||||
|
||||
// GPUManager manages data collection for GPUs (either Nvidia or AMD)
|
||||
type GPUManager struct {
|
||||
sync.Mutex
|
||||
nvidiaSmi bool
|
||||
rocmSmi bool
|
||||
tegrastats bool
|
||||
intelGpuStats bool
|
||||
GpuDataMap map[string]*system.GPUData
|
||||
// lastAvgData stores the last calculated averages for each GPU
|
||||
// Used when a collection happens before new data arrives (Count == 0)
|
||||
lastAvgData map[string]system.GPUData
|
||||
// Per-cache-key tracking for delta calculations
|
||||
// cacheKey -> gpuId -> snapshot of last count/usage/power values
|
||||
lastSnapshots map[uint16]map[string]*gpuSnapshot
|
||||
}
|
||||
|
||||
// gpuSnapshot stores the last observed incremental values for delta tracking
|
||||
type gpuSnapshot struct {
|
||||
count uint32
|
||||
usage float64
|
||||
power float64
|
||||
powerPkg float64
|
||||
engines map[string]float64
|
||||
}
|
||||
|
||||
// RocmSmiJson represents the JSON structure of rocm-smi output
|
||||
type RocmSmiJson struct {
|
||||
ID string `json:"GUID"`
|
||||
Name string `json:"Card series"`
|
||||
Temperature string `json:"Temperature (Sensor edge) (C)"`
|
||||
MemoryUsed string `json:"VRAM Total Used Memory (B)"`
|
||||
MemoryTotal string `json:"VRAM Total Memory (B)"`
|
||||
Usage string `json:"GPU use (%)"`
|
||||
PowerPackage string `json:"Average Graphics Package Power (W)"`
|
||||
PowerSocket string `json:"Current Socket Graphics Package Power (W)"`
|
||||
}
|
||||
|
||||
// gpuCollector defines a collector for a specific GPU management utility (nvidia-smi or rocm-smi)
|
||||
type gpuCollector struct {
|
||||
name string
|
||||
cmdArgs []string
|
||||
parse func([]byte) bool // returns true if valid data was found
|
||||
buf []byte
|
||||
bufSize uint16
|
||||
}
|
||||
|
||||
var errNoValidData = fmt.Errorf("no valid GPU data found") // Error for missing data
|
||||
|
||||
// starts and manages the ongoing collection of GPU data for the specified GPU management utility
|
||||
func (c *gpuCollector) start() {
|
||||
for {
|
||||
err := c.collect()
|
||||
if err != nil {
|
||||
if err == errNoValidData {
|
||||
slog.Warn(c.name + " found no valid GPU data, stopping")
|
||||
break
|
||||
}
|
||||
slog.Warn(c.name+" failed, restarting", "err", err)
|
||||
time.Sleep(retryWaitTime)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// collect executes the command, parses output with the assigned parser function
|
||||
func (c *gpuCollector) collect() error {
|
||||
cmd := exec.Command(c.name, c.cmdArgs...)
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
if c.buf == nil {
|
||||
c.buf = make([]byte, 0, c.bufSize)
|
||||
}
|
||||
scanner.Buffer(c.buf, bufio.MaxScanTokenSize)
|
||||
|
||||
for scanner.Scan() {
|
||||
hasValidData := c.parse(scanner.Bytes())
|
||||
if !hasValidData {
|
||||
return errNoValidData
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return fmt.Errorf("scanner error: %w", err)
|
||||
}
|
||||
return cmd.Wait()
|
||||
}
|
||||
|
||||
// getJetsonParser returns a function to parse the output of tegrastats and update the GPUData map
|
||||
func (gm *GPUManager) getJetsonParser() func(output []byte) bool {
|
||||
// use closure to avoid recompiling the regex
|
||||
ramPattern := regexp.MustCompile(`RAM (\d+)/(\d+)MB`)
|
||||
gr3dPattern := regexp.MustCompile(`GR3D_FREQ (\d+)%`)
|
||||
tempPattern := regexp.MustCompile(`tj@(\d+\.?\d*)C`)
|
||||
// Orin Nano / NX do not have a GPU-specific power monitor
|
||||
// TODO: Maybe use VDD_IN for Nano / NX and add a total system power chart
|
||||
powerPattern := regexp.MustCompile(`(GPU_SOC|CPU_GPU_CV) (\d+)mW`)
|
||||
|
||||
// Jetson devices have only one GPU, so initialize it here
|
||||
gpuData := &system.GPUData{Name: "GPU"}
|
||||
gm.GpuDataMap["0"] = gpuData
|
||||
|
||||
return func(output []byte) bool {
|
||||
gm.Lock()
|
||||
defer gm.Unlock()
|
||||
// Parse RAM usage
|
||||
ramMatches := ramPattern.FindSubmatch(output)
|
||||
if ramMatches != nil {
|
||||
gpuData.MemoryUsed, _ = strconv.ParseFloat(string(ramMatches[1]), 64)
|
||||
gpuData.MemoryTotal, _ = strconv.ParseFloat(string(ramMatches[2]), 64)
|
||||
}
|
||||
// Parse GR3D (GPU) usage
|
||||
gr3dMatches := gr3dPattern.FindSubmatch(output)
|
||||
if gr3dMatches != nil {
|
||||
gr3dUsage, _ := strconv.ParseFloat(string(gr3dMatches[1]), 64)
|
||||
gpuData.Usage += gr3dUsage
|
||||
}
|
||||
// Parse temperature
|
||||
tempMatches := tempPattern.FindSubmatch(output)
|
||||
if tempMatches != nil {
|
||||
gpuData.Temperature, _ = strconv.ParseFloat(string(tempMatches[1]), 64)
|
||||
}
|
||||
// Parse power usage
|
||||
powerMatches := powerPattern.FindSubmatch(output)
|
||||
if powerMatches != nil {
|
||||
power, _ := strconv.ParseFloat(string(powerMatches[2]), 64)
|
||||
gpuData.Power += power / milliwattsInAWatt
|
||||
}
|
||||
gpuData.Count++
|
||||
return true
|
||||
}
|
||||
}
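The regexes above define the tegrastats fields the parser cares about (RAM used/total in MB, GR3D_FREQ percentage, a tj@ temperature, and a GPU power rail in mW). A sketch with a made-up sample line:

package agent

import (
	"testing"

	"github.com/henrygd/beszel/internal/entities/system"
)

func TestJetsonParserSketch(t *testing.T) {
	gm := &GPUManager{GpuDataMap: make(map[string]*system.GPUData)}
	parse := gm.getJetsonParser()

	// Invented tegrastats line containing the fields the regexes above match;
	// GPU_SOC 3100mW accumulates as roughly 3.1 W.
	line := []byte("RAM 2048/8192MB ... GR3D_FREQ 42% ... tj@55.2C ... GPU_SOC 3100mW")
	if !parse(line) {
		t.Fatal("expected the parser to accept the line")
	}

	gpu := gm.GpuDataMap["0"]
	if gpu.MemoryUsed != 2048 || gpu.MemoryTotal != 8192 || gpu.Usage != 42 {
		t.Errorf("unexpected values: %+v", gpu)
	}
}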
|
||||
|
||||
// parseNvidiaData parses the output of nvidia-smi and updates the GPUData map
|
||||
func (gm *GPUManager) parseNvidiaData(output []byte) bool {
|
||||
gm.Lock()
|
||||
defer gm.Unlock()
|
||||
scanner := bufio.NewScanner(bytes.NewReader(output))
|
||||
var valid bool
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text() // Or use scanner.Bytes() for []byte
|
||||
fields := strings.Split(strings.TrimSpace(line), ", ")
|
||||
if len(fields) < 7 {
|
||||
continue
|
||||
}
|
||||
valid = true
|
||||
id := fields[0]
|
||||
temp, _ := strconv.ParseFloat(fields[2], 64)
|
||||
memoryUsage, _ := strconv.ParseFloat(fields[3], 64)
|
||||
totalMemory, _ := strconv.ParseFloat(fields[4], 64)
|
||||
usage, _ := strconv.ParseFloat(fields[5], 64)
|
||||
power, _ := strconv.ParseFloat(fields[6], 64)
|
||||
// add the GPU if it doesn't exist yet
|
||||
if _, ok := gm.GpuDataMap[id]; !ok {
|
||||
name := strings.TrimPrefix(fields[1], "NVIDIA ")
|
||||
gm.GpuDataMap[id] = &system.GPUData{Name: strings.TrimSuffix(name, " Laptop GPU")}
|
||||
}
|
||||
// update gpu data
|
||||
gpu := gm.GpuDataMap[id]
|
||||
gpu.Temperature = temp
|
||||
gpu.MemoryUsed = memoryUsage / mebibytesInAMegabyte
|
||||
gpu.MemoryTotal = totalMemory / mebibytesInAMegabyte
|
||||
gpu.Usage += usage
|
||||
gpu.Power += power
|
||||
gpu.Count++
|
||||
}
|
||||
return valid
|
||||
}
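startCollector (below) runs nvidia-smi with --query-gpu=index,name,temperature.gpu,memory.used,memory.total,utilization.gpu,power.draw and --format=csv,noheader,nounits, so each parsed line is a seven-field CSV row. A sketch with an invented sample line:

package agent

import (
	"testing"

	"github.com/henrygd/beszel/internal/entities/system"
)

func TestParseNvidiaDataSketch(t *testing.T) {
	gm := &GPUManager{GpuDataMap: make(map[string]*system.GPUData)}
	// index, name, temp (C), mem used (MiB), mem total (MiB), util (%), power (W)
	line := []byte("0, NVIDIA GeForce RTX 3070, 52, 1234, 8192, 37, 115.4")

	if !gm.parseNvidiaData(line) {
		t.Fatal("expected valid data")
	}
	gpu := gm.GpuDataMap["0"]
	if gpu.Name != "GeForce RTX 3070" { // "NVIDIA " prefix is trimmed
		t.Errorf("unexpected name: %q", gpu.Name)
	}
	if gpu.Count != 1 {
		t.Errorf("expected one accumulated sample, got %v", gpu.Count)
	}
}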
|
||||
|
||||
// parseAmdData parses the output of rocm-smi and updates the GPUData map
|
||||
func (gm *GPUManager) parseAmdData(output []byte) bool {
|
||||
var rocmSmiInfo map[string]RocmSmiJson
|
||||
if err := json.Unmarshal(output, &rocmSmiInfo); err != nil || len(rocmSmiInfo) == 0 {
|
||||
return false
|
||||
}
|
||||
gm.Lock()
|
||||
defer gm.Unlock()
|
||||
for _, v := range rocmSmiInfo {
|
||||
var power float64
|
||||
if v.PowerPackage != "" {
|
||||
power, _ = strconv.ParseFloat(v.PowerPackage, 64)
|
||||
} else {
|
||||
power, _ = strconv.ParseFloat(v.PowerSocket, 64)
|
||||
}
|
||||
memoryUsage, _ := strconv.ParseFloat(v.MemoryUsed, 64)
|
||||
totalMemory, _ := strconv.ParseFloat(v.MemoryTotal, 64)
|
||||
usage, _ := strconv.ParseFloat(v.Usage, 64)
|
||||
|
||||
if _, ok := gm.GpuDataMap[v.ID]; !ok {
|
||||
gm.GpuDataMap[v.ID] = &system.GPUData{Name: v.Name}
|
||||
}
|
||||
gpu := gm.GpuDataMap[v.ID]
|
||||
gpu.Temperature, _ = strconv.ParseFloat(v.Temperature, 64)
|
||||
gpu.MemoryUsed = bytesToMegabytes(memoryUsage)
|
||||
gpu.MemoryTotal = bytesToMegabytes(totalMemory)
|
||||
gpu.Usage += usage
|
||||
gpu.Power += power
|
||||
gpu.Count++
|
||||
}
|
||||
return true
|
||||
}
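The JSON keys come straight from the RocmSmiJson struct tags above; the device entry and values in this sketch are invented:

package agent

import (
	"testing"

	"github.com/henrygd/beszel/internal/entities/system"
)

func TestParseAmdDataSketch(t *testing.T) {
	gm := &GPUManager{GpuDataMap: make(map[string]*system.GPUData)}
	output := []byte(`{"card0": {
		"GUID": "34756",
		"Card series": "Radeon RX 7800 XT",
		"Temperature (Sensor edge) (C)": "48.0",
		"VRAM Total Used Memory (B)": "1073741824",
		"VRAM Total Memory (B)": "17163091968",
		"GPU use (%)": "12",
		"Average Graphics Package Power (W)": "65.0"
	}}`)

	if !gm.parseAmdData(output) {
		t.Fatal("expected valid data")
	}
	gpu := gm.GpuDataMap["34756"]
	if gpu == nil || gpu.Name != "Radeon RX 7800 XT" {
		t.Errorf("unexpected GPU entry: %+v", gpu)
	}
}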
|
||||
|
||||
// GetCurrentData returns GPU utilization data averaged since the last call with this cacheKey
|
||||
func (gm *GPUManager) GetCurrentData(cacheKey uint16) map[string]system.GPUData {
|
||||
gm.Lock()
|
||||
defer gm.Unlock()
|
||||
|
||||
gm.initializeSnapshots(cacheKey)
|
||||
nameCounts := gm.countGPUNames()
|
||||
|
||||
gpuData := make(map[string]system.GPUData, len(gm.GpuDataMap))
|
||||
for id, gpu := range gm.GpuDataMap {
|
||||
gpuAvg := gm.calculateGPUAverage(id, gpu, cacheKey)
|
||||
gm.updateInstantaneousValues(&gpuAvg, gpu)
|
||||
gm.storeSnapshot(id, gpu, cacheKey)
|
||||
|
||||
// Append id to name if there are multiple GPUs with the same name
|
||||
if nameCounts[gpu.Name] > 1 {
|
||||
gpuAvg.Name = fmt.Sprintf("%s %s", gpu.Name, id)
|
||||
}
|
||||
gpuData[id] = gpuAvg
|
||||
}
|
||||
slog.Debug("GPU", "data", gpuData)
|
||||
return gpuData
|
||||
}
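Because each GPU entry accumulates Usage and Power and increments Count on every poll, GetCurrentData reports the average of only the samples collected since the last call with the same cache key. A sketch of that delta averaging with invented numbers:

package agent

import (
	"testing"

	"github.com/henrygd/beszel/internal/entities/system"
)

func TestGPUDeltaAverageSketch(t *testing.T) {
	gm := &GPUManager{GpuDataMap: map[string]*system.GPUData{
		"0": {Name: "GPU", Usage: 60, Power: 30, Count: 3}, // three accumulated samples
	}}

	first := gm.GetCurrentData(1)["0"]
	if first.Usage != 20 { // 60 / 3
		t.Errorf("got %v, want 20", first.Usage)
	}

	// Two more samples arrive before the next report for cache key 1.
	gm.GpuDataMap["0"].Usage = 150
	gm.GpuDataMap["0"].Count = 5

	second := gm.GetCurrentData(1)["0"]
	if second.Usage != 45 { // (150 - 60) / (5 - 3)
		t.Errorf("got %v, want 45", second.Usage)
	}
}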
|
||||
|
||||
// initializeSnapshots ensures snapshot maps are initialized for the given cache key
|
||||
func (gm *GPUManager) initializeSnapshots(cacheKey uint16) {
|
||||
if gm.lastAvgData == nil {
|
||||
gm.lastAvgData = make(map[string]system.GPUData)
|
||||
}
|
||||
if gm.lastSnapshots == nil {
|
||||
gm.lastSnapshots = make(map[uint16]map[string]*gpuSnapshot)
|
||||
}
|
||||
if gm.lastSnapshots[cacheKey] == nil {
|
||||
gm.lastSnapshots[cacheKey] = make(map[string]*gpuSnapshot)
|
||||
}
|
||||
}
|
||||
|
||||
// countGPUNames returns a map of GPU names to their occurrence count
|
||||
func (gm *GPUManager) countGPUNames() map[string]int {
|
||||
nameCounts := make(map[string]int)
|
||||
for _, gpu := range gm.GpuDataMap {
|
||||
nameCounts[gpu.Name]++
|
||||
}
|
||||
return nameCounts
|
||||
}
|
||||
|
||||
// calculateGPUAverage computes the average GPU metrics since the last snapshot for this cache key
|
||||
func (gm *GPUManager) calculateGPUAverage(id string, gpu *system.GPUData, cacheKey uint16) system.GPUData {
|
||||
lastSnapshot := gm.lastSnapshots[cacheKey][id]
|
||||
currentCount := uint32(gpu.Count)
|
||||
deltaCount := gm.calculateDeltaCount(currentCount, lastSnapshot)
|
||||
|
||||
// If no new data arrived, use last known average
|
||||
if deltaCount == 0 {
|
||||
return gm.lastAvgData[id] // zero value if not found
|
||||
}
|
||||
|
||||
// Calculate new average
|
||||
gpuAvg := *gpu
|
||||
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, lastSnapshot)
|
||||
|
||||
gpuAvg.Power = twoDecimals(deltaPower / float64(deltaCount))
|
||||
|
||||
if gpu.Engines != nil {
|
||||
// make fresh map for averaged engine metrics to avoid mutating
|
||||
// the accumulator map stored in gm.GpuDataMap
|
||||
gpuAvg.Engines = make(map[string]float64, len(gpu.Engines))
|
||||
gpuAvg.Usage = gm.calculateIntelGPUUsage(&gpuAvg, gpu, lastSnapshot, deltaCount)
|
||||
gpuAvg.PowerPkg = twoDecimals(deltaPowerPkg / float64(deltaCount))
|
||||
} else {
|
||||
gpuAvg.Usage = twoDecimals(deltaUsage / float64(deltaCount))
|
||||
}
|
||||
|
||||
gm.lastAvgData[id] = gpuAvg
|
||||
return gpuAvg
|
||||
}
|
||||
|
||||
// calculateDeltaCount returns the change in count since the last snapshot
|
||||
func (gm *GPUManager) calculateDeltaCount(currentCount uint32, lastSnapshot *gpuSnapshot) uint32 {
|
||||
if lastSnapshot != nil {
|
||||
return currentCount - lastSnapshot.count
|
||||
}
|
||||
return currentCount
|
||||
}
|
||||
|
||||
// calculateDeltas computes the change in usage, power, and powerPkg since the last snapshot
|
||||
func (gm *GPUManager) calculateDeltas(gpu *system.GPUData, lastSnapshot *gpuSnapshot) (deltaUsage, deltaPower, deltaPowerPkg float64) {
|
||||
if lastSnapshot != nil {
|
||||
return gpu.Usage - lastSnapshot.usage,
|
||||
gpu.Power - lastSnapshot.power,
|
||||
gpu.PowerPkg - lastSnapshot.powerPkg
|
||||
}
|
||||
return gpu.Usage, gpu.Power, gpu.PowerPkg
|
||||
}
|
||||
|
||||
// calculateIntelGPUUsage computes Intel GPU usage from engine metrics and returns max engine usage
|
||||
func (gm *GPUManager) calculateIntelGPUUsage(gpuAvg, gpu *system.GPUData, lastSnapshot *gpuSnapshot, deltaCount uint32) float64 {
|
||||
maxEngineUsage := 0.0
|
||||
for name, engine := range gpu.Engines {
|
||||
var deltaEngine float64
|
||||
if lastSnapshot != nil && lastSnapshot.engines != nil {
|
||||
deltaEngine = engine - lastSnapshot.engines[name]
|
||||
} else {
|
||||
deltaEngine = engine
|
||||
}
|
||||
gpuAvg.Engines[name] = twoDecimals(deltaEngine / float64(deltaCount))
|
||||
maxEngineUsage = max(maxEngineUsage, deltaEngine/float64(deltaCount))
|
||||
}
|
||||
return twoDecimals(maxEngineUsage)
|
||||
}
|
||||
|
||||
// updateInstantaneousValues updates values that should reflect current state, not averages
|
||||
func (gm *GPUManager) updateInstantaneousValues(gpuAvg *system.GPUData, gpu *system.GPUData) {
|
||||
gpuAvg.Temperature = twoDecimals(gpu.Temperature)
|
||||
gpuAvg.MemoryUsed = twoDecimals(gpu.MemoryUsed)
|
||||
gpuAvg.MemoryTotal = twoDecimals(gpu.MemoryTotal)
|
||||
}
|
||||
|
||||
// storeSnapshot saves the current GPU state for this cache key
|
||||
func (gm *GPUManager) storeSnapshot(id string, gpu *system.GPUData, cacheKey uint16) {
|
||||
snapshot := &gpuSnapshot{
|
||||
count: uint32(gpu.Count),
|
||||
usage: gpu.Usage,
|
||||
power: gpu.Power,
|
||||
powerPkg: gpu.PowerPkg,
|
||||
}
|
||||
if gpu.Engines != nil {
|
||||
snapshot.engines = make(map[string]float64, len(gpu.Engines))
|
||||
maps.Copy(snapshot.engines, gpu.Engines)
|
||||
}
|
||||
gm.lastSnapshots[cacheKey][id] = snapshot
|
||||
}
|
||||
|
||||
// detectGPUs checks for the presence of GPU management tools (nvidia-smi, rocm-smi, tegrastats)
|
||||
// in the system path. It sets the corresponding flags in the GPUManager struct if any of these
|
||||
// tools are found. If none of the tools are found, it returns an error indicating that no GPU
|
||||
// management tools are available.
|
||||
func (gm *GPUManager) detectGPUs() error {
|
||||
if _, err := exec.LookPath(nvidiaSmiCmd); err == nil {
|
||||
gm.nvidiaSmi = true
|
||||
}
|
||||
if _, err := exec.LookPath(rocmSmiCmd); err == nil {
|
||||
gm.rocmSmi = true
|
||||
}
|
||||
if _, err := exec.LookPath(tegraStatsCmd); err == nil {
|
||||
gm.tegrastats = true
|
||||
gm.nvidiaSmi = false
|
||||
}
|
||||
if _, err := exec.LookPath(intelGpuStatsCmd); err == nil {
|
||||
gm.intelGpuStats = true
|
||||
}
|
||||
if gm.nvidiaSmi || gm.rocmSmi || gm.tegrastats || gm.intelGpuStats {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("no GPU found - install nvidia-smi, rocm-smi, tegrastats, or intel_gpu_top")
|
||||
}
|
||||
|
||||
// startCollector starts the appropriate GPU data collector based on the command
|
||||
func (gm *GPUManager) startCollector(command string) {
|
||||
collector := gpuCollector{
|
||||
name: command,
|
||||
bufSize: 10 * 1024,
|
||||
}
|
||||
switch command {
|
||||
case intelGpuStatsCmd:
|
||||
go func() {
|
||||
failures := 0
|
||||
for {
|
||||
if err := gm.collectIntelStats(); err != nil {
|
||||
failures++
|
||||
if failures > maxFailureRetries {
|
||||
break
|
||||
}
|
||||
slog.Warn("Error collecting Intel GPU data; see https://beszel.dev/guide/gpu", "err", err)
|
||||
time.Sleep(retryWaitTime)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}()
|
||||
case nvidiaSmiCmd:
|
||||
collector.cmdArgs = []string{
|
||||
"-l", nvidiaSmiInterval,
|
||||
"--query-gpu=index,name,temperature.gpu,memory.used,memory.total,utilization.gpu,power.draw",
|
||||
"--format=csv,noheader,nounits",
|
||||
}
|
||||
collector.parse = gm.parseNvidiaData
|
||||
go collector.start()
|
||||
case tegraStatsCmd:
|
||||
collector.cmdArgs = []string{"--interval", tegraStatsInterval}
|
||||
collector.parse = gm.getJetsonParser()
|
||||
go collector.start()
|
||||
case rocmSmiCmd:
|
||||
collector.cmdArgs = []string{"--showid", "--showtemp", "--showuse", "--showpower", "--showproductname", "--showmeminfo", "vram", "--json"}
|
||||
collector.parse = gm.parseAmdData
|
||||
go func() {
|
||||
failures := 0
|
||||
for {
|
||||
if err := collector.collect(); err != nil {
|
||||
failures++
|
||||
if failures > maxFailureRetries {
|
||||
break
|
||||
}
|
||||
slog.Warn("Error collecting AMD GPU data", "err", err)
|
||||
}
|
||||
time.Sleep(rocmSmiInterval)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// NewGPUManager creates and initializes a new GPUManager
|
||||
func NewGPUManager() (*GPUManager, error) {
|
||||
if skipGPU, _ := GetEnv("SKIP_GPU"); skipGPU == "true" {
|
||||
return nil, nil
|
||||
}
|
||||
var gm GPUManager
|
||||
if err := gm.detectGPUs(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
gm.GpuDataMap = make(map[string]*system.GPUData)
|
||||
|
||||
if gm.nvidiaSmi {
|
||||
gm.startCollector(nvidiaSmiCmd)
|
||||
}
|
||||
if gm.rocmSmi {
|
||||
gm.startCollector(rocmSmiCmd)
|
||||
}
|
||||
if gm.tegrastats {
|
||||
gm.startCollector(tegraStatsCmd)
|
||||
}
|
||||
if gm.intelGpuStats {
|
||||
gm.startCollector(intelGpuStatsCmd)
|
||||
}
|
||||
|
||||
return &gm, nil
|
||||
}
|
||||
206
agent/gpu_intel.go
Normal file
@@ -0,0 +1,206 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
)
|
||||
|
||||
const (
|
||||
intelGpuStatsCmd string = "intel_gpu_top"
|
||||
intelGpuStatsInterval string = "3300" // in milliseconds
|
||||
)
|
||||
|
||||
type intelGpuStats struct {
|
||||
PowerGPU float64
|
||||
PowerPkg float64
|
||||
Engines map[string]float64
|
||||
}
|
||||
|
||||
// updateIntelFromStats updates aggregated GPU data from a single intelGpuStats sample
|
||||
func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
|
||||
gm.Lock()
|
||||
defer gm.Unlock()
|
||||
|
||||
// only one GPU for now - the command doesn't report all devices by default
|
||||
gpuData, ok := gm.GpuDataMap["0"]
|
||||
if !ok {
|
||||
gpuData = &system.GPUData{Name: "GPU", Engines: make(map[string]float64)}
|
||||
gm.GpuDataMap["0"] = gpuData
|
||||
}
|
||||
|
||||
gpuData.Power += sample.PowerGPU
|
||||
gpuData.PowerPkg += sample.PowerPkg
|
||||
|
||||
if gpuData.Engines == nil {
|
||||
gpuData.Engines = make(map[string]float64, len(sample.Engines))
|
||||
}
|
||||
for name, engine := range sample.Engines {
|
||||
gpuData.Engines[name] += engine
|
||||
}
|
||||
|
||||
gpuData.Count++
|
||||
return true
|
||||
}
|
||||
|
||||
// collectIntelStats executes intel_gpu_top in text mode (-l) and parses the output
|
||||
func (gm *GPUManager) collectIntelStats() (err error) {
|
||||
// Build command arguments, optionally selecting a device via -d
|
||||
args := []string{"-s", intelGpuStatsInterval, "-l"}
|
||||
if dev, ok := GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
|
||||
args = append(args, "-d", dev)
|
||||
}
|
||||
cmd := exec.Command(intelGpuStatsCmd, args...)
|
||||
// Avoid blocking if intel_gpu_top writes to stderr
|
||||
cmd.Stderr = io.Discard
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure we always reap the child to avoid zombies on any return path and
|
||||
// propagate a non-zero exit code if no other error was set.
|
||||
defer func() {
|
||||
// Best-effort close of the pipe (unblock the child if it writes)
|
||||
_ = stdout.Close()
|
||||
if cmd.ProcessState == nil || !cmd.ProcessState.Exited() {
|
||||
_ = cmd.Process.Kill()
|
||||
}
|
||||
if waitErr := cmd.Wait(); err == nil && waitErr != nil {
|
||||
err = waitErr
|
||||
}
|
||||
}()
|
||||
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
var header1 string
|
||||
var engineNames []string
|
||||
var friendlyNames []string
|
||||
var preEngineCols int
|
||||
var powerIndex int
|
||||
var hadDataRow bool
|
||||
// skip first data row because it sometimes has erroneous data
|
||||
var skippedFirstDataRow bool
|
||||
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// first header line
|
||||
if strings.HasPrefix(line, "Freq") {
|
||||
header1 = line
|
||||
continue
|
||||
}
|
||||
|
||||
// second header line
|
||||
if strings.HasPrefix(line, "req") {
|
||||
engineNames, friendlyNames, powerIndex, preEngineCols = gm.parseIntelHeaders(header1, line)
|
||||
continue
|
||||
}
|
||||
|
||||
// Data row
|
||||
if !skippedFirstDataRow {
|
||||
skippedFirstDataRow = true
|
||||
continue
|
||||
}
|
||||
sample, err := gm.parseIntelData(line, engineNames, friendlyNames, powerIndex, preEngineCols)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hadDataRow = true
|
||||
gm.updateIntelFromStats(&sample)
|
||||
}
|
||||
if scanErr := scanner.Err(); scanErr != nil {
|
||||
return scanErr
|
||||
}
|
||||
if !hadDataRow {
|
||||
return errNoValidData
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gm *GPUManager) parseIntelHeaders(header1 string, header2 string) (engineNames []string, friendlyNames []string, powerIndex int, preEngineCols int) {
|
||||
// Build indexes
|
||||
h1 := strings.Fields(header1)
|
||||
h2 := strings.Fields(header2)
|
||||
powerIndex = -1 // Initialize to -1, will be set to actual index if found
|
||||
// Collect engine names from header1
|
||||
for _, col := range h1 {
|
||||
key := strings.TrimRightFunc(col, func(r rune) bool {
|
||||
return (r >= '0' && r <= '9') || r == '/'
|
||||
})
|
||||
var friendly string
|
||||
switch key {
|
||||
case "RCS":
|
||||
friendly = "Render/3D"
|
||||
case "BCS":
|
||||
friendly = "Blitter"
|
||||
case "VCS":
|
||||
friendly = "Video"
|
||||
case "VECS":
|
||||
friendly = "VideoEnhance"
|
||||
case "CCS":
|
||||
friendly = "Compute"
|
||||
default:
|
||||
continue
|
||||
}
|
||||
engineNames = append(engineNames, key)
|
||||
friendlyNames = append(friendlyNames, friendly)
|
||||
}
|
||||
// find power gpu index among pre-engine columns
|
||||
if n := len(engineNames); n > 0 {
|
||||
preEngineCols = max(len(h2)-3*n, 0)
|
||||
limit := min(len(h2), preEngineCols)
|
||||
for i := range limit {
|
||||
if strings.EqualFold(h2[i], "gpu") {
|
||||
powerIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return engineNames, friendlyNames, powerIndex, preEngineCols
|
||||
}
|
||||
|
||||
func (gm *GPUManager) parseIntelData(line string, engineNames []string, friendlyNames []string, powerIndex int, preEngineCols int) (sample intelGpuStats, err error) {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) == 0 {
|
||||
return sample, errNoValidData
|
||||
}
|
||||
// Make sure row has enough columns for engines
|
||||
if need := preEngineCols + 3*len(engineNames); len(fields) < need {
|
||||
return sample, errNoValidData
|
||||
}
|
||||
if powerIndex >= 0 && powerIndex < len(fields) {
|
||||
if v, perr := strconv.ParseFloat(fields[powerIndex], 64); perr == nil {
|
||||
sample.PowerGPU = v
|
||||
}
|
||||
if v, perr := strconv.ParseFloat(fields[powerIndex+1], 64); perr == nil {
|
||||
sample.PowerPkg = v
|
||||
}
|
||||
}
|
||||
if len(engineNames) > 0 {
|
||||
sample.Engines = make(map[string]float64, len(engineNames))
|
||||
for k := range engineNames {
|
||||
base := preEngineCols + 3*k
|
||||
if base < len(fields) {
|
||||
busy := 0.0
|
||||
if v, e := strconv.ParseFloat(fields[base], 64); e == nil {
|
||||
busy = v
|
||||
}
|
||||
cur := sample.Engines[friendlyNames[k]]
|
||||
sample.Engines[friendlyNames[k]] = cur + busy
|
||||
} else {
|
||||
sample.Engines[friendlyNames[k]] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
return sample, nil
|
||||
}
|
||||
1676
agent/gpu_test.go
Normal file
File diff suppressed because it is too large
205
agent/handlers.go
Normal file
@@ -0,0 +1,205 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
|
||||
"golang.org/x/exp/slog"
|
||||
)
|
||||
|
||||
// HandlerContext provides context for request handlers
|
||||
type HandlerContext struct {
|
||||
Client *WebSocketClient
|
||||
Agent *Agent
|
||||
Request *common.HubRequest[cbor.RawMessage]
|
||||
RequestID *uint32
|
||||
HubVerified bool
|
||||
// SendResponse abstracts how a handler sends responses (WS or SSH)
|
||||
SendResponse func(data any, requestID *uint32) error
|
||||
}
|
||||
|
||||
// RequestHandler defines the interface for handling specific websocket request types
|
||||
type RequestHandler interface {
|
||||
// Handle processes the request and returns an error if unsuccessful
|
||||
Handle(hctx *HandlerContext) error
|
||||
}
|
||||
|
||||
// Responder sends handler responses back to the hub (over WS or SSH)
|
||||
type Responder interface {
|
||||
SendResponse(data any, requestID *uint32) error
|
||||
}
|
||||
|
||||
// HandlerRegistry manages the mapping between actions and their handlers
|
||||
type HandlerRegistry struct {
|
||||
handlers map[common.WebSocketAction]RequestHandler
|
||||
}
|
||||
|
||||
// NewHandlerRegistry creates a new handler registry with default handlers
|
||||
func NewHandlerRegistry() *HandlerRegistry {
|
||||
registry := &HandlerRegistry{
|
||||
		handlers: make(map[common.WebSocketAction]RequestHandler),
	}

	registry.Register(common.GetData, &GetDataHandler{})
	registry.Register(common.CheckFingerprint, &CheckFingerprintHandler{})
	registry.Register(common.GetContainerLogs, &GetContainerLogsHandler{})
	registry.Register(common.GetContainerInfo, &GetContainerInfoHandler{})
	registry.Register(common.GetSmartData, &GetSmartDataHandler{})
	registry.Register(common.GetSystemdInfo, &GetSystemdInfoHandler{})

	return registry
}

// Register registers a handler for a specific action type
func (hr *HandlerRegistry) Register(action common.WebSocketAction, handler RequestHandler) {
	hr.handlers[action] = handler
}

// Handle routes the request to the appropriate handler
func (hr *HandlerRegistry) Handle(hctx *HandlerContext) error {
	handler, exists := hr.handlers[hctx.Request.Action]
	if !exists {
		return fmt.Errorf("unknown action: %d", hctx.Request.Action)
	}

	// Check verification requirement - default to requiring verification
	if hctx.Request.Action != common.CheckFingerprint && !hctx.HubVerified {
		return errors.New("hub not verified")
	}

	// Log handler execution for debugging
	// slog.Debug("Executing handler", "action", hctx.Request.Action)

	return handler.Handle(hctx)
}

// GetHandler returns the handler for a specific action
func (hr *HandlerRegistry) GetHandler(action common.WebSocketAction) (RequestHandler, bool) {
	handler, exists := hr.handlers[action]
	return handler, exists
}

////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////

// GetDataHandler handles system data requests
type GetDataHandler struct{}

func (h *GetDataHandler) Handle(hctx *HandlerContext) error {
	var options common.DataRequestOptions
	_ = cbor.Unmarshal(hctx.Request.Data, &options)

	sysStats := hctx.Agent.gatherStats(options.CacheTimeMs)
	return hctx.SendResponse(sysStats, hctx.RequestID)
}

////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////

// CheckFingerprintHandler handles authentication challenges
type CheckFingerprintHandler struct{}

func (h *CheckFingerprintHandler) Handle(hctx *HandlerContext) error {
	return hctx.Client.handleAuthChallenge(hctx.Request, hctx.RequestID)
}

////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////

// GetContainerLogsHandler handles container log requests
type GetContainerLogsHandler struct{}

func (h *GetContainerLogsHandler) Handle(hctx *HandlerContext) error {
	if hctx.Agent.dockerManager == nil {
		return hctx.SendResponse("", hctx.RequestID)
	}

	var req common.ContainerLogsRequest
	if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
		return err
	}

	ctx := context.Background()
	logContent, err := hctx.Agent.dockerManager.getLogs(ctx, req.ContainerID)
	if err != nil {
		return err
	}

	return hctx.SendResponse(logContent, hctx.RequestID)
}

////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////

// GetContainerInfoHandler handles container info requests
type GetContainerInfoHandler struct{}

func (h *GetContainerInfoHandler) Handle(hctx *HandlerContext) error {
	if hctx.Agent.dockerManager == nil {
		return hctx.SendResponse("", hctx.RequestID)
	}

	var req common.ContainerInfoRequest
	if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
		return err
	}

	ctx := context.Background()
	info, err := hctx.Agent.dockerManager.getContainerInfo(ctx, req.ContainerID)
	if err != nil {
		return err
	}

	return hctx.SendResponse(string(info), hctx.RequestID)
}

////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////

// GetSmartDataHandler handles SMART data requests
type GetSmartDataHandler struct{}

func (h *GetSmartDataHandler) Handle(hctx *HandlerContext) error {
	if hctx.Agent.smartManager == nil {
		// return empty map to indicate no data
		return hctx.SendResponse(map[string]smart.SmartData{}, hctx.RequestID)
	}
	if err := hctx.Agent.smartManager.Refresh(false); err != nil {
		slog.Debug("smart refresh failed", "err", err)
	}
	data := hctx.Agent.smartManager.GetCurrentData()
	return hctx.SendResponse(data, hctx.RequestID)
}

////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////

// GetSystemdInfoHandler handles detailed systemd service info requests
type GetSystemdInfoHandler struct{}

func (h *GetSystemdInfoHandler) Handle(hctx *HandlerContext) error {
	if hctx.Agent.systemdManager == nil {
		return errors.ErrUnsupported
	}

	var req common.SystemdInfoRequest
	if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
		return err
	}
	if req.ServiceName == "" {
		return errors.New("service name is required")
	}

	details, err := hctx.Agent.systemdManager.getServiceDetails(req.ServiceName)
	if err != nil {
		return err
	}

	return hctx.SendResponse(details, hctx.RequestID)
}
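For readers skimming the diff, here is a minimal sketch of how a decoded hub message is expected to flow through the registry above. It assumes the snippet sits in the same agent package (so slog and the types above are in scope); the routeHubRequest wrapper itself is illustrative and not part of this changeset.

// routeHubRequest is an illustrative wrapper (not part of the agent) showing
// the dispatch path for a single decoded hub request.
func routeHubRequest(registry *HandlerRegistry, hctx *HandlerContext) error {
	// Handle rejects every action except CheckFingerprint until the hub is
	// verified, then routes to whichever handler was registered for the action.
	if err := registry.Handle(hctx); err != nil {
		slog.Debug("hub request failed", "action", hctx.Request.Action, "err", err)
		return err
	}
	return nil
}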
112	agent/handlers_test.go	Normal file
@@ -0,0 +1,112 @@
//go:build testing
// +build testing

package agent

import (
	"testing"

	"github.com/fxamacker/cbor/v2"
	"github.com/henrygd/beszel/internal/common"
	"github.com/stretchr/testify/assert"
)

// MockHandler for testing
type MockHandler struct {
	requiresVerification bool
	description          string
	handleFunc           func(ctx *HandlerContext) error
}

func (m *MockHandler) Handle(ctx *HandlerContext) error {
	if m.handleFunc != nil {
		return m.handleFunc(ctx)
	}
	return nil
}

func (m *MockHandler) RequiresVerification() bool {
	return m.requiresVerification
}

// TestHandlerRegistry tests the handler registry functionality
func TestHandlerRegistry(t *testing.T) {
	t.Run("default registration", func(t *testing.T) {
		registry := NewHandlerRegistry()

		// Check default handlers are registered
		getDataHandler, exists := registry.GetHandler(common.GetData)
		assert.True(t, exists)
		assert.IsType(t, &GetDataHandler{}, getDataHandler)

		fingerprintHandler, exists := registry.GetHandler(common.CheckFingerprint)
		assert.True(t, exists)
		assert.IsType(t, &CheckFingerprintHandler{}, fingerprintHandler)
	})

	t.Run("custom handler registration", func(t *testing.T) {
		registry := NewHandlerRegistry()
		mockHandler := &MockHandler{
			requiresVerification: true,
			description:          "Test handler",
		}

		// Register a custom handler for a mock action
		const mockAction common.WebSocketAction = 99
		registry.Register(mockAction, mockHandler)

		// Verify registration
		handler, exists := registry.GetHandler(mockAction)
		assert.True(t, exists)
		assert.Equal(t, mockHandler, handler)
	})

	t.Run("unknown action", func(t *testing.T) {
		registry := NewHandlerRegistry()
		ctx := &HandlerContext{
			Request: &common.HubRequest[cbor.RawMessage]{
				Action: common.WebSocketAction(255), // Unknown action
			},
			HubVerified: true,
		}

		err := registry.Handle(ctx)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "unknown action: 255")
	})

	t.Run("verification required", func(t *testing.T) {
		registry := NewHandlerRegistry()
		ctx := &HandlerContext{
			Request: &common.HubRequest[cbor.RawMessage]{
				Action: common.GetData, // Requires verification
			},
			HubVerified: false, // Not verified
		}

		err := registry.Handle(ctx)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "hub not verified")
	})
}

// TestCheckFingerprintHandler tests the CheckFingerprint handler
func TestCheckFingerprintHandler(t *testing.T) {
	handler := &CheckFingerprintHandler{}

	t.Run("handle with invalid data", func(t *testing.T) {
		client := &WebSocketClient{}
		ctx := &HandlerContext{
			Client:      client,
			HubVerified: false,
			Request: &common.HubRequest[cbor.RawMessage]{
				Action: common.CheckFingerprint,
				Data:   cbor.RawMessage{}, // Empty/invalid data
			},
		}

		// Should fail to decode the fingerprint request
		err := handler.Handle(ctx)
		assert.Error(t, err)
	})
}
43	agent/health/health.go	Normal file
@@ -0,0 +1,43 @@
// Package health provides functions to check and update the health of the agent.
// It uses a file in the temp directory to store the timestamp of the last connection attempt.
// If the timestamp is older than 90 seconds, the agent is considered unhealthy.
// NB: The agent must be started with the Start() method to be considered healthy.
package health

import (
	"errors"
	"log"
	"os"
	"path/filepath"
	"time"
)

// healthFile is the path to the health file
var healthFile = filepath.Join(os.TempDir(), "beszel_health")

// Check checks if the agent is connected by checking the modification time of the health file
func Check() error {
	fileInfo, err := os.Stat(healthFile)
	if err != nil {
		return err
	}
	if time.Since(fileInfo.ModTime()) > 91*time.Second {
		log.Println("over 90 seconds since last connection")
		return errors.New("unhealthy")
	}
	return nil
}

// Update updates the modification time of the health file
func Update() error {
	file, err := os.Create(healthFile)
	if err != nil {
		return err
	}
	return file.Close()
}

// CleanUp removes the health file
func CleanUp() error {
	return os.Remove(healthFile)
}
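A minimal sketch of how the package above could be wired up: the agent refreshes the health file after each successful connection event, and a separate healthcheck process calls Check and exits non-zero when the file is stale. The channel, loop, and import path (inferred from the file location agent/health) are illustrative assumptions, not code from this diff.

package main

import (
	"log"
	"os"

	"github.com/henrygd/beszel/agent/health"
)

// exampleHealthLoop keeps the health file fresh while the agent stays connected.
// The connected channel is a stand-in for whatever reconnect signal the agent uses.
func exampleHealthLoop(connected <-chan struct{}) {
	for range connected {
		if err := health.Update(); err != nil {
			log.Println("failed to update health file:", err)
		}
	}
}

// exampleHealthcheck is what a container healthcheck command might run:
// Check fails if the file is missing or older than roughly 90 seconds.
func main() {
	if err := health.Check(); err != nil {
		os.Exit(1)
	}
}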
67	agent/health/health_test.go	Normal file
@@ -0,0 +1,67 @@
//go:build testing
// +build testing

package health

import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"testing/synctest"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestHealth(t *testing.T) {
	// Override healthFile to use a temporary directory for this test.
	originalHealthFile := healthFile
	tmpDir := t.TempDir()
	healthFile = filepath.Join(tmpDir, "beszel_health_test")
	defer func() { healthFile = originalHealthFile }()

	t.Run("check with no health file", func(t *testing.T) {
		err := Check()
		require.Error(t, err)
		assert.True(t, os.IsNotExist(err), "expected a file-not-exist error, but got: %v", err)
	})

	t.Run("update and check", func(t *testing.T) {
		err := Update()
		require.NoError(t, err, "Update() failed")

		err = Check()
		assert.NoError(t, err, "Check() failed immediately after Update()")
	})

	// This test uses synctest to simulate time passing.
	// NOTE: This test requires GOEXPERIMENT=synctest to run.
	t.Run("check with simulated time", func(t *testing.T) {
		synctest.Test(t, func(t *testing.T) {
			// Update the file to set the initial timestamp.
			require.NoError(t, Update(), "Update() failed inside synctest")

			// Set the mtime to the current fake time to align the file's timestamp with the simulated clock.
			now := time.Now()
			require.NoError(t, os.Chtimes(healthFile, now, now), "Chtimes failed")

			// Wait a duration less than the threshold.
			time.Sleep(89 * time.Second)
			synctest.Wait()

			// The check should still pass.
			assert.NoError(t, Check(), "Check() failed after 89s")

			// Wait for the total duration to exceed the threshold.
			time.Sleep(5 * time.Second)
			synctest.Wait()

			// The check should now fail as unhealthy.
			err := Check()
			require.Error(t, err, "Check() should have failed after 91s")
			assert.Equal(t, "unhealthy", err.Error(), "Check() returned wrong error")
		})
	})
}
80	agent/lhm/beszel_lhm.cs	Normal file
@@ -0,0 +1,80 @@
using System;
using System.Globalization;
using LibreHardwareMonitor.Hardware;

class Program
{
    static void Main()
    {
        var computer = new Computer
        {
            IsCpuEnabled = true,
            IsGpuEnabled = true,
            IsMemoryEnabled = true,
            IsMotherboardEnabled = true,
            IsStorageEnabled = true,
            // IsPsuEnabled = true,
            // IsNetworkEnabled = true,
        };
        computer.Open();

        var reader = Console.In;
        var writer = Console.Out;

        string line;
        while ((line = reader.ReadLine()) != null)
        {
            if (line.Trim().Equals("getTemps", StringComparison.OrdinalIgnoreCase))
            {
                foreach (var hw in computer.Hardware)
                {
                    // process main hardware sensors
                    ProcessSensors(hw, writer);

                    // process subhardware sensors
                    foreach (var subhardware in hw.SubHardware)
                    {
                        ProcessSensors(subhardware, writer);
                    }
                }
                // send empty line to signal end of sensor data
                writer.WriteLine();
                writer.Flush();
            }
        }

        computer.Close();
    }

    static void ProcessSensors(IHardware hardware, System.IO.TextWriter writer)
    {
        var updated = false;
        foreach (var sensor in hardware.Sensors)
        {
            var validTemp = sensor.SensorType == SensorType.Temperature && sensor.Value.HasValue;
            if (!validTemp || sensor.Name.Contains("Distance"))
            {
                continue;
            }

            if (!updated)
            {
                hardware.Update();
                updated = true;
            }

            var name = sensor.Name;
            // if sensor.Name starts with "Temperature" replace with hardware.Identifier but retain the rest of the name.
            // usually this is a number like Temperature 3
            if (sensor.Name.StartsWith("Temperature"))
            {
                name = hardware.Identifier.ToString().Replace("/", "_").TrimStart('_') + sensor.Name.Substring(11);
            }

            // invariant culture assures the value is parsable as a float
            var value = sensor.Value.Value.ToString("0.##", CultureInfo.InvariantCulture);
            // write the name and value to the writer
            writer.WriteLine($"{name}|{value}");
        }
    }
}
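The helper above answers each "getTemps" request with one "name|value" line per temperature sensor and a blank line to end the batch. The agent-side reader that consumes this stream is not part of this diff, so the following Go sketch of the parsing logic is hypothetical; only the line format and the InvariantCulture formatting are taken from the C# code above.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// parseLhmBatch reads "name|value" lines until a blank line and returns the
// parsed temperatures in °C. Hypothetical reader for the helper's output.
func parseLhmBatch(r *bufio.Reader) (map[string]float64, error) {
	temps := make(map[string]float64)
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			return temps, err
		}
		line = strings.TrimSpace(line)
		if line == "" {
			// blank line marks the end of one getTemps batch
			return temps, nil
		}
		name, value, ok := strings.Cut(line, "|")
		if !ok {
			continue
		}
		// values are written with InvariantCulture ("0.##"), so ParseFloat handles them
		if v, err := strconv.ParseFloat(value, 64); err == nil {
			temps[name] = v
		}
	}
}

func main() {
	temps, _ := parseLhmBatch(bufio.NewReader(os.Stdin))
	fmt.Println(temps)
}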
11	agent/lhm/beszel_lhm.csproj	Normal file
@@ -0,0 +1,11 @@
<Project Sdk="Microsoft.NET.Sdk">
	<PropertyGroup>
		<OutputType>Exe</OutputType>
		<TargetFramework>net48</TargetFramework>
		<Platforms>x64</Platforms>
	</PropertyGroup>

	<ItemGroup>
		<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.4" />
	</ItemGroup>
</Project>
259	agent/network.go	Normal file
@@ -0,0 +1,259 @@
package agent

import (
	"fmt"
	"log/slog"
	"path"
	"strings"
	"time"

	"github.com/henrygd/beszel/agent/deltatracker"
	"github.com/henrygd/beszel/internal/entities/system"
	psutilNet "github.com/shirou/gopsutil/v4/net"
)

// NicConfig controls inclusion/exclusion of network interfaces via the NICS env var
//
// Behavior mirrors SensorConfig's matching logic:
// - Leading '-' means blacklist mode; otherwise whitelist mode
// - Supports '*' wildcards using path.Match
// - In whitelist mode with an empty list, no NICs are selected
// - In blacklist mode with an empty list, all NICs are selected
type NicConfig struct {
	nics         map[string]struct{}
	isBlacklist  bool
	hasWildcards bool
}

func newNicConfig(nicsEnvVal string) *NicConfig {
	cfg := &NicConfig{
		nics: make(map[string]struct{}),
	}
	if strings.HasPrefix(nicsEnvVal, "-") {
		cfg.isBlacklist = true
		nicsEnvVal = nicsEnvVal[1:]
	}
	for nic := range strings.SplitSeq(nicsEnvVal, ",") {
		nic = strings.TrimSpace(nic)
		if nic != "" {
			cfg.nics[nic] = struct{}{}
			if strings.Contains(nic, "*") {
				cfg.hasWildcards = true
			}
		}
	}
	return cfg
}

// isValidNic determines if a NIC should be included based on NicConfig rules
func isValidNic(nicName string, cfg *NicConfig) bool {
	// Empty list behavior differs by mode: blacklist: allow all; whitelist: allow none
	if len(cfg.nics) == 0 {
		return cfg.isBlacklist
	}

	// Exact match: return true if whitelist, false if blacklist
	if _, exactMatch := cfg.nics[nicName]; exactMatch {
		return !cfg.isBlacklist
	}

	// If no wildcards, return true if blacklist, false if whitelist
	if !cfg.hasWildcards {
		return cfg.isBlacklist
	}

	// Check for wildcard patterns
	for pattern := range cfg.nics {
		if !strings.Contains(pattern, "*") {
			continue
		}
		if match, _ := path.Match(pattern, nicName); match {
			return !cfg.isBlacklist
		}
	}

	return cfg.isBlacklist
}

func (a *Agent) updateNetworkStats(cacheTimeMs uint16, systemStats *system.Stats) {
	// network stats
	a.ensureNetInterfacesInitialized()

	a.ensureNetworkInterfacesMap(systemStats)

	if netIO, err := psutilNet.IOCounters(true); err == nil {
		nis, msElapsed := a.loadAndTickNetBaseline(cacheTimeMs)
		totalBytesSent, totalBytesRecv := a.sumAndTrackPerNicDeltas(cacheTimeMs, msElapsed, netIO, systemStats)
		bytesSentPerSecond, bytesRecvPerSecond := a.computeBytesPerSecond(msElapsed, totalBytesSent, totalBytesRecv, nis)
		a.applyNetworkTotals(cacheTimeMs, netIO, systemStats, nis, totalBytesSent, totalBytesRecv, bytesSentPerSecond, bytesRecvPerSecond)
	}
}

func (a *Agent) initializeNetIoStats() {
	// reset valid network interfaces
	a.netInterfaces = make(map[string]struct{}, 0)

	// parse NICS env var for whitelist / blacklist
	nicsEnvVal, nicsEnvExists := GetEnv("NICS")
	var nicCfg *NicConfig
	if nicsEnvExists {
		nicCfg = newNicConfig(nicsEnvVal)
	}

	// get current network I/O stats and record valid interfaces
	if netIO, err := psutilNet.IOCounters(true); err == nil {
		for _, v := range netIO {
			if nicsEnvExists && !isValidNic(v.Name, nicCfg) {
				continue
			}
			if a.skipNetworkInterface(v) {
				continue
			}
			slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
			// store as a valid network interface
			a.netInterfaces[v.Name] = struct{}{}
		}
	}

	// Reset per-cache-time trackers and baselines so they will reinitialize on next use
	a.netInterfaceDeltaTrackers = make(map[uint16]*deltatracker.DeltaTracker[string, uint64])
	a.netIoStats = make(map[uint16]system.NetIoStats)
}

// ensureNetInterfacesInitialized re-initializes NICs if none are currently tracked
func (a *Agent) ensureNetInterfacesInitialized() {
	if len(a.netInterfaces) == 0 {
		// if no network interfaces, initialize again
		// this is a fix if agent started before network is online (#466)
		// maybe refactor this in the future to not cache interface names at all so we
		// don't miss an interface that's been added after agent started in any circumstance
		a.initializeNetIoStats()
	}
}

// ensureNetworkInterfacesMap ensures systemStats.NetworkInterfaces map exists
func (a *Agent) ensureNetworkInterfacesMap(systemStats *system.Stats) {
	if systemStats.NetworkInterfaces == nil {
		systemStats.NetworkInterfaces = make(map[string][4]uint64, 0)
	}
}

// loadAndTickNetBaseline returns the NetIoStats baseline and milliseconds elapsed, updating time
func (a *Agent) loadAndTickNetBaseline(cacheTimeMs uint16) (netIoStat system.NetIoStats, msElapsed uint64) {
	netIoStat = a.netIoStats[cacheTimeMs]
	if netIoStat.Time.IsZero() {
		netIoStat.Time = time.Now()
		msElapsed = 0
	} else {
		msElapsed = uint64(time.Since(netIoStat.Time).Milliseconds())
		netIoStat.Time = time.Now()
	}
	return netIoStat, msElapsed
}

// sumAndTrackPerNicDeltas accumulates totals and records per-NIC up/down deltas into systemStats
func (a *Agent) sumAndTrackPerNicDeltas(cacheTimeMs uint16, msElapsed uint64, netIO []psutilNet.IOCountersStat, systemStats *system.Stats) (totalBytesSent, totalBytesRecv uint64) {
	tracker := a.netInterfaceDeltaTrackers[cacheTimeMs]
	if tracker == nil {
		tracker = deltatracker.NewDeltaTracker[string, uint64]()
		a.netInterfaceDeltaTrackers[cacheTimeMs] = tracker
	}
	tracker.Cycle()

	for _, v := range netIO {
		if _, exists := a.netInterfaces[v.Name]; !exists {
			continue
		}
		totalBytesSent += v.BytesSent
		totalBytesRecv += v.BytesRecv

		var upDelta, downDelta uint64
		upKey, downKey := fmt.Sprintf("%sup", v.Name), fmt.Sprintf("%sdown", v.Name)
		tracker.Set(upKey, v.BytesSent)
		tracker.Set(downKey, v.BytesRecv)
		if msElapsed > 0 {
			if prevVal, ok := tracker.Previous(upKey); ok {
				var deltaBytes uint64
				if v.BytesSent >= prevVal {
					deltaBytes = v.BytesSent - prevVal
				} else {
					deltaBytes = v.BytesSent
				}
				upDelta = deltaBytes * 1000 / msElapsed
			}
			if prevVal, ok := tracker.Previous(downKey); ok {
				var deltaBytes uint64
				if v.BytesRecv >= prevVal {
					deltaBytes = v.BytesRecv - prevVal
				} else {
					deltaBytes = v.BytesRecv
				}
				downDelta = deltaBytes * 1000 / msElapsed
			}
		}
		systemStats.NetworkInterfaces[v.Name] = [4]uint64{upDelta, downDelta, v.BytesSent, v.BytesRecv}
	}

	return totalBytesSent, totalBytesRecv
}

// computeBytesPerSecond calculates per-second totals from elapsed time and totals
func (a *Agent) computeBytesPerSecond(msElapsed, totalBytesSent, totalBytesRecv uint64, nis system.NetIoStats) (bytesSentPerSecond, bytesRecvPerSecond uint64) {
	if msElapsed > 0 {
		bytesSentPerSecond = (totalBytesSent - nis.BytesSent) * 1000 / msElapsed
		bytesRecvPerSecond = (totalBytesRecv - nis.BytesRecv) * 1000 / msElapsed
	}
	return bytesSentPerSecond, bytesRecvPerSecond
}

// applyNetworkTotals validates and writes computed network stats, or resets on anomaly
func (a *Agent) applyNetworkTotals(
	cacheTimeMs uint16,
	netIO []psutilNet.IOCountersStat,
	systemStats *system.Stats,
	nis system.NetIoStats,
	totalBytesSent, totalBytesRecv uint64,
	bytesSentPerSecond, bytesRecvPerSecond uint64,
) {
	networkSentPs := bytesToMegabytes(float64(bytesSentPerSecond))
	networkRecvPs := bytesToMegabytes(float64(bytesRecvPerSecond))
	if networkSentPs > 10_000 || networkRecvPs > 10_000 {
		slog.Warn("Invalid net stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
		for _, v := range netIO {
			if _, exists := a.netInterfaces[v.Name]; !exists {
				continue
			}
			slog.Info(v.Name, "recv", v.BytesRecv, "sent", v.BytesSent)
		}
		a.initializeNetIoStats()
		delete(a.netIoStats, cacheTimeMs)
		delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
		systemStats.NetworkSent = 0
		systemStats.NetworkRecv = 0
		systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
		return
	}

	systemStats.NetworkSent = networkSentPs
	systemStats.NetworkRecv = networkRecvPs
	systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
	nis.BytesSent = totalBytesSent
	nis.BytesRecv = totalBytesRecv
	a.netIoStats[cacheTimeMs] = nis
}

func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
	switch {
	case strings.HasPrefix(v.Name, "lo"),
		strings.HasPrefix(v.Name, "docker"),
		strings.HasPrefix(v.Name, "br-"),
		strings.HasPrefix(v.Name, "veth"),
		strings.HasPrefix(v.Name, "bond"),
		strings.HasPrefix(v.Name, "cali"),
		v.BytesRecv == 0,
		v.BytesSent == 0:
		return true
	default:
		return false
	}
}
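To make the NICS matching rules above concrete, a short sketch of how a few interface names resolve, assuming it sits alongside network.go in the agent package (fmt is already imported there); the exampleNicFilter wrapper and the chosen interface names are illustrative only.

// exampleNicFilter is illustrative only: it shows how the NICS env var rules
// defined above resolve for a handful of values.
func exampleNicFilter() {
	allowList := newNicConfig("eth0,en*")      // whitelist: eth0 plus any en* interface
	denyList := newNicConfig("-docker0,veth*") // leading '-' switches to blacklist mode

	fmt.Println(isValidNic("eth0", allowList))   // true  (exact whitelist match)
	fmt.Println(isValidNic("enp3s0", allowList)) // true  (wildcard match via path.Match)
	fmt.Println(isValidNic("wlan0", allowList))  // false (not whitelisted)
	fmt.Println(isValidNic("docker0", denyList)) // false (blacklisted)
	fmt.Println(isValidNic("eth0", denyList))    // true  (not blacklisted)
}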
502	agent/network_test.go	Normal file
@@ -0,0 +1,502 @@
//go:build testing

package agent

import (
	"testing"
	"time"

	"github.com/henrygd/beszel/agent/deltatracker"
	"github.com/henrygd/beszel/internal/entities/system"
	psutilNet "github.com/shirou/gopsutil/v4/net"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestIsValidNic(t *testing.T) {
	tests := []struct {
		name          string
		nicName       string
		config        *NicConfig
		expectedValid bool
	}{
		{
			name:    "Whitelist - NIC in list",
			nicName: "eth0",
			config: &NicConfig{
				nics:        map[string]struct{}{"eth0": {}},
				isBlacklist: false,
			},
			expectedValid: true,
		},
		{
			name:    "Whitelist - NIC not in list",
			nicName: "wlan0",
			config: &NicConfig{
				nics:        map[string]struct{}{"eth0": {}},
				isBlacklist: false,
			},
			expectedValid: false,
		},
		{
			name:    "Blacklist - NIC in list",
			nicName: "eth0",
			config: &NicConfig{
				nics:        map[string]struct{}{"eth0": {}},
				isBlacklist: true,
			},
			expectedValid: false,
		},
		{
			name:    "Blacklist - NIC not in list",
			nicName: "wlan0",
			config: &NicConfig{
				nics:        map[string]struct{}{"eth0": {}},
				isBlacklist: true,
			},
			expectedValid: true,
		},
		{
			name:    "Whitelist with wildcard - matching pattern",
			nicName: "eth1",
			config: &NicConfig{
				nics:         map[string]struct{}{"eth*": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
			expectedValid: true,
		},
		{
			name:    "Whitelist with wildcard - non-matching pattern",
			nicName: "wlan0",
			config: &NicConfig{
				nics:         map[string]struct{}{"eth*": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
			expectedValid: false,
		},
		{
			name:    "Blacklist with wildcard - matching pattern",
			nicName: "eth1",
			config: &NicConfig{
				nics:         map[string]struct{}{"eth*": {}},
				isBlacklist:  true,
				hasWildcards: true,
			},
			expectedValid: false,
		},
		{
			name:    "Blacklist with wildcard - non-matching pattern",
			nicName: "wlan0",
			config: &NicConfig{
				nics:         map[string]struct{}{"eth*": {}},
				isBlacklist:  true,
				hasWildcards: true,
			},
			expectedValid: true,
		},
		{
			name:    "Empty whitelist config - no NICs allowed",
			nicName: "eth0",
			config: &NicConfig{
				nics:        map[string]struct{}{},
				isBlacklist: false,
			},
			expectedValid: false,
		},
		{
			name:    "Empty blacklist config - all NICs allowed",
			nicName: "eth0",
			config: &NicConfig{
				nics:        map[string]struct{}{},
				isBlacklist: true,
			},
			expectedValid: true,
		},
		{
			name:    "Multiple patterns - exact match",
			nicName: "eth0",
			config: &NicConfig{
				nics:        map[string]struct{}{"eth0": {}, "wlan*": {}},
				isBlacklist: false,
			},
			expectedValid: true,
		},
		{
			name:    "Multiple patterns - wildcard match",
			nicName: "wlan1",
			config: &NicConfig{
				nics:         map[string]struct{}{"eth0": {}, "wlan*": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
			expectedValid: true,
		},
		{
			name:    "Multiple patterns - no match",
			nicName: "bond0",
			config: &NicConfig{
				nics:         map[string]struct{}{"eth0": {}, "wlan*": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
			expectedValid: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := isValidNic(tt.nicName, tt.config)
			assert.Equal(t, tt.expectedValid, result)
		})
	}
}

func TestNewNicConfig(t *testing.T) {
	tests := []struct {
		name        string
		nicsEnvVal  string
		expectedCfg *NicConfig
	}{
		{
			name:       "Empty string",
			nicsEnvVal: "",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{},
				isBlacklist:  false,
				hasWildcards: false,
			},
		},
		{
			name:       "Single NIC whitelist",
			nicsEnvVal: "eth0",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{"eth0": {}},
				isBlacklist:  false,
				hasWildcards: false,
			},
		},
		{
			name:       "Multiple NICs whitelist",
			nicsEnvVal: "eth0,wlan0",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{"eth0": {}, "wlan0": {}},
				isBlacklist:  false,
				hasWildcards: false,
			},
		},
		{
			name:       "Blacklist mode",
			nicsEnvVal: "-eth0,wlan0",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{"eth0": {}, "wlan0": {}},
				isBlacklist:  true,
				hasWildcards: false,
			},
		},
		{
			name:       "With wildcards",
			nicsEnvVal: "eth*,wlan0",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{"eth*": {}, "wlan0": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
		},
		{
			name:       "Blacklist with wildcards",
			nicsEnvVal: "-eth*,wlan0",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{"eth*": {}, "wlan0": {}},
				isBlacklist:  true,
				hasWildcards: true,
			},
		},
		{
			name:       "With whitespace",
			nicsEnvVal: "eth0, wlan0 , eth1",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{"eth0": {}, "wlan0": {}, "eth1": {}},
				isBlacklist:  false,
				hasWildcards: false,
			},
		},
		{
			name:       "Only wildcards",
			nicsEnvVal: "eth*,wlan*",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{"eth*": {}, "wlan*": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
		},
		{
			name:       "Leading dash only",
			nicsEnvVal: "-",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{},
				isBlacklist:  true,
				hasWildcards: false,
			},
		},
		{
			name:       "Mixed exact and wildcard",
			nicsEnvVal: "eth0,br-*",
			expectedCfg: &NicConfig{
				nics:         map[string]struct{}{"eth0": {}, "br-*": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg := newNicConfig(tt.nicsEnvVal)
			require.NotNil(t, cfg)
			assert.Equal(t, tt.expectedCfg.isBlacklist, cfg.isBlacklist)
			assert.Equal(t, tt.expectedCfg.hasWildcards, cfg.hasWildcards)
			assert.Equal(t, tt.expectedCfg.nics, cfg.nics)
		})
	}
}

func TestEnsureNetworkInterfacesMap(t *testing.T) {
	var a Agent
	var stats system.Stats

	// Initially nil
	assert.Nil(t, stats.NetworkInterfaces)
	// Ensure map is created
	a.ensureNetworkInterfacesMap(&stats)
	assert.NotNil(t, stats.NetworkInterfaces)
	// Idempotent
	a.ensureNetworkInterfacesMap(&stats)
	assert.NotNil(t, stats.NetworkInterfaces)
}

func TestLoadAndTickNetBaseline(t *testing.T) {
	a := &Agent{netIoStats: make(map[uint16]system.NetIoStats)}

	// First call initializes time and returns 0 elapsed
	ni, elapsed := a.loadAndTickNetBaseline(100)
	assert.Equal(t, uint64(0), elapsed)
	assert.False(t, ni.Time.IsZero())

	// Store back what loadAndTick returns to mimic updateNetworkStats behavior
	a.netIoStats[100] = ni

	time.Sleep(2 * time.Millisecond)

	// Next call should produce >= 0 elapsed and update time
	ni2, elapsed2 := a.loadAndTickNetBaseline(100)
	assert.True(t, elapsed2 > 0)
	assert.False(t, ni2.Time.IsZero())
}

func TestComputeBytesPerSecond(t *testing.T) {
	a := &Agent{}

	// No elapsed -> zero rate
	bytesUp, bytesDown := a.computeBytesPerSecond(0, 2000, 3000, system.NetIoStats{BytesSent: 1000, BytesRecv: 1000})
	assert.Equal(t, uint64(0), bytesUp)
	assert.Equal(t, uint64(0), bytesDown)

	// With elapsed -> per-second calculation
	bytesUp, bytesDown = a.computeBytesPerSecond(500, 6000, 11000, system.NetIoStats{BytesSent: 1000, BytesRecv: 1000})
	// (6000-1000)*1000/500 = 10000; (11000-1000)*1000/500 = 20000
	assert.Equal(t, uint64(10000), bytesUp)
	assert.Equal(t, uint64(20000), bytesDown)
}

func TestSumAndTrackPerNicDeltas(t *testing.T) {
	a := &Agent{
		netInterfaces:             map[string]struct{}{"eth0": {}, "wlan0": {}},
		netInterfaceDeltaTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
	}

	// Two samples for same cache interval to verify delta behavior
	cache := uint16(42)
	net1 := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 1000, BytesRecv: 2000}}
	stats1 := &system.Stats{}
	a.ensureNetworkInterfacesMap(stats1)
	tx1, rx1 := a.sumAndTrackPerNicDeltas(cache, 0, net1, stats1)
	assert.Equal(t, uint64(1000), tx1)
	assert.Equal(t, uint64(2000), rx1)

	// Second cycle with elapsed, larger counters -> deltas computed inside
	net2 := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 4000, BytesRecv: 9000}}
	stats := &system.Stats{}
	a.ensureNetworkInterfacesMap(stats)
	tx2, rx2 := a.sumAndTrackPerNicDeltas(cache, 1000, net2, stats)
	assert.Equal(t, uint64(4000), tx2)
	assert.Equal(t, uint64(9000), rx2)
	// Up/Down deltas per second should be (4000-1000)/1s = 3000 and (9000-2000)/1s = 7000
	ni, ok := stats.NetworkInterfaces["eth0"]
	assert.True(t, ok)
	assert.Equal(t, uint64(3000), ni[0])
	assert.Equal(t, uint64(7000), ni[1])
}

func TestSumAndTrackPerNicDeltasHandlesCounterReset(t *testing.T) {
	a := &Agent{
		netInterfaces:             map[string]struct{}{"eth0": {}},
		netInterfaceDeltaTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
	}

	cache := uint16(77)

	// First interval establishes baseline values
	initial := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 4_000, BytesRecv: 6_000}}
	statsInitial := &system.Stats{}
	a.ensureNetworkInterfacesMap(statsInitial)
	_, _ = a.sumAndTrackPerNicDeltas(cache, 0, initial, statsInitial)

	// Second interval increments counters normally so previous snapshot gets populated
	increment := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 9_000, BytesRecv: 11_000}}
	statsIncrement := &system.Stats{}
	a.ensureNetworkInterfacesMap(statsIncrement)
	_, _ = a.sumAndTrackPerNicDeltas(cache, 1_000, increment, statsIncrement)

	niIncrement, ok := statsIncrement.NetworkInterfaces["eth0"]
	require.True(t, ok)
	assert.Equal(t, uint64(5_000), niIncrement[0])
	assert.Equal(t, uint64(5_000), niIncrement[1])

	// Third interval simulates counter reset (values drop below previous totals)
	reset := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 1_200, BytesRecv: 1_500}}
	statsReset := &system.Stats{}
	a.ensureNetworkInterfacesMap(statsReset)
	_, _ = a.sumAndTrackPerNicDeltas(cache, 1_000, reset, statsReset)

	niReset, ok := statsReset.NetworkInterfaces["eth0"]
	require.True(t, ok)
	assert.Equal(t, uint64(1_200), niReset[0], "upload delta should match new counter value after reset")
	assert.Equal(t, uint64(1_500), niReset[1], "download delta should match new counter value after reset")
}

func TestApplyNetworkTotals(t *testing.T) {
	tests := []struct {
		name                  string
		bytesSentPerSecond    uint64
		bytesRecvPerSecond    uint64
		totalBytesSent        uint64
		totalBytesRecv        uint64
		expectReset           bool
		expectedNetworkSent   float64
		expectedNetworkRecv   float64
		expectedBandwidthSent uint64
		expectedBandwidthRecv uint64
	}{
		{
			name:                  "Valid network stats - normal values",
			bytesSentPerSecond:    1000000, // 1 MB/s
			bytesRecvPerSecond:    2000000, // 2 MB/s
			totalBytesSent:        10000000,
			totalBytesRecv:        20000000,
			expectReset:           false,
			expectedNetworkSent:   0.95, // ~1 MB/s rounded to 2 decimals
			expectedNetworkRecv:   1.91, // ~2 MB/s rounded to 2 decimals
			expectedBandwidthSent: 1000000,
			expectedBandwidthRecv: 2000000,
		},
		{
			name:               "Invalid network stats - sent exceeds threshold",
			bytesSentPerSecond: 11000000000, // ~10.5 GB/s > 10 GB/s threshold
			bytesRecvPerSecond: 1000000,     // 1 MB/s
			totalBytesSent:     10000000,
			totalBytesRecv:     20000000,
			expectReset:        true,
		},
		{
			name:               "Invalid network stats - recv exceeds threshold",
			bytesSentPerSecond: 1000000,     // 1 MB/s
			bytesRecvPerSecond: 11000000000, // ~10.5 GB/s > 10 GB/s threshold
			totalBytesSent:     10000000,
			totalBytesRecv:     20000000,
			expectReset:        true,
		},
		{
			name:               "Invalid network stats - both exceed threshold",
			bytesSentPerSecond: 12000000000, // ~11.4 GB/s
			bytesRecvPerSecond: 13000000000, // ~12.4 GB/s
			totalBytesSent:     10000000,
			totalBytesRecv:     20000000,
			expectReset:        true,
		},
		{
			name:                  "Valid network stats - at threshold boundary",
			bytesSentPerSecond:    10485750000, // ~9999.99 MB/s (rounds to 9999.99)
			bytesRecvPerSecond:    10485750000, // ~9999.99 MB/s (rounds to 9999.99)
			totalBytesSent:        10000000,
			totalBytesRecv:        20000000,
			expectReset:           false,
			expectedNetworkSent:   9999.99,
			expectedNetworkRecv:   9999.99,
			expectedBandwidthSent: 10485750000,
			expectedBandwidthRecv: 10485750000,
		},
		{
			name:                  "Zero values",
			bytesSentPerSecond:    0,
			bytesRecvPerSecond:    0,
			totalBytesSent:        0,
			totalBytesRecv:        0,
			expectReset:           false,
			expectedNetworkSent:   0.0,
			expectedNetworkRecv:   0.0,
			expectedBandwidthSent: 0,
			expectedBandwidthRecv: 0,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup agent with initialized maps
			a := &Agent{
				netInterfaces:             make(map[string]struct{}),
				netIoStats:                make(map[uint16]system.NetIoStats),
				netInterfaceDeltaTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
			}

			cacheTimeMs := uint16(100)
			netIO := []psutilNet.IOCountersStat{
				{Name: "eth0", BytesSent: 1000, BytesRecv: 2000},
			}
			systemStats := &system.Stats{}
			nis := system.NetIoStats{}

			a.applyNetworkTotals(
				cacheTimeMs,
				netIO,
				systemStats,
				nis,
				tt.totalBytesSent,
				tt.totalBytesRecv,
				tt.bytesSentPerSecond,
				tt.bytesRecvPerSecond,
			)

			if tt.expectReset {
				// Should have reset network tracking state - maps cleared and stats zeroed
				assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
				assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
				assert.Zero(t, systemStats.NetworkSent)
				assert.Zero(t, systemStats.NetworkRecv)
				assert.Zero(t, systemStats.Bandwidth[0])
				assert.Zero(t, systemStats.Bandwidth[1])
			} else {
				// Should have applied stats
				assert.Equal(t, tt.expectedNetworkSent, systemStats.NetworkSent)
				assert.Equal(t, tt.expectedNetworkRecv, systemStats.NetworkRecv)
				assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
				assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])

				// Should have updated NetIoStats
				updatedNis := a.netIoStats[cacheTimeMs]
				assert.Equal(t, tt.totalBytesSent, updatedNis.BytesSent)
				assert.Equal(t, tt.totalBytesRecv, updatedNis.BytesRecv)
			}
		})
	}
}
198	agent/sensors.go	Normal file
@@ -0,0 +1,198 @@
package agent

import (
	"context"
	"fmt"
	"log/slog"
	"path"
	"runtime"
	"strconv"
	"strings"
	"unicode/utf8"

	"github.com/henrygd/beszel/internal/entities/system"

	"github.com/shirou/gopsutil/v4/common"
	"github.com/shirou/gopsutil/v4/sensors"
)

type SensorConfig struct {
	context        context.Context
	sensors        map[string]struct{}
	primarySensor  string
	isBlacklist    bool
	hasWildcards   bool
	skipCollection bool
}

func (a *Agent) newSensorConfig() *SensorConfig {
	primarySensor, _ := GetEnv("PRIMARY_SENSOR")
	sysSensors, _ := GetEnv("SYS_SENSORS")
	sensorsEnvVal, sensorsSet := GetEnv("SENSORS")
	skipCollection := sensorsSet && sensorsEnvVal == ""

	return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, skipCollection)
}

// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)

// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal string, skipCollection bool) *SensorConfig {
	config := &SensorConfig{
		context:        context.Background(),
		primarySensor:  primarySensor,
		skipCollection: skipCollection,
		sensors:        make(map[string]struct{}),
	}

	// Set sensors context (allows overriding sys location for sensors)
	if sysSensors != "" {
		slog.Info("SYS_SENSORS", "path", sysSensors)
		config.context = context.WithValue(config.context,
			common.EnvKey, common.EnvMap{common.HostSysEnvKey: sysSensors},
		)
	}

	// handle blacklist
	if strings.HasPrefix(sensorsEnvVal, "-") {
		config.isBlacklist = true
		sensorsEnvVal = sensorsEnvVal[1:]
	}

	for sensor := range strings.SplitSeq(sensorsEnvVal, ",") {
		sensor = strings.TrimSpace(sensor)
		if sensor != "" {
			config.sensors[sensor] = struct{}{}
			if strings.Contains(sensor, "*") {
				config.hasWildcards = true
			}
		}
	}

	return config
}

// updateTemperatures updates the agent with the latest sensor temperatures
func (a *Agent) updateTemperatures(systemStats *system.Stats) {
	// skip if sensors whitelist is set to empty string
	if a.sensorConfig.skipCollection {
		slog.Debug("Skipping temperature collection")
		return
	}

	// reset high temp
	a.systemInfo.DashboardTemp = 0

	temps, err := a.getTempsWithPanicRecovery(getSensorTemps)
	if err != nil {
		// retry once on panic (gopsutil/issues/1832)
		temps, err = a.getTempsWithPanicRecovery(getSensorTemps)
		if err != nil {
			slog.Warn("Error updating temperatures", "err", err)
			if len(systemStats.Temperatures) > 0 {
				systemStats.Temperatures = make(map[string]float64)
			}
			return
		}
	}
	slog.Debug("Temperature", "sensors", temps)

	// return if no sensors
	if len(temps) == 0 {
		return
	}

	systemStats.Temperatures = make(map[string]float64, len(temps))
	for i, sensor := range temps {
		// check for malformed strings on darwin (gopsutil/issues/1832)
		if runtime.GOOS == "darwin" && !utf8.ValidString(sensor.SensorKey) {
			continue
		}

		// scale temperature
		if sensor.Temperature != 0 && sensor.Temperature < 1 {
			sensor.Temperature = scaleTemperature(sensor.Temperature)
		}
		// skip if temperature is unreasonable
		if sensor.Temperature <= 0 || sensor.Temperature >= 200 {
			continue
		}
		sensorName := sensor.SensorKey
		if _, ok := systemStats.Temperatures[sensorName]; ok {
			// if key already exists, append int to key
			sensorName = sensorName + "_" + strconv.Itoa(i)
		}
		// skip if not in whitelist or blacklist
		if !isValidSensor(sensorName, a.sensorConfig) {
			continue
		}
		// set dashboard temperature
		switch a.sensorConfig.primarySensor {
		case "":
			a.systemInfo.DashboardTemp = max(a.systemInfo.DashboardTemp, sensor.Temperature)
		case sensorName:
			a.systemInfo.DashboardTemp = sensor.Temperature
		}
		systemStats.Temperatures[sensorName] = twoDecimals(sensor.Temperature)
	}
}

// getTempsWithPanicRecovery wraps sensors.TemperaturesWithContext to recover from panics (gopsutil/issues/1832)
func (a *Agent) getTempsWithPanicRecovery(getTemps getTempsFn) (temps []sensors.TemperatureStat, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic: %v", r)
		}
	}()
	// get sensor data (error ignored intentionally as it may be only with one sensor)
	temps, _ = getTemps(a.sensorConfig.context)
	return
}

// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
func isValidSensor(sensorName string, config *SensorConfig) bool {
	// if no sensors configured, everything is valid
	if len(config.sensors) == 0 {
		return true
	}

	// Exact match - return true if whitelist, false if blacklist
	if _, exactMatch := config.sensors[sensorName]; exactMatch {
		return !config.isBlacklist
	}

	// If no wildcards, return true if blacklist, false if whitelist
	if !config.hasWildcards {
		return config.isBlacklist
	}

	// Check for wildcard patterns
	for pattern := range config.sensors {
		if !strings.Contains(pattern, "*") {
			continue
		}
		if match, _ := path.Match(pattern, sensorName); match {
			return !config.isBlacklist
		}
	}

	return config.isBlacklist
}

// scaleTemperature scales temperatures in fractional values to reasonable Celsius values
func scaleTemperature(temp float64) float64 {
	if temp > 1 {
		return temp
	}
	scaled100 := temp * 100
	scaled1000 := temp * 1000

	if scaled100 >= 15 && scaled100 <= 95 {
		return scaled100
	} else if scaled1000 >= 15 && scaled1000 <= 95 {
		return scaled1000
	}
	return scaled100
}
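A short sketch of how the SENSORS filtering and the fractional-temperature scaling above behave, assuming it sits alongside sensors.go in the agent package (fmt is already imported there); the exampleSensorConfig wrapper and the chosen sensor names are illustrative only.

// exampleSensorConfig is illustrative only: it exercises the matching and
// scaling helpers defined above with a whitelist containing a wildcard.
func exampleSensorConfig() {
	cfg := (&Agent{}).newSensorConfigWithEnv("cpu_temp", "", "cpu_temp,core_*", false)

	fmt.Println(isValidSensor("cpu_temp", cfg)) // true  (exact whitelist match)
	fmt.Println(isValidSensor("core_2", cfg))   // true  (wildcard match)
	fmt.Println(isValidSensor("gpu_temp", cfg)) // false (not whitelisted)

	// fractional readings are rescaled into a plausible Celsius range
	fmt.Println(scaleTemperature(0.45))  // 45 (100x scaling)
	fmt.Println(scaleTemperature(0.045)) // 45 (1000x scaling)
	fmt.Println(scaleTemperature(47.0))  // 47 (already in range, unchanged)
}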
9	agent/sensors_default.go	Normal file
@@ -0,0 +1,9 @@
//go:build !windows

package agent

import (
	"github.com/shirou/gopsutil/v4/sensors"
)

var getSensorTemps = sensors.TemperaturesWithContext
554	agent/sensors_test.go	Normal file
@@ -0,0 +1,554 @@
//go:build testing
// +build testing

package agent

import (
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/henrygd/beszel/internal/entities/system"

	"github.com/shirou/gopsutil/v4/common"
	"github.com/shirou/gopsutil/v4/sensors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestIsValidSensor(t *testing.T) {
	tests := []struct {
		name          string
		sensorName    string
		config        *SensorConfig
		expectedValid bool
	}{
		{
			name:       "Whitelist - sensor in list",
			sensorName: "cpu_temp",
			config: &SensorConfig{
				sensors:     map[string]struct{}{"cpu_temp": {}},
				isBlacklist: false,
			},
			expectedValid: true,
		},
		{
			name:       "Whitelist - sensor not in list",
			sensorName: "gpu_temp",
			config: &SensorConfig{
				sensors:     map[string]struct{}{"cpu_temp": {}},
				isBlacklist: false,
			},
			expectedValid: false,
		},
		{
			name:       "Blacklist - sensor in list",
			sensorName: "cpu_temp",
			config: &SensorConfig{
				sensors:     map[string]struct{}{"cpu_temp": {}},
				isBlacklist: true,
			},
			expectedValid: false,
		},
		{
			name:       "Blacklist - sensor not in list",
			sensorName: "gpu_temp",
			config: &SensorConfig{
				sensors:     map[string]struct{}{"cpu_temp": {}},
				isBlacklist: true,
			},
			expectedValid: true,
		},
		{
			name:       "Whitelist with wildcard - matching pattern",
			sensorName: "core_0_temp",
			config: &SensorConfig{
				sensors:      map[string]struct{}{"core_*_temp": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
			expectedValid: true,
		},
		{
			name:       "Whitelist with wildcard - non-matching pattern",
			sensorName: "gpu_temp",
			config: &SensorConfig{
				sensors:      map[string]struct{}{"core_*_temp": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
			expectedValid: false,
		},
		{
			name:       "Blacklist with wildcard - matching pattern",
			sensorName: "core_0_temp",
			config: &SensorConfig{
				sensors:      map[string]struct{}{"core_*_temp": {}},
				isBlacklist:  true,
				hasWildcards: true,
			},
			expectedValid: false,
		},
		{
			name:       "Blacklist with wildcard - non-matching pattern",
			sensorName: "gpu_temp",
			config: &SensorConfig{
				sensors:      map[string]struct{}{"core_*_temp": {}},
				isBlacklist:  true,
				hasWildcards: true,
			},
			expectedValid: true,
		},
		{
			name:       "No sensors configured",
			sensorName: "any_temp",
			config: &SensorConfig{
				sensors:        map[string]struct{}{},
				isBlacklist:    false,
				hasWildcards:   false,
				skipCollection: false,
			},
			expectedValid: true,
		},
		{
			name:       "Mixed patterns in whitelist - exact match",
			sensorName: "cpu_temp",
			config: &SensorConfig{
				sensors:      map[string]struct{}{"cpu_temp": {}, "core_*_temp": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
			expectedValid: true,
		},
		{
			name:       "Mixed patterns in whitelist - wildcard match",
			sensorName: "core_1_temp",
			config: &SensorConfig{
				sensors:      map[string]struct{}{"cpu_temp": {}, "core_*_temp": {}},
				isBlacklist:  false,
				hasWildcards: true,
			},
			expectedValid: true,
		},
		{
			name:       "Mixed patterns in blacklist - exact match",
			sensorName: "cpu_temp",
			config: &SensorConfig{
				sensors:      map[string]struct{}{"cpu_temp": {}, "core_*_temp": {}},
				isBlacklist:  true,
				hasWildcards: true,
			},
			expectedValid: false,
		},
		{
			name:       "Mixed patterns in blacklist - wildcard match",
			sensorName: "core_1_temp",
			config: &SensorConfig{
				sensors:      map[string]struct{}{"cpu_temp": {}, "core_*_temp": {}},
				isBlacklist:  true,
				hasWildcards: true,
			},
			expectedValid: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := isValidSensor(tt.sensorName, tt.config)
			assert.Equal(t, tt.expectedValid, result, "isValidSensor(%q, config) returned unexpected result", tt.sensorName)
		})
	}
}

func TestNewSensorConfigWithEnv(t *testing.T) {
	agent := &Agent{}

	tests := []struct {
		name           string
		primarySensor  string
		sysSensors     string
		sensors        string
		skipCollection bool
		expectedConfig *SensorConfig
	}{
		{
			name:          "Empty configuration",
			primarySensor: "",
			sysSensors:    "",
			sensors:       "",
			expectedConfig: &SensorConfig{
				context:        context.Background(),
				primarySensor:  "",
				sensors:        map[string]struct{}{},
				isBlacklist:    false,
				hasWildcards:   false,
				skipCollection: false,
			},
		},
		{
			name:           "Explicitly set to empty string",
			primarySensor:  "",
			sysSensors:     "",
			sensors:        "",
			skipCollection: true,
			expectedConfig: &SensorConfig{
				context:        context.Background(),
				primarySensor:  "",
				sensors:        map[string]struct{}{},
				isBlacklist:    false,
				hasWildcards:   false,
				skipCollection: true,
			},
		},
		{
			name:          "Primary sensor only - should create sensor map",
			primarySensor: "cpu_temp",
			sysSensors:    "",
			sensors:       "",
			expectedConfig: &SensorConfig{
				context:       context.Background(),
				primarySensor: "cpu_temp",
				sensors:       map[string]struct{}{},
				isBlacklist:   false,
				hasWildcards:  false,
			},
		},
		{
			name:          "Whitelist sensors",
			primarySensor: "cpu_temp",
			sysSensors:    "",
			sensors:       "cpu_temp,gpu_temp",
			expectedConfig: &SensorConfig{
				context:       context.Background(),
				primarySensor: "cpu_temp",
				sensors: map[string]struct{}{
					"cpu_temp": {},
					"gpu_temp": {},
				},
				isBlacklist:  false,
				hasWildcards: false,
			},
		},
		{
			name:          "Blacklist sensors",
			primarySensor: "cpu_temp",
			sysSensors:    "",
			sensors:       "-cpu_temp,gpu_temp",
			expectedConfig: &SensorConfig{
				context:       context.Background(),
				primarySensor: "cpu_temp",
				sensors: map[string]struct{}{
					"cpu_temp": {},
					"gpu_temp": {},
				},
				isBlacklist:  true,
				hasWildcards: false,
			},
		},
		{
			name:          "Sensors with wildcard",
			primarySensor: "cpu_temp",
			sysSensors:    "",
			sensors:       "cpu_*,gpu_temp",
			expectedConfig: &SensorConfig{
				context:       context.Background(),
				primarySensor: "cpu_temp",
				sensors: map[string]struct{}{
					"cpu_*":    {},
					"gpu_temp": {},
				},
				isBlacklist:  false,
				hasWildcards: true,
			},
		},
		{
			name:          "Sensors with whitespace",
			primarySensor: "cpu_temp",
			sysSensors:    "",
			sensors:       "cpu_*, gpu_temp",
			expectedConfig: &SensorConfig{
				context:       context.Background(),
				primarySensor: "cpu_temp",
				sensors: map[string]struct{}{
					"cpu_*":    {},
					"gpu_temp": {},
				},
				isBlacklist:  false,
				hasWildcards: true,
			},
		},
		{
			name:          "With SYS_SENSORS path",
			primarySensor: "cpu_temp",
			sysSensors:    "/custom/path",
			sensors:       "cpu_temp",
			expectedConfig: &SensorConfig{
				primarySensor: "cpu_temp",
				sensors: map[string]struct{}{
					"cpu_temp": {},
				},
				isBlacklist:  false,
				hasWildcards: false,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.skipCollection)

			// Check primary sensor
			assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)

			// Check sensor map
			if tt.expectedConfig.sensors == nil {
				assert.Nil(t, result.sensors)
			} else {
				assert.Equal(t, len(tt.expectedConfig.sensors), len(result.sensors))
				for sensor := range tt.expectedConfig.sensors {
					_, exists := result.sensors[sensor]
					assert.True(t, exists, "Sensor %s should exist in the result", sensor)
				}
			}

			// Check flags
			assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
			assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)

			// Check context
			if tt.sysSensors != "" {
				// Verify context contains correct values
				envMap, ok := result.context.Value(common.EnvKey).(common.EnvMap)
				require.True(t, ok, "Context should contain EnvMap")
				sysPath, ok := envMap[common.HostSysEnvKey]
				require.True(t, ok, "EnvMap should contain HostSysEnvKey")
				assert.Equal(t, tt.sysSensors, sysPath)
			}
		})
	}
}

func TestNewSensorConfig(t *testing.T) {
	// Save original environment variables
	originalPrimary, hasPrimary := os.LookupEnv("BESZEL_AGENT_PRIMARY_SENSOR")
	originalSys, hasSys := os.LookupEnv("BESZEL_AGENT_SYS_SENSORS")
	originalSensors, hasSensors := os.LookupEnv("BESZEL_AGENT_SENSORS")

	// Restore environment variables after the test
	defer func() {
		// Clean up test environment variables
		os.Unsetenv("BESZEL_AGENT_PRIMARY_SENSOR")
		os.Unsetenv("BESZEL_AGENT_SYS_SENSORS")
		os.Unsetenv("BESZEL_AGENT_SENSORS")

		// Restore original values if they existed
		if hasPrimary {
			os.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", originalPrimary)
		}
		if hasSys {
			os.Setenv("BESZEL_AGENT_SYS_SENSORS", originalSys)
		}
		if hasSensors {
			os.Setenv("BESZEL_AGENT_SENSORS", originalSensors)
		}
	}()

	// Set test environment variables
	os.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
	os.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
	os.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")

	agent := &Agent{}
	result := agent.newSensorConfig()

	// Verify results
	assert.Equal(t, "test_primary", result.primarySensor)
	assert.NotNil(t, result.sensors)
	assert.Equal(t, 3, len(result.sensors))
	assert.True(t, result.hasWildcards)
	assert.False(t, result.isBlacklist)

	// Check that sys sensors path is in context
	envMap, ok := result.context.Value(common.EnvKey).(common.EnvMap)
	require.True(t, ok, "Context should contain EnvMap")
	sysPath, ok := envMap[common.HostSysEnvKey]
	require.True(t, ok, "EnvMap should contain HostSysEnvKey")
	assert.Equal(t, "/test/path", sysPath)
}

func TestScaleTemperature(t *testing.T) {
	tests := []struct {
		name     string
		input    float64
		expected float64
		desc     string
	}{
		// Normal temperatures (no scaling needed)
		{"normal_cpu_temp", 45.0, 45.0, "Normal CPU temperature"},
		{"normal_room_temp", 25.0, 25.0, "Normal room temperature"},
		{"high_cpu_temp", 85.0, 85.0, "High CPU temperature"},
		// Zero temperature
		{"zero_temp", 0.0, 0.0, "Zero temperature"},
		// Fractional values that should use 100x scaling
		{"fractional_45c", 0.45, 45.0, "0.45 should become 45°C (100x)"},
		{"fractional_25c", 0.25, 25.0, "0.25 should become 25°C (100x)"},
		{"fractional_60c", 0.60, 60.0, "0.60 should become 60°C (100x)"},
		{"fractional_75c", 0.75, 75.0, "0.75 should become 75°C (100x)"},
		{"fractional_30c", 0.30, 30.0, "0.30 should become 30°C (100x)"},
		// Fractional values that should use 1000x scaling
		{"millifractional_45c", 0.045, 45.0, "0.045 should become 45°C (1000x)"},
		{"millifractional_25c", 0.025, 25.0, "0.025 should become 25°C (1000x)"},
		{"millifractional_60c", 0.060, 60.0, "0.060 should become 60°C (1000x)"},
		{"millifractional_75c", 0.075, 75.0, "0.075 should become 75°C (1000x)"},
		{"millifractional_35c", 0.035, 35.0, "0.035 should become 35°C (1000x)"},
		// Edge cases - values outside reasonable range
		{"very_low_fractional", 0.01, 1.0, "0.01 should default to 100x scaling (1°C)"},
		{"very_high_fractional", 0.99, 99.0, "0.99 should default to 100x scaling (99°C)"},
		{"extremely_low", 0.001, 0.1, "0.001 should default to 100x scaling (0.1°C)"},
		// Boundary cases around the reasonable range (15-95°C)
		{"boundary_low_100x", 0.15, 15.0, "0.15 should use 100x scaling (15°C)"},
		{"boundary_high_100x", 0.95, 95.0, "0.95 should use 100x scaling (95°C)"},
		{"boundary_low_1000x", 0.015, 15.0, "0.015 should use 1000x scaling (15°C)"},
		{"boundary_high_1000x", 0.095, 95.0, "0.095 should use 1000x scaling (95°C)"},
		// Values just outside reasonable range
		{"just_below_range_100x", 0.14, 14.0, "0.14 should default to 100x (14°C)"},
		{"just_above_range_100x", 0.96, 96.0, "0.96 should default to 100x (96°C)"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
|
||||
result := scaleTemperature(tt.input)
|
||||
assert.InDelta(t, tt.expected, result, 0.001,
|
||||
"scaleTemperature(%v) = %v, expected %v (%s)",
|
||||
tt.input, result, tt.expected, tt.desc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScaleTemperatureLogic(t *testing.T) {
|
||||
// Test the logic flow for ambiguous cases
|
||||
t.Run("prefers_100x_when_both_valid", func(t *testing.T) {
|
||||
// 0.5 could be 50°C (100x) or 500°C (1000x)
|
||||
// Should prefer 100x since it's tried first and is in range
|
||||
result := scaleTemperature(0.5)
|
||||
expected := 50.0
|
||||
assert.InDelta(t, expected, result, 0.001,
|
||||
"scaleTemperature(0.5) = %v, expected %v (should prefer 100x scaling)",
|
||||
result, expected)
|
||||
})
|
||||
|
||||
t.Run("uses_1000x_when_100x_too_low", func(t *testing.T) {
|
||||
// 0.05 -> 5°C (100x, too low) or 50°C (1000x, in range)
|
||||
// Should use 1000x since 100x is below reasonable range
|
||||
result := scaleTemperature(0.05)
|
||||
expected := 50.0
|
||||
assert.InDelta(t, expected, result, 0.001,
|
||||
"scaleTemperature(0.05) = %v, expected %v (should use 1000x scaling)",
|
||||
result, expected)
|
||||
})
|
||||
|
||||
t.Run("defaults_to_100x_when_both_invalid", func(t *testing.T) {
|
||||
// 0.005 -> 0.5°C (100x, too low) or 5°C (1000x, too low)
|
||||
// Should default to 100x scaling
|
||||
result := scaleTemperature(0.005)
|
||||
expected := 0.5
|
||||
assert.InDelta(t, expected, result, 0.001,
|
||||
"scaleTemperature(0.005) = %v, expected %v (should default to 100x)",
|
||||
result, expected)
|
||||
})
|
||||
}
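
// scaleTemperatureSketch is an illustrative reconstruction of the heuristic the
// cases above exercise; it is NOT the agent's actual implementation. Values of at
// least 1 pass through unchanged, a 100x scale is preferred when it lands in a
// plausible 15-95°C window, 1000x is the fallback, and 100x is the default when
// neither scale fits.
func scaleTemperatureSketch(v float64) float64 {
	if v >= 1 {
		return v
	}
	scaled100, scaled1000 := v*100, v*1000
	if scaled100 >= 15 && scaled100 <= 95 {
		return scaled100
	}
	if scaled1000 >= 15 && scaled1000 <= 95 {
		return scaled1000
	}
	return scaled100
}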

func TestGetTempsWithPanicRecovery(t *testing.T) {
	agent := &Agent{
		systemInfo: system.Info{},
		sensorConfig: &SensorConfig{
			context: context.Background(),
		},
	}

	tests := []struct {
		name        string
		getTempsFn  getTempsFn
		expectError bool
		errorMsg    string
	}{
		{
			name: "successful_function_call",
			getTempsFn: func(ctx context.Context) ([]sensors.TemperatureStat, error) {
				return []sensors.TemperatureStat{
					{SensorKey: "test_sensor", Temperature: 45.0},
				}, nil
			},
			expectError: false,
		},
		{
			name: "function_returns_error",
			getTempsFn: func(ctx context.Context) ([]sensors.TemperatureStat, error) {
				return []sensors.TemperatureStat{
					{SensorKey: "test_sensor", Temperature: 45.0},
				}, fmt.Errorf("sensor error")
			},
			expectError: false, // getTempsWithPanicRecovery ignores errors from the function
		},
		{
			name: "function_panics_with_string",
			getTempsFn: func(ctx context.Context) ([]sensors.TemperatureStat, error) {
				panic("test panic")
			},
			expectError: true,
			errorMsg:    "panic: test panic",
		},
		{
			name: "function_panics_with_error",
			getTempsFn: func(ctx context.Context) ([]sensors.TemperatureStat, error) {
				panic(fmt.Errorf("panic error"))
			},
			expectError: true,
			errorMsg:    "panic:",
		},
		{
			name: "function_panics_with_index_out_of_bounds",
			getTempsFn: func(ctx context.Context) ([]sensors.TemperatureStat, error) {
				slice := []int{1, 2, 3}
				_ = slice[10] // out of bounds panic
				return nil, nil
			},
			expectError: true,
			errorMsg:    "panic:",
		},
		{
			name: "function_panics_with_any_conversion",
			getTempsFn: func(ctx context.Context) ([]sensors.TemperatureStat, error) {
				var i any = "string"
				_ = i.(int) // type assertion panic
				return nil, nil
			},
			expectError: true,
			errorMsg:    "panic:",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var temps []sensors.TemperatureStat
			var err error

			// The function should not panic, regardless of what the injected function does
			assert.NotPanics(t, func() {
				temps, err = agent.getTempsWithPanicRecovery(tt.getTempsFn)
			}, "getTempsWithPanicRecovery should not panic")

			if tt.expectError {
				assert.Error(t, err, "Expected an error to be returned")
				if tt.errorMsg != "" {
					assert.Contains(t, err.Error(), tt.errorMsg,
						"Error message should contain expected text")
				}
				assert.Nil(t, temps, "Temps should be nil when panic occurs")
			} else {
				assert.NoError(t, err, "Should not return error for successful calls")
			}
		})
	}
}
agent/sensors_windows.go (new file, 286 lines)
@@ -0,0 +1,286 @@
//go:build windows

//go:generate dotnet build -c Release lhm/beszel_lhm.csproj

package agent

import (
	"bufio"
	"context"
	"embed"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/shirou/gopsutil/v4/sensors"
)

// Note: This is always called from Agent.gatherStats() which holds Agent.Lock(),
// so no internal concurrency protection is needed.

// lhmProcess is a wrapper around the LHM .NET process.
type lhmProcess struct {
	cmd                  *exec.Cmd
	stdin                io.WriteCloser
	stdout               io.ReadCloser
	scanner              *bufio.Scanner
	isRunning            bool
	stoppedNoSensors     bool
	consecutiveNoSensors uint8
	execPath             string
	tempDir              string
}

//go:embed all:lhm/bin/Release/net48
var lhmFs embed.FS

var (
	beszelLhm     *lhmProcess
	beszelLhmOnce sync.Once
	useLHM        = os.Getenv("LHM") == "true"
)

var errNoSensors = errors.New("no sensors found (try running as admin with LHM=true)")

// newlhmProcess copies the embedded LHM executable to a temporary directory and starts it.
func newlhmProcess() (*lhmProcess, error) {
	destDir := filepath.Join(os.TempDir(), "beszel")
	execPath := filepath.Join(destDir, "beszel_lhm.exe")

	if err := os.MkdirAll(destDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create temp directory: %w", err)
	}

	// Only copy if executable doesn't exist
	if _, err := os.Stat(execPath); os.IsNotExist(err) {
		if err := copyEmbeddedDir(lhmFs, "lhm/bin/Release/net48", destDir); err != nil {
			return nil, fmt.Errorf("failed to copy embedded directory: %w", err)
		}
	}

	lhm := &lhmProcess{
		execPath: execPath,
		tempDir:  destDir,
	}

	if err := lhm.startProcess(); err != nil {
		return nil, fmt.Errorf("failed to start process: %w", err)
	}

	return lhm, nil
}

// startProcess starts the external LHM process
func (lhm *lhmProcess) startProcess() error {
	// Clean up any existing process
	lhm.cleanupProcess()

	cmd := exec.Command(lhm.execPath)
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		stdin.Close()
		return err
	}

	if err := cmd.Start(); err != nil {
		stdin.Close()
		stdout.Close()
		return err
	}

	// Update process state
	lhm.cmd = cmd
	lhm.stdin = stdin
	lhm.stdout = stdout
	lhm.scanner = bufio.NewScanner(stdout)
	lhm.isRunning = true

	// Give process a moment to initialize
	time.Sleep(100 * time.Millisecond)

	return nil
}

// cleanupProcess terminates the process and closes resources but preserves files
func (lhm *lhmProcess) cleanupProcess() {
	lhm.isRunning = false

	if lhm.cmd != nil && lhm.cmd.Process != nil {
		lhm.cmd.Process.Kill()
		lhm.cmd.Wait()
	}

	if lhm.stdin != nil {
		lhm.stdin.Close()
		lhm.stdin = nil
	}
	if lhm.stdout != nil {
		lhm.stdout.Close()
		lhm.stdout = nil
	}

	lhm.cmd = nil
	lhm.scanner = nil
	lhm.stoppedNoSensors = false
	lhm.consecutiveNoSensors = 0
}
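
// getTemps (below) speaks a simple line protocol with the helper process: it
// writes "getTemps" on stdin and reads "name|value" lines from stdout (for
// example, "CPU Package|47.5" -- an illustrative line, not real output) until a
// blank line or EOF ends the batch.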

func (lhm *lhmProcess) getTemps(ctx context.Context) (temps []sensors.TemperatureStat, err error) {
	if !useLHM || lhm.stoppedNoSensors {
		// Fall back to gopsutil if we can't get sensors from LHM
		return sensors.TemperaturesWithContext(ctx)
	}

	// Start process if it's not running
	if !lhm.isRunning || lhm.stdin == nil || lhm.scanner == nil {
		err := lhm.startProcess()
		if err != nil {
			return temps, err
		}
	}

	// Send command to process
	_, err = fmt.Fprintln(lhm.stdin, "getTemps")
	if err != nil {
		lhm.isRunning = false
		return temps, fmt.Errorf("failed to send command: %w", err)
	}

	// Read all sensor lines until we hit an empty line or EOF
	for lhm.scanner.Scan() {
		line := strings.TrimSpace(lhm.scanner.Text())
		if line == "" {
			break
		}

		parts := strings.Split(line, "|")
		if len(parts) != 2 {
			slog.Debug("Invalid sensor format", "line", line)
			continue
		}

		name := strings.TrimSpace(parts[0])
		valueStr := strings.TrimSpace(parts[1])

		value, err := strconv.ParseFloat(valueStr, 64)
		if err != nil {
			slog.Debug("Failed to parse sensor", "err", err, "line", line)
			continue
		}

		if name == "" || value <= 0 || value > 150 {
			slog.Debug("Invalid sensor", "name", name, "val", value, "line", line)
			continue
		}

		temps = append(temps, sensors.TemperatureStat{
			SensorKey:   name,
			Temperature: value,
		})
	}

	if err := lhm.scanner.Err(); err != nil {
		lhm.isRunning = false
		return temps, err
	}

	// Handle no sensors case
	if len(temps) == 0 {
		lhm.consecutiveNoSensors++
		if lhm.consecutiveNoSensors >= 3 {
			lhm.stoppedNoSensors = true
			slog.Warn(errNoSensors.Error())
			lhm.cleanup()
		}
		return sensors.TemperaturesWithContext(ctx)
	}

	lhm.consecutiveNoSensors = 0

	return temps, nil
}

// getSensorTemps attempts to pull sensor temperatures from the embedded LHM process.
// NB: LibreHardwareMonitorLib requires admin privileges to access all available sensors.
func getSensorTemps(ctx context.Context) (temps []sensors.TemperatureStat, err error) {
	defer func() {
		if err != nil {
			slog.Debug("Error reading sensors", "err", err)
		}
	}()

	if !useLHM {
		return sensors.TemperaturesWithContext(ctx)
	}

	// Initialize process once
	beszelLhmOnce.Do(func() {
		beszelLhm, err = newlhmProcess()
	})

	if err != nil {
		return temps, fmt.Errorf("failed to initialize lhm: %w", err)
	}

	if beszelLhm == nil {
		return temps, fmt.Errorf("lhm not available")
	}

	return beszelLhm.getTemps(ctx)
}

// cleanup terminates the process and closes resources
func (lhm *lhmProcess) cleanup() {
	lhm.cleanupProcess()
	if lhm.tempDir != "" {
		os.RemoveAll(lhm.tempDir)
	}
}

// copyEmbeddedDir copies the embedded directory to the destination path
func copyEmbeddedDir(fs embed.FS, srcPath, destPath string) error {
	entries, err := fs.ReadDir(srcPath)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(destPath, 0755); err != nil {
		return err
	}

	for _, entry := range entries {
		srcEntryPath := path.Join(srcPath, entry.Name())
		destEntryPath := filepath.Join(destPath, entry.Name())

		if entry.IsDir() {
			if err := copyEmbeddedDir(fs, srcEntryPath, destEntryPath); err != nil {
				return err
			}
			continue
		}

		data, err := fs.ReadFile(srcEntryPath)
		if err != nil {
			return err
		}

		if err := os.WriteFile(destEntryPath, data, 0755); err != nil {
			return err
		}
	}

	return nil
}
agent/server.go (new file, 292 lines)
@@ -0,0 +1,292 @@
package agent

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"net"
	"os"
	"strings"
	"time"

	"github.com/henrygd/beszel"
	"github.com/henrygd/beszel/internal/common"
	"github.com/henrygd/beszel/internal/entities/smart"
	"github.com/henrygd/beszel/internal/entities/system"
	"github.com/henrygd/beszel/internal/entities/systemd"

	"github.com/blang/semver"
	"github.com/fxamacker/cbor/v2"
	"github.com/gliderlabs/ssh"
	gossh "golang.org/x/crypto/ssh"
)

// ServerOptions contains configuration options for starting the SSH server.
type ServerOptions struct {
	Addr    string            // Network address to listen on (e.g., ":45876" or "/path/to/socket")
	Network string            // Network type ("tcp" or "unix")
	Keys    []gossh.PublicKey // SSH public keys for authentication
}

// hubVersions caches hub versions by session ID to avoid repeated parsing.
var hubVersions map[string]semver.Version

// StartServer starts the SSH server with the provided options.
// It configures the server with secure defaults, sets up authentication,
// and begins listening for connections. Returns an error if the server
// is already running or if there's an issue starting the server.
func (a *Agent) StartServer(opts ServerOptions) error {
	if a.server != nil {
		return errors.New("server already started")
	}

	slog.Info("Starting SSH server", "addr", opts.Addr, "network", opts.Network)

	if opts.Network == "unix" {
		// remove existing socket file if it exists
		if err := os.Remove(opts.Addr); err != nil && !os.IsNotExist(err) {
			return err
		}
	}

	// start listening on the address
	ln, err := net.Listen(opts.Network, opts.Addr)
	if err != nil {
		return err
	}
	defer ln.Close()

	// base config (limit to allowed algorithms)
	config := &gossh.ServerConfig{
		ServerVersion: fmt.Sprintf("SSH-2.0-%s_%s", beszel.AppName, beszel.Version),
	}
	config.KeyExchanges = common.DefaultKeyExchanges
	config.MACs = common.DefaultMACs
	config.Ciphers = common.DefaultCiphers

	// set default handler
	ssh.Handle(a.handleSession)

	a.server = &ssh.Server{
		ServerConfigCallback: func(ctx ssh.Context) *gossh.ServerConfig {
			return config
		},
		// check public key(s)
		PublicKeyHandler: func(ctx ssh.Context, key ssh.PublicKey) bool {
			remoteAddr := ctx.RemoteAddr()
			for _, pubKey := range opts.Keys {
				if ssh.KeysEqual(key, pubKey) {
					slog.Info("SSH connected", "addr", remoteAddr)
					return true
				}
			}
			slog.Warn("Invalid SSH key", "addr", remoteAddr)
			return false
		},
		// disable pty
		PtyCallback: func(ctx ssh.Context, pty ssh.Pty) bool {
			return false
		},
		// close idle connections after 70 seconds
		IdleTimeout: 70 * time.Second,
	}

	// Start SSH server on the listener
	return a.server.Serve(ln)
}
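
// Typical usage (illustrative only; key material shortened):
//
//	keys, _ := ParseKeys("ssh-ed25519 AAAA...")
//	agent.StartServer(ServerOptions{Network: "tcp", Addr: ":45876", Keys: keys})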

// getHubVersion retrieves and caches the hub version for a given session.
// It extracts the version from the SSH client version string and caches
// it to avoid repeated parsing. Returns a zero version if parsing fails.
func (a *Agent) getHubVersion(sessionId string, sessionCtx ssh.Context) semver.Version {
	if hubVersions == nil {
		hubVersions = make(map[string]semver.Version, 1)
	}
	hubVersion, ok := hubVersions[sessionId]
	if ok {
		return hubVersion
	}
	// Extract hub version from SSH client version
	clientVersion := sessionCtx.Value(ssh.ContextKeyClientVersion)
	if versionStr, ok := clientVersion.(string); ok {
		hubVersion, _ = extractHubVersion(versionStr)
	}
	hubVersions[sessionId] = hubVersion
	return hubVersion
}

// handleSession handles an incoming SSH session by gathering system statistics
// and sending them to the hub. It signals connection events, determines the
// appropriate encoding format based on hub version, and exits with appropriate
// status codes.
func (a *Agent) handleSession(s ssh.Session) {
	a.connectionManager.eventChan <- SSHConnect

	sessionCtx := s.Context()
	sessionID := sessionCtx.SessionID()

	hubVersion := a.getHubVersion(sessionID, sessionCtx)

	// Legacy one-shot behavior for older hubs
	if hubVersion.LT(beszel.MinVersionAgentResponse) {
		if err := a.handleLegacyStats(s, hubVersion); err != nil {
			slog.Error("Error encoding stats", "err", err)
			s.Exit(1)
			return
		}
	}

	var req common.HubRequest[cbor.RawMessage]
	if err := cbor.NewDecoder(s).Decode(&req); err != nil {
		// Fallback to legacy one-shot if the first decode fails
		if err2 := a.handleLegacyStats(s, hubVersion); err2 != nil {
			slog.Error("Error encoding stats (fallback)", "err", err2)
			s.Exit(1)
			return
		}
		s.Exit(0)
		return
	}
	if err := a.handleSSHRequest(s, &req); err != nil {
		slog.Error("SSH request handling failed", "err", err)
		s.Exit(1)
		return
	}
	s.Exit(0)
}

// handleSSHRequest builds a handler context and dispatches to the shared registry
func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMessage]) error {
	// SSH does not support fingerprint auth action
	if req.Action == common.CheckFingerprint {
		return cbor.NewEncoder(w).Encode(common.AgentResponse{Error: "unsupported action"})
	}

	// responder that writes AgentResponse to the SSH session
	sshResponder := func(data any, requestID *uint32) error {
		response := common.AgentResponse{Id: requestID}
		switch v := data.(type) {
		case *system.CombinedData:
			response.SystemData = v
		case string:
			response.String = &v
		case map[string]smart.SmartData:
			response.SmartData = v
		case systemd.ServiceDetails:
			response.ServiceInfo = v
		default:
			response.Error = fmt.Sprintf("unsupported response type: %T", data)
		}
		return cbor.NewEncoder(w).Encode(response)
	}

	ctx := &HandlerContext{
		Client:       nil,
		Agent:        a,
		Request:      req,
		RequestID:    nil,
		HubVerified:  true,
		SendResponse: sshResponder,
	}

	if handler, ok := a.handlerRegistry.GetHandler(req.Action); ok {
		if err := handler.Handle(ctx); err != nil {
			return cbor.NewEncoder(w).Encode(common.AgentResponse{Error: err.Error()})
		}
		return nil
	}
	return cbor.NewEncoder(w).Encode(common.AgentResponse{Error: fmt.Sprintf("unknown action: %d", req.Action)})
}

// handleLegacyStats serves the legacy one-shot stats payload for older hubs
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
	stats := a.gatherStats(60_000)
	return a.writeToSession(w, stats, hubVersion)
}

// writeToSession encodes and writes system statistics to the session.
// It chooses between CBOR and JSON encoding based on the hub version,
// using CBOR for newer versions and JSON for legacy compatibility.
func (a *Agent) writeToSession(w io.Writer, stats *system.CombinedData, hubVersion semver.Version) error {
	if hubVersion.GTE(beszel.MinVersionCbor) {
		return cbor.NewEncoder(w).Encode(stats)
	}
	return json.NewEncoder(w).Encode(stats)
}

// extractHubVersion extracts the beszel version from SSH client version string.
// Expected format: "SSH-2.0-beszel_X.Y.Z" or "beszel_X.Y.Z"
func extractHubVersion(versionString string) (semver.Version, error) {
	_, after, _ := strings.Cut(versionString, "_")
	return semver.Parse(after)
}
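
// For example, "SSH-2.0-beszel_0.11.1" parses to 0.11.1, while strings without a
// full semver after the underscore (such as "SSH-2.0-beszel") return an error.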

// ParseKeys parses a string containing SSH public keys in authorized_keys format.
// It returns a slice of ssh.PublicKey and an error if any key fails to parse.
func ParseKeys(input string) ([]gossh.PublicKey, error) {
	var parsedKeys []gossh.PublicKey
	for line := range strings.Lines(input) {
		line = strings.TrimSpace(line)
		// Skip empty lines or comments
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		// Parse the key
		parsedKey, _, _, _, err := gossh.ParseAuthorizedKey([]byte(line))
		if err != nil {
			return nil, fmt.Errorf("failed to parse key: %s, error: %w", line, err)
		}
		parsedKeys = append(parsedKeys, parsedKey)
	}
	return parsedKeys, nil
}

// GetAddress determines the network address to listen on from various sources.
// It checks the provided address, then environment variables (LISTEN, PORT),
// and finally defaults to ":45876".
func GetAddress(addr string) string {
	if addr == "" {
		addr, _ = GetEnv("LISTEN")
	}
	if addr == "" {
		// Legacy PORT environment variable support
		addr, _ = GetEnv("PORT")
	}
	if addr == "" {
		return ":45876"
	}
	// prefix with : if only port was provided
	if GetNetwork(addr) != "unix" && !strings.Contains(addr, ":") {
		addr = ":" + addr
	}
	return addr
}

// GetNetwork determines the network type based on the address format.
// It checks the NETWORK environment variable first, then infers from
// the address format: addresses starting with "/" are "unix", others are "tcp".
func GetNetwork(addr string) string {
	if network, ok := GetEnv("NETWORK"); ok && network != "" {
		return network
	}
	if strings.HasPrefix(addr, "/") {
		return "unix"
	}
	return "tcp"
}

// StopServer stops the SSH server if it's running.
// It returns an error if the server is not running or if there's an error stopping it.
func (a *Agent) StopServer() error {
	if a.server == nil {
		return errors.New("SSH server not running")
	}

	slog.Info("Stopping SSH server")
	_ = a.server.Close()
	a.server = nil
	a.connectionManager.eventChan <- SSHDisconnect
	return nil
}
agent/server_test.go (new file, 606 lines)
@@ -0,0 +1,606 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/gliderlabs/ssh"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
func TestStartServer(t *testing.T) {
|
||||
// Generate a test key pair
|
||||
pubKey, privKey, err := ed25519.GenerateKey(nil)
|
||||
require.NoError(t, err)
|
||||
signer, err := gossh.NewSignerFromKey(privKey)
|
||||
require.NoError(t, err)
|
||||
sshPubKey, err := gossh.NewPublicKey(pubKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate a different key pair for bad key test
|
||||
badPubKey, badPrivKey, err := ed25519.GenerateKey(nil)
|
||||
require.NoError(t, err)
|
||||
badSigner, err := gossh.NewSignerFromKey(badPrivKey)
|
||||
require.NoError(t, err)
|
||||
sshBadPubKey, err := gossh.NewPublicKey(badPubKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
socketFile := filepath.Join(t.TempDir(), "beszel-test.sock")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
config ServerOptions
|
||||
wantErr bool
|
||||
errContains string
|
||||
setup func() error
|
||||
cleanup func() error
|
||||
}{
|
||||
{
|
||||
name: "tcp port only",
|
||||
config: ServerOptions{
|
||||
Network: "tcp",
|
||||
Addr: ":45987",
|
||||
Keys: []gossh.PublicKey{sshPubKey},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "tcp with ipv4",
|
||||
config: ServerOptions{
|
||||
Network: "tcp4",
|
||||
Addr: "127.0.0.1:45988",
|
||||
Keys: []gossh.PublicKey{sshPubKey},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "tcp with ipv6",
|
||||
config: ServerOptions{
|
||||
Network: "tcp6",
|
||||
Addr: "[::1]:45989",
|
||||
Keys: []gossh.PublicKey{sshPubKey},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unix socket",
|
||||
config: ServerOptions{
|
||||
Network: "unix",
|
||||
Addr: socketFile,
|
||||
Keys: []gossh.PublicKey{sshPubKey},
|
||||
},
|
||||
setup: func() error {
|
||||
// Create a socket file that should be removed
|
||||
f, err := os.Create(socketFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
},
|
||||
cleanup: func() error {
|
||||
return os.Remove(socketFile)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bad key should fail",
|
||||
config: ServerOptions{
|
||||
Network: "tcp",
|
||||
Addr: ":45987",
|
||||
Keys: []gossh.PublicKey{sshBadPubKey},
|
||||
},
|
||||
wantErr: true,
|
||||
errContains: "ssh: handshake failed",
|
||||
},
|
||||
{
|
||||
name: "good key still good",
|
||||
config: ServerOptions{
|
||||
Network: "tcp",
|
||||
Addr: ":45987",
|
||||
Keys: []gossh.PublicKey{sshPubKey},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.setup != nil {
|
||||
err := tt.setup()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if tt.cleanup != nil {
|
||||
defer tt.cleanup()
|
||||
}
|
||||
|
||||
agent, err := NewAgent("")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start server in a goroutine since it blocks
|
||||
errChan := make(chan error, 1)
|
||||
go func() {
|
||||
errChan <- agent.StartServer(tt.config)
|
||||
}()
|
||||
|
||||
// Add a short delay to allow the server to start
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Try to connect to verify server is running
|
||||
var client *gossh.Client
|
||||
|
||||
// Choose the appropriate signer based on the test case
|
||||
testSigner := signer
|
||||
if tt.name == "bad key should fail" {
|
||||
testSigner = badSigner
|
||||
}
|
||||
|
||||
sshClientConfig := &gossh.ClientConfig{
|
||||
User: "a",
|
||||
Auth: []gossh.AuthMethod{
|
||||
gossh.PublicKeys(testSigner),
|
||||
},
|
||||
HostKeyCallback: gossh.InsecureIgnoreHostKey(),
|
||||
Timeout: 4 * time.Second,
|
||||
}
|
||||
|
||||
switch tt.config.Network {
|
||||
case "unix":
|
||||
client, err = gossh.Dial("unix", tt.config.Addr, sshClientConfig)
|
||||
default:
|
||||
if !strings.Contains(tt.config.Addr, ":") {
|
||||
tt.config.Addr = ":" + tt.config.Addr
|
||||
}
|
||||
client, err = gossh.Dial("tcp", tt.config.Addr, sshClientConfig)
|
||||
}
|
||||
|
||||
if tt.wantErr {
|
||||
assert.Error(t, err)
|
||||
if tt.errContains != "" {
|
||||
assert.Contains(t, err.Error(), tt.errContains)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, client)
|
||||
client.Close()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
//////////////////// ParseKeys Tests ////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
// Helper function to generate a temporary file with content
|
||||
func createTempFile(content string) (string, error) {
|
||||
tmpFile, err := os.CreateTemp("", "ssh_keys_*.txt")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create temp file: %w", err)
|
||||
}
|
||||
defer tmpFile.Close()
|
||||
|
||||
if _, err := tmpFile.WriteString(content); err != nil {
|
||||
return "", fmt.Errorf("failed to write to temp file: %w", err)
|
||||
}
|
||||
|
||||
return tmpFile.Name(), nil
|
||||
}
|
||||
|
||||
// Test case 1: String with a single SSH key
|
||||
func TestParseSingleKeyFromString(t *testing.T) {
|
||||
input := "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKCBM91kukN7hbvFKtbpEeo2JXjCcNxXcdBH7V7ADMBo"
|
||||
keys, err := ParseKeys(input)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got: %v", err)
|
||||
}
|
||||
if len(keys) != 1 {
|
||||
t.Fatalf("Expected 1 key, got %d keys", len(keys))
|
||||
}
|
||||
if keys[0].Type() != "ssh-ed25519" {
|
||||
t.Fatalf("Expected key type 'ssh-ed25519', got '%s'", keys[0].Type())
|
||||
}
|
||||
}
|
||||
|
||||
// Test case 2: String with multiple SSH keys
|
||||
func TestParseMultipleKeysFromString(t *testing.T) {
|
||||
input := "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKCBM91kukN7hbvFKtbpEeo2JXjCcNxXcdBH7V7ADMBo\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJDMtAOQfxDlCxe+A5lVbUY/DHxK1LAF2Z3AV0FYv36D \n #comment\n ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJDMtAOQfxDlCxe+A5lVbUY/DHxK1LAF2Z3AV0FYv36D"
|
||||
keys, err := ParseKeys(input)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got: %v", err)
|
||||
}
|
||||
if len(keys) != 3 {
|
||||
t.Fatalf("Expected 3 keys, got %d keys", len(keys))
|
||||
}
|
||||
if keys[0].Type() != "ssh-ed25519" || keys[1].Type() != "ssh-ed25519" || keys[2].Type() != "ssh-ed25519" {
|
||||
t.Fatalf("Unexpected key types: %s, %s, %s", keys[0].Type(), keys[1].Type(), keys[2].Type())
|
||||
}
|
||||
}
|
||||
|
||||
// Test case 3: File with a single SSH key
|
||||
func TestParseSingleKeyFromFile(t *testing.T) {
|
||||
content := "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKCBM91kukN7hbvFKtbpEeo2JXjCcNxXcdBH7V7ADMBo"
|
||||
filePath, err := createTempFile(content)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp file: %v", err)
|
||||
}
|
||||
defer os.Remove(filePath) // Clean up the file after the test
|
||||
|
||||
// Read the file content
|
||||
fileContent, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read temp file: %v", err)
|
||||
}
|
||||
|
||||
// Parse the keys
|
||||
keys, err := ParseKeys(string(fileContent))
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got: %v", err)
|
||||
}
|
||||
if len(keys) != 1 {
|
||||
t.Fatalf("Expected 1 key, got %d keys", len(keys))
|
||||
}
|
||||
if keys[0].Type() != "ssh-ed25519" {
|
||||
t.Fatalf("Expected key type 'ssh-ed25519', got '%s'", keys[0].Type())
|
||||
}
|
||||
}
|
||||
|
||||
// Test case 4: File with multiple SSH keys
|
||||
func TestParseMultipleKeysFromFile(t *testing.T) {
|
||||
content := "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKCBM91kukN7hbvFKtbpEeo2JXjCcNxXcdBH7V7ADMBo\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJDMtAOQfxDlCxe+A5lVbUY/DHxK1LAF2Z3AV0FYv36D \n #comment\n ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJDMtAOQfxDlCxe+A5lVbUY/DHxK1LAF2Z3AV0FYv36D"
|
||||
filePath, err := createTempFile(content)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp file: %v", err)
|
||||
}
|
||||
// defer os.Remove(filePath) // Clean up the file after the test
|
||||
|
||||
// Read the file content
|
||||
fileContent, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read temp file: %v", err)
|
||||
}
|
||||
|
||||
// Parse the keys
|
||||
keys, err := ParseKeys(string(fileContent))
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got: %v", err)
|
||||
}
|
||||
if len(keys) != 3 {
|
||||
t.Fatalf("Expected 3 keys, got %d keys", len(keys))
|
||||
}
|
||||
if keys[0].Type() != "ssh-ed25519" || keys[1].Type() != "ssh-ed25519" || keys[2].Type() != "ssh-ed25519" {
|
||||
t.Fatalf("Unexpected key types: %s, %s, %s", keys[0].Type(), keys[1].Type(), keys[2].Type())
|
||||
}
|
||||
}
|
||||
|
||||
// Test case 5: Invalid SSH key input
|
||||
func TestParseInvalidKey(t *testing.T) {
|
||||
input := "invalid-key-data"
|
||||
_, err := ParseKeys(input)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected an error for invalid key, got nil")
|
||||
}
|
||||
expectedErrMsg := "failed to parse key"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
t.Fatalf("Expected error message to contain '%s', got: %v", expectedErrMsg, err)
|
||||
}
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
//////////////////// Hub Version Tests //////////////////////////
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
func TestExtractHubVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
clientVersion string
|
||||
expectedVersion string
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid beszel client version with underscore",
|
||||
clientVersion: "SSH-2.0-beszel_0.11.1",
|
||||
expectedVersion: "0.11.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid beszel client version with beta",
|
||||
clientVersion: "SSH-2.0-beszel_1.0.0-beta",
|
||||
expectedVersion: "1.0.0-beta",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid beszel client version with rc",
|
||||
clientVersion: "SSH-2.0-beszel_0.12.0-rc1",
|
||||
expectedVersion: "0.12.0-rc1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "different SSH client",
|
||||
clientVersion: "SSH-2.0-OpenSSH_8.0",
|
||||
expectedVersion: "8.0",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "malformed version string without underscore",
|
||||
clientVersion: "SSH-2.0-beszel",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "empty version string",
|
||||
clientVersion: "",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "version string with underscore but no version",
|
||||
clientVersion: "beszel_",
|
||||
expectedVersion: "",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "version with patch and build metadata",
|
||||
clientVersion: "SSH-2.0-beszel_1.2.3+build.123",
|
||||
expectedVersion: "1.2.3+build.123",
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := extractHubVersion(tt.clientVersion)
|
||||
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedVersion, result.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
/////////////// Hub Version Detection Tests ////////////////////
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
func TestGetHubVersion(t *testing.T) {
|
||||
agent, err := NewAgent("")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Mock SSH context that implements the ssh.Context interface
|
||||
mockCtx := &mockSSHContext{
|
||||
sessionID: "test-session-123",
|
||||
clientVersion: "SSH-2.0-beszel_0.12.0",
|
||||
}
|
||||
|
||||
// Test first call - should extract and cache version
|
||||
version := agent.getHubVersion("test-session-123", mockCtx)
|
||||
assert.Equal(t, "0.12.0", version.String())
|
||||
|
||||
// Test second call - should return cached version
|
||||
mockCtx.clientVersion = "SSH-2.0-beszel_0.11.0" // Change version but should still return cached
|
||||
version = agent.getHubVersion("test-session-123", mockCtx)
|
||||
assert.Equal(t, "0.12.0", version.String()) // Should still be cached version
|
||||
|
||||
// Test different session - should extract new version
|
||||
version = agent.getHubVersion("different-session", mockCtx)
|
||||
assert.Equal(t, "0.11.0", version.String())
|
||||
|
||||
// Test with invalid version string (non-beszel client)
|
||||
mockCtx.clientVersion = "SSH-2.0-OpenSSH_8.0"
|
||||
version = agent.getHubVersion("invalid-session", mockCtx)
|
||||
assert.Equal(t, "0.0.0", version.String()) // Should be empty version for non-beszel clients
|
||||
|
||||
// Test with no client version
|
||||
mockCtx.clientVersion = ""
|
||||
version = agent.getHubVersion("no-version-session", mockCtx)
|
||||
assert.True(t, version.EQ(semver.Version{})) // Should be empty version
|
||||
}
|
||||
|
||||
// mockSSHContext implements ssh.Context for testing
|
||||
type mockSSHContext struct {
|
||||
context.Context
|
||||
sync.Mutex
|
||||
sessionID string
|
||||
clientVersion string
|
||||
}
|
||||
|
||||
func (m *mockSSHContext) SessionID() string {
|
||||
return m.sessionID
|
||||
}
|
||||
|
||||
func (m *mockSSHContext) ClientVersion() string {
|
||||
return m.clientVersion
|
||||
}
|
||||
|
||||
func (m *mockSSHContext) ServerVersion() string {
|
||||
return "SSH-2.0-beszel_test"
|
||||
}
|
||||
|
||||
func (m *mockSSHContext) Value(key interface{}) interface{} {
|
||||
if key == ssh.ContextKeyClientVersion {
|
||||
return m.clientVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSSHContext) User() string { return "test-user" }
|
||||
func (m *mockSSHContext) RemoteAddr() net.Addr { return nil }
|
||||
func (m *mockSSHContext) LocalAddr() net.Addr { return nil }
|
||||
func (m *mockSSHContext) Permissions() *ssh.Permissions { return nil }
|
||||
func (m *mockSSHContext) SetValue(key, value interface{}) {}
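// Only the methods getHubVersion actually touches matter for these tests:
// SessionID supplies the cache key and Value(ssh.ContextKeyClientVersion) supplies
// the client version string; the remaining ssh.Context methods are stubs.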
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
/////////////// CBOR vs JSON Encoding Tests ////////////////////
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
// TestWriteToSessionEncoding tests that writeToSession actually encodes data in the correct format
|
||||
func TestWriteToSessionEncoding(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hubVersion string
|
||||
expectedUsesCbor bool
|
||||
}{
|
||||
{
|
||||
name: "old hub version should use JSON",
|
||||
hubVersion: "0.11.1",
|
||||
expectedUsesCbor: false,
|
||||
},
|
||||
{
|
||||
name: "non-beta release should use CBOR",
|
||||
hubVersion: "0.12.0",
|
||||
expectedUsesCbor: true,
|
||||
},
|
||||
{
|
||||
name: "even newer hub version should use CBOR",
|
||||
hubVersion: "0.16.4",
|
||||
expectedUsesCbor: true,
|
||||
},
|
||||
{
|
||||
name: "beta version below release threshold should use JSON",
|
||||
hubVersion: "0.12.0-beta0",
|
||||
expectedUsesCbor: false,
|
||||
},
|
||||
// {
|
||||
// name: "matching beta version should use CBOR",
|
||||
// hubVersion: "0.12.0-beta2",
|
||||
// expectedUsesCbor: true,
|
||||
// },
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Reset the global hubVersions map to ensure clean state for each test
|
||||
hubVersions = nil
|
||||
|
||||
agent, err := NewAgent("")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Parse the test version
|
||||
version, err := semver.Parse(tt.hubVersion)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create test data to encode
|
||||
testData := createTestCombinedData()
|
||||
|
||||
var buf strings.Builder
|
||||
err = agent.writeToSession(&buf, testData, version)
|
||||
require.NoError(t, err)
|
||||
|
||||
encodedData := buf.String()
|
||||
require.NotEmpty(t, encodedData)
|
||||
|
||||
// Verify the encoding format by attempting to decode
|
||||
if tt.expectedUsesCbor {
|
||||
var decodedCbor system.CombinedData
|
||||
err = cbor.Unmarshal([]byte(encodedData), &decodedCbor)
|
||||
assert.NoError(t, err, "Should be valid CBOR data")
|
||||
|
||||
var decodedJson system.CombinedData
|
||||
err = json.Unmarshal([]byte(encodedData), &decodedJson)
|
||||
assert.Error(t, err, "Should not be valid JSON data")
|
||||
|
||||
assert.Equal(t, testData.Info.Hostname, decodedCbor.Info.Hostname)
|
||||
assert.Equal(t, testData.Stats.Cpu, decodedCbor.Stats.Cpu)
|
||||
} else {
|
||||
// Should be JSON - try to decode as JSON
|
||||
var decodedJson system.CombinedData
|
||||
err = json.Unmarshal([]byte(encodedData), &decodedJson)
|
||||
assert.NoError(t, err, "Should be valid JSON data")
|
||||
|
||||
var decodedCbor system.CombinedData
|
||||
err = cbor.Unmarshal([]byte(encodedData), &decodedCbor)
|
||||
assert.Error(t, err, "Should not be valid CBOR data")
|
||||
|
||||
// Verify the decoded JSON data matches our test data
|
||||
assert.Equal(t, testData.Info.Hostname, decodedJson.Info.Hostname)
|
||||
assert.Equal(t, testData.Stats.Cpu, decodedJson.Stats.Cpu)
|
||||
|
||||
// Verify it looks like JSON (starts with '{' and contains readable field names)
|
||||
assert.True(t, strings.HasPrefix(encodedData, "{"), "JSON should start with '{'")
|
||||
assert.Contains(t, encodedData, `"info"`, "JSON should contain readable field names")
|
||||
assert.Contains(t, encodedData, `"stats"`, "JSON should contain readable field names")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to create test data for encoding tests
|
||||
func createTestCombinedData() *system.CombinedData {
|
||||
return &system.CombinedData{
|
||||
Stats: system.Stats{
|
||||
Cpu: 25.5,
|
||||
Mem: 8589934592, // 8GB
|
||||
MemUsed: 4294967296, // 4GB
|
||||
MemPct: 50.0,
|
||||
DiskTotal: 1099511627776, // 1TB
|
||||
DiskUsed: 549755813888, // 512GB
|
||||
DiskPct: 50.0,
|
||||
},
|
||||
Info: system.Info{
|
||||
Hostname: "test-host",
|
||||
Cores: 8,
|
||||
CpuModel: "Test CPU Model",
|
||||
Uptime: 3600,
|
||||
AgentVersion: "0.12.0",
|
||||
Os: system.Linux,
|
||||
},
|
||||
Containers: []*container.Stats{
|
||||
{
|
||||
Name: "test-container",
|
||||
Cpu: 10.5,
|
||||
Mem: 1073741824, // 1GB
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestHubVersionCaching(t *testing.T) {
|
||||
// Reset the global hubVersions map to ensure clean state
|
||||
hubVersions = nil
|
||||
|
||||
agent, err := NewAgent("")
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx1 := &mockSSHContext{
|
||||
sessionID: "session1",
|
||||
clientVersion: "SSH-2.0-beszel_0.12.0",
|
||||
}
|
||||
ctx2 := &mockSSHContext{
|
||||
sessionID: "session2",
|
||||
clientVersion: "SSH-2.0-beszel_0.11.0",
|
||||
}
|
||||
|
||||
// First calls should cache the versions
|
||||
v1 := agent.getHubVersion("session1", ctx1)
|
||||
v2 := agent.getHubVersion("session2", ctx2)
|
||||
|
||||
assert.Equal(t, "0.12.0", v1.String())
|
||||
assert.Equal(t, "0.11.0", v2.String())
|
||||
|
||||
// Verify caching by changing context but keeping same session ID
|
||||
ctx1.clientVersion = "SSH-2.0-beszel_0.10.0"
|
||||
v1Cached := agent.getHubVersion("session1", ctx1)
|
||||
assert.Equal(t, "0.12.0", v1Cached.String()) // Should still be cached version
|
||||
|
||||
// New session should get new version
|
||||
ctx3 := &mockSSHContext{
|
||||
sessionID: "session3",
|
||||
clientVersion: "SSH-2.0-beszel_0.13.0",
|
||||
}
|
||||
v3 := agent.getHubVersion("session3", ctx3)
|
||||
assert.Equal(t, "0.13.0", v3.String())
|
||||
}
|
||||
agent/smart.go (new file, 926 lines)
@@ -0,0 +1,926 @@
|
||||
//go:generate -command fetchsmartctl go run ./tools/fetchsmartctl
|
||||
//go:generate fetchsmartctl -out ./smartmontools/smartctl.exe -url https://static.beszel.dev/bin/smartctl/smartctl-nc.exe -sha 3912249c3b329249aa512ce796fd1b64d7cbd8378b68ad2756b39163d9c30b47
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
|
||||
"golang.org/x/exp/slog"
|
||||
)
|
||||
|
||||
// SmartManager manages data collection for SMART devices
|
||||
type SmartManager struct {
|
||||
sync.Mutex
|
||||
SmartDataMap map[string]*smart.SmartData
|
||||
SmartDevices []*DeviceInfo
|
||||
refreshMutex sync.Mutex
|
||||
lastScanTime time.Time
|
||||
binPath string
|
||||
}
|
||||
|
||||
type scanOutput struct {
|
||||
Devices []struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
InfoName string `json:"info_name"`
|
||||
Protocol string `json:"protocol"`
|
||||
} `json:"devices"`
|
||||
}
|
||||
|
||||
type DeviceInfo struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
InfoName string `json:"info_name"`
|
||||
Protocol string `json:"protocol"`
|
||||
// typeVerified reports whether we have already parsed SMART data for this device
|
||||
// with the stored parserType. When true we can skip re-running the detection logic.
|
||||
typeVerified bool
|
||||
// parserType holds the parser type (nvme, sat, scsi) that last succeeded.
|
||||
parserType string
|
||||
}
|
||||
|
||||
var errNoValidSmartData = fmt.Errorf("no valid SMART data found") // Error for missing data
|
||||
|
||||
// Refresh updates SMART data for all known devices
|
||||
func (sm *SmartManager) Refresh(forceScan bool) error {
|
||||
sm.refreshMutex.Lock()
|
||||
defer sm.refreshMutex.Unlock()
|
||||
|
||||
scanErr := sm.ScanDevices(false)
|
||||
if scanErr != nil {
|
||||
slog.Debug("smartctl scan failed", "err", scanErr)
|
||||
}
|
||||
|
||||
devices := sm.devicesSnapshot()
|
||||
var collectErr error
|
||||
for _, deviceInfo := range devices {
|
||||
if deviceInfo == nil {
|
||||
continue
|
||||
}
|
||||
if err := sm.CollectSmart(deviceInfo); err != nil {
|
||||
slog.Debug("smartctl collect failed", "device", deviceInfo.Name, "err", err)
|
||||
collectErr = err
|
||||
}
|
||||
}
|
||||
|
||||
return sm.resolveRefreshError(scanErr, collectErr)
|
||||
}
|
||||
|
||||
// devicesSnapshot returns a copy of the current device slice to avoid iterating
|
||||
// while holding the primary mutex for longer than necessary.
|
||||
func (sm *SmartManager) devicesSnapshot() []*DeviceInfo {
|
||||
sm.Lock()
|
||||
defer sm.Unlock()
|
||||
|
||||
devices := make([]*DeviceInfo, len(sm.SmartDevices))
|
||||
copy(devices, sm.SmartDevices)
|
||||
return devices
|
||||
}
|
||||
|
||||
// hasSmartData reports whether any SMART data has been collected.
|
||||
// func (sm *SmartManager) hasSmartData() bool {
|
||||
// sm.Lock()
|
||||
// defer sm.Unlock()
|
||||
|
||||
// return len(sm.SmartDataMap) > 0
|
||||
// }
|
||||
|
||||
// resolveRefreshError determines the proper error to return after a refresh.
|
||||
func (sm *SmartManager) resolveRefreshError(scanErr, collectErr error) error {
|
||||
sm.Lock()
|
||||
noDevices := len(sm.SmartDevices) == 0
|
||||
noData := len(sm.SmartDataMap) == 0
|
||||
sm.Unlock()
|
||||
|
||||
if noDevices {
|
||||
if scanErr != nil {
|
||||
return scanErr
|
||||
}
|
||||
}
|
||||
|
||||
if !noData {
|
||||
return nil
|
||||
}
|
||||
|
||||
if collectErr != nil {
|
||||
return collectErr
|
||||
}
|
||||
if scanErr != nil {
|
||||
return scanErr
|
||||
}
|
||||
return errNoValidSmartData
|
||||
}
|
||||
|
||||
// GetCurrentData returns the current SMART data
|
||||
func (sm *SmartManager) GetCurrentData() map[string]smart.SmartData {
|
||||
sm.Lock()
|
||||
defer sm.Unlock()
|
||||
result := make(map[string]smart.SmartData, len(sm.SmartDataMap))
|
||||
for key, value := range sm.SmartDataMap {
|
||||
if value != nil {
|
||||
result[key] = *value
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ScanDevices scans for SMART devices
|
||||
// Scan devices using `smartctl --scan -j`
|
||||
// If scan fails, return error
|
||||
// If scan succeeds, parse the output and update the SmartDevices slice
|
||||
func (sm *SmartManager) ScanDevices(force bool) error {
|
||||
if !force && time.Since(sm.lastScanTime) < 30*time.Minute {
|
||||
return nil
|
||||
}
|
||||
sm.lastScanTime = time.Now()
|
||||
currentDevices := sm.devicesSnapshot()
|
||||
|
||||
var configuredDevices []*DeviceInfo
|
||||
if configuredRaw, ok := GetEnv("SMART_DEVICES"); ok {
|
||||
slog.Info("SMART_DEVICES", "value", configuredRaw)
|
||||
config := strings.TrimSpace(configuredRaw)
|
||||
if config == "" {
|
||||
return errNoValidSmartData
|
||||
}
|
||||
|
||||
parsedDevices, err := sm.parseConfiguredDevices(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
configuredDevices = parsedDevices
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, sm.binPath, "--scan", "-j")
|
||||
output, err := cmd.Output()
|
||||
|
||||
var (
|
||||
scanErr error
|
||||
scannedDevices []*DeviceInfo
|
||||
hasValidScan bool
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
scanErr = err
|
||||
} else {
|
||||
scannedDevices, hasValidScan = sm.parseScan(output)
|
||||
if !hasValidScan {
|
||||
scanErr = errNoValidSmartData
|
||||
}
|
||||
}
|
||||
|
||||
finalDevices := mergeDeviceLists(currentDevices, scannedDevices, configuredDevices)
|
||||
sm.updateSmartDevices(finalDevices)
|
||||
|
||||
if len(finalDevices) == 0 {
|
||||
if scanErr != nil {
|
||||
slog.Debug("smartctl scan failed", "err", scanErr)
|
||||
return scanErr
|
||||
}
|
||||
return errNoValidSmartData
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, error) {
|
||||
entries := strings.Split(config, ",")
|
||||
devices := make([]*DeviceInfo, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
entry = strings.TrimSpace(entry)
|
||||
if entry == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.SplitN(entry, ":", 2)
|
||||
|
||||
name := strings.TrimSpace(parts[0])
|
||||
if name == "" {
|
||||
return nil, fmt.Errorf("invalid SMART_DEVICES entry %q", entry)
|
||||
}
|
||||
|
||||
devType := ""
|
||||
if len(parts) == 2 {
|
||||
devType = strings.ToLower(strings.TrimSpace(parts[1]))
|
||||
}
|
||||
|
||||
devices = append(devices, &DeviceInfo{
|
||||
Name: name,
|
||||
Type: devType,
|
||||
})
|
||||
}
|
||||
|
||||
if len(devices) == 0 {
|
||||
return nil, errNoValidSmartData
|
||||
}
|
||||
|
||||
return devices, nil
|
||||
}
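// Example (illustrative): SMART_DEVICES="/dev/sda,/dev/nvme0:nvme" yields two
// devices, the second carrying an explicit, lowercased "nvme" type hint; empty
// entries are skipped.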
|
||||
|
||||
// detectSmartOutputType inspects sections that are unique to each smartctl
|
||||
// JSON schema (NVMe, ATA/SATA, SCSI) to determine which parser should be used
|
||||
// when the reported device type is ambiguous or missing.
|
||||
func detectSmartOutputType(output []byte) string {
|
||||
var hints struct {
|
||||
AtaSmartAttributes json.RawMessage `json:"ata_smart_attributes"`
|
||||
NVMeSmartHealthInformationLog json.RawMessage `json:"nvme_smart_health_information_log"`
|
||||
ScsiErrorCounterLog json.RawMessage `json:"scsi_error_counter_log"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(output, &hints); err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
switch {
|
||||
case hasJSONValue(hints.NVMeSmartHealthInformationLog):
|
||||
return "nvme"
|
||||
case hasJSONValue(hints.AtaSmartAttributes):
|
||||
return "sat"
|
||||
case hasJSONValue(hints.ScsiErrorCounterLog):
|
||||
return "scsi"
|
||||
default:
|
||||
return "sat"
|
||||
}
|
||||
}
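// For example, output containing a non-null "nvme_smart_health_information_log"
// section is routed to the NVMe parser, "ata_smart_attributes" to the SAT parser,
// "scsi_error_counter_log" to the SCSI parser, and anything else defaults to SAT.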
|
||||
|
||||
// hasJSONValue reports whether a JSON payload contains a concrete value. The
|
||||
// smartctl output often emits "null" for sections that do not apply, so we
|
||||
// only treat non-null content as a hint.
|
||||
func hasJSONValue(raw json.RawMessage) bool {
|
||||
if len(raw) == 0 {
|
||||
return false
|
||||
}
|
||||
trimmed := strings.TrimSpace(string(raw))
|
||||
return trimmed != "" && trimmed != "null"
|
||||
}
|
||||
|
||||
func normalizeParserType(value string) string {
|
||||
switch strings.ToLower(strings.TrimSpace(value)) {
|
||||
case "nvme", "sntasmedia", "sntrealtek":
|
||||
return "nvme"
|
||||
case "sat", "ata":
|
||||
return "sat"
|
||||
case "scsi":
|
||||
return "scsi"
|
||||
default:
|
||||
return strings.ToLower(strings.TrimSpace(value))
|
||||
}
|
||||
}

// parseSmartOutput attempts each SMART parser, optionally detecting the type when
// it is not provided, and updates the device info when a parser succeeds.
func (sm *SmartManager) parseSmartOutput(deviceInfo *DeviceInfo, output []byte) bool {
	parsers := []struct {
		Type  string
		Parse func([]byte) (bool, int)
	}{
		{Type: "nvme", Parse: sm.parseSmartForNvme},
		{Type: "sat", Parse: sm.parseSmartForSata},
		{Type: "scsi", Parse: sm.parseSmartForScsi},
	}

	deviceType := normalizeParserType(deviceInfo.parserType)
	if deviceType == "" {
		deviceType = normalizeParserType(deviceInfo.Type)
	}
	if deviceInfo.parserType == "" {
		switch deviceType {
		case "nvme", "sat", "scsi":
			deviceInfo.parserType = deviceType
		}
	}

	// Only run the type detection when we do not yet know which parser works
	// or the previous attempt failed.
	needsDetection := deviceType == "" || !deviceInfo.typeVerified
	if needsDetection {
		structureType := detectSmartOutputType(output)
		if deviceType != structureType {
			deviceType = structureType
			deviceInfo.parserType = structureType
			deviceInfo.typeVerified = false
		}
		if deviceInfo.Type == "" || strings.EqualFold(deviceInfo.Type, structureType) {
			deviceInfo.Type = structureType
		}
	}

	// Try the most likely parser first, but keep the remaining parsers in reserve
	// so an incorrect hint never leaves the device unparsed.
	selectedParsers := make([]struct {
		Type  string
		Parse func([]byte) (bool, int)
	}, 0, len(parsers))
	if deviceType != "" {
		for _, parser := range parsers {
			if parser.Type == deviceType {
				selectedParsers = append(selectedParsers, parser)
				break
			}
		}
	}
	for _, parser := range parsers {
		alreadySelected := false
		for _, selected := range selectedParsers {
			if selected.Type == parser.Type {
				alreadySelected = true
				break
			}
		}
		if alreadySelected {
			continue
		}
		selectedParsers = append(selectedParsers, parser)
	}

	// Try the selected parsers in order until we find one that succeeds.
	for _, parser := range selectedParsers {
		hasData, _ := parser.Parse(output)
		if hasData {
			deviceInfo.parserType = parser.Type
			if deviceInfo.Type == "" || strings.EqualFold(deviceInfo.Type, parser.Type) {
				deviceInfo.Type = parser.Type
			}
			// Remember that this parser is valid so future refreshes can bypass
			// detection entirely.
			deviceInfo.typeVerified = true
			return true
		}
		slog.Debug("parser failed", "device", deviceInfo.Name, "parser", parser.Type)
	}

	// Leave verification false so the next pass will attempt detection again.
	deviceInfo.typeVerified = false
	slog.Debug("parsing failed", "device", deviceInfo.Name)
	return false
}
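
// Editor's note (illustrative, not in the original source): with a stale hint of
// Type "scsi" on what is actually an NVMe disk, detectSmartOutputType corrects the
// parser choice before anything runs, so the NVMe parser is tried first; on success
// parserType becomes "nvme" and typeVerified is set, letting later refreshes skip
// detection while the user-supplied Type string is left untouched.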

// CollectSmart collects SMART data for a device
// Collect data using `smartctl -d <type> -aj /dev/<device>` when device type is known
// Always attempts to parse output even if command fails, as some data may still be available
// If collect fails, return error
// If collect succeeds, parse the output and update the SmartDataMap
// Uses -n standby to avoid waking up sleeping disks, but bypasses standby mode
// for initial data collection when no cached data exists
func (sm *SmartManager) CollectSmart(deviceInfo *DeviceInfo) error {
	// slog.Info("collecting SMART data", "device", deviceInfo.Name, "type", deviceInfo.Type, "has_existing_data", sm.hasDataForDevice(deviceInfo.Name))

	// Check if we have any existing data for this device
	hasExistingData := sm.hasDataForDevice(deviceInfo.Name)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Try with -n standby first if we have existing data
	args := sm.smartctlArgs(deviceInfo, true)
	cmd := exec.CommandContext(ctx, sm.binPath, args...)
	output, err := cmd.CombinedOutput()

	// Check if device is in standby (exit status 2)
	if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 2 {
		if hasExistingData {
			// Device is in standby and we have cached data, keep using cache
			return nil
		}
		// No cached data, need to collect initial data by bypassing standby
		ctx2, cancel2 := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel2()
		args = sm.smartctlArgs(deviceInfo, false)
		cmd = exec.CommandContext(ctx2, sm.binPath, args...)
		output, err = cmd.CombinedOutput()
	}

	hasValidData := sm.parseSmartOutput(deviceInfo, output)

	if !hasValidData {
		if err != nil {
			slog.Debug("smartctl failed", "device", deviceInfo.Name, "err", err)
			return err
		}
		slog.Debug("no valid SMART data found", "device", deviceInfo.Name)
		return errNoValidSmartData
	}

	return nil
}

// smartctlArgs returns the arguments for the smartctl command
// based on the device type and whether to include standby mode
func (sm *SmartManager) smartctlArgs(deviceInfo *DeviceInfo, includeStandby bool) []string {
	args := make([]string, 0, 7)

	if deviceInfo != nil {
		deviceType := strings.ToLower(deviceInfo.Type)
		// types sometimes misidentified in scan; see github.com/henrygd/beszel/issues/1345
		if deviceType != "" && deviceType != "scsi" && deviceType != "ata" {
			args = append(args, "-d", deviceInfo.Type)
		}
	}

	args = append(args, "-a", "--json=c")

	if includeStandby {
		args = append(args, "-n", "standby")
	}

	if deviceInfo != nil {
		args = append(args, deviceInfo.Name)
	}

	return args
}

// hasDataForDevice checks if we have cached SMART data for a specific device
func (sm *SmartManager) hasDataForDevice(deviceName string) bool {
	sm.Lock()
	defer sm.Unlock()

	// Check if any cached data has this device name
	for _, data := range sm.SmartDataMap {
		if data != nil && data.DiskName == deviceName {
			return true
		}
	}
	return false
}
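
// Editor's illustration of the resulting smartctl invocations (consistent with the
// unit tests added later in this diff):
//
//	sm.smartctlArgs(&DeviceInfo{Name: "/dev/sda", Type: "sat"}, true)
//	// -> ["-d", "sat", "-a", "--json=c", "-n", "standby", "/dev/sda"]
//	sm.smartctlArgs(&DeviceInfo{Name: "/dev/sda", Type: "scsi"}, false)
//	// -> ["-a", "--json=c", "/dev/sda"]  ("scsi"/"ata" hints are left to smartctl's autodetection)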

// parseScan parses the output of smartctl --scan -j and returns the discovered devices.
func (sm *SmartManager) parseScan(output []byte) ([]*DeviceInfo, bool) {
	scan := &scanOutput{}

	if err := json.Unmarshal(output, scan); err != nil {
		return nil, false
	}

	if len(scan.Devices) == 0 {
		slog.Debug("no devices found in smartctl scan")
		return nil, false
	}

	devices := make([]*DeviceInfo, 0, len(scan.Devices))
	for _, device := range scan.Devices {
		slog.Debug("smartctl scan", "name", device.Name, "type", device.Type, "protocol", device.Protocol)
		devices = append(devices, &DeviceInfo{
			Name:     device.Name,
			Type:     device.Type,
			InfoName: device.InfoName,
			Protocol: device.Protocol,
		})
	}

	return devices, true
}
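
// Editor's illustration: a `smartctl --scan -j` payload such as
//
//	{"devices": [{"name": "/dev/sda", "type": "sat", "info_name": "/dev/sda [SAT]", "protocol": "ATA"}]}
//
// decodes into a single DeviceInfo{Name: "/dev/sda", Type: "sat", InfoName: "/dev/sda [SAT]",
// Protocol: "ATA"}; an empty device list yields (nil, false).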
|
||||
|
||||
// mergeDeviceLists combines scanned and configured SMART devices, preferring
|
||||
// configured SMART_DEVICES when both sources reference the same device.
|
||||
func mergeDeviceLists(existing, scanned, configured []*DeviceInfo) []*DeviceInfo {
|
||||
if len(scanned) == 0 && len(configured) == 0 {
|
||||
return existing
|
||||
}
|
||||
|
||||
// preserveVerifiedType copies the verified type/parser metadata from an existing
|
||||
// device record so that subsequent scans/config updates never downgrade a
|
||||
// previously verified device.
|
||||
preserveVerifiedType := func(target, prev *DeviceInfo) {
|
||||
if prev == nil || !prev.typeVerified {
|
||||
return
|
||||
}
|
||||
target.Type = prev.Type
|
||||
target.typeVerified = true
|
||||
target.parserType = prev.parserType
|
||||
}
|
||||
|
||||
existingIndex := make(map[string]*DeviceInfo, len(existing))
|
||||
for _, dev := range existing {
|
||||
if dev == nil || dev.Name == "" {
|
||||
continue
|
||||
}
|
||||
existingIndex[dev.Name] = dev
|
||||
}
|
||||
|
||||
finalDevices := make([]*DeviceInfo, 0, len(scanned)+len(configured))
|
||||
deviceIndex := make(map[string]*DeviceInfo, len(scanned)+len(configured))
|
||||
|
||||
// Start with the newly scanned devices so we always surface fresh metadata,
|
||||
// but ensure we retain any previously verified parser assignment.
|
||||
for _, dev := range scanned {
|
||||
if dev == nil || dev.Name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Work on a copy so we can safely adjust metadata without mutating the
|
||||
// input slices that may be reused elsewhere.
|
||||
copyDev := *dev
|
||||
if prev := existingIndex[copyDev.Name]; prev != nil {
|
||||
preserveVerifiedType(©Dev, prev)
|
||||
}
|
||||
|
||||
finalDevices = append(finalDevices, ©Dev)
|
||||
deviceIndex[copyDev.Name] = finalDevices[len(finalDevices)-1]
|
||||
}
|
||||
|
||||
// Merge configured devices on top so users can override scan results (except
|
||||
// for verified type information).
|
||||
for _, dev := range configured {
|
||||
if dev == nil || dev.Name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if existingDev, ok := deviceIndex[dev.Name]; ok {
|
||||
// Only update the type if it has not been verified yet; otherwise we
|
||||
// keep the existing verified metadata intact.
|
||||
if dev.Type != "" && !existingDev.typeVerified {
|
||||
newType := strings.TrimSpace(dev.Type)
|
||||
existingDev.Type = newType
|
||||
existingDev.typeVerified = false
|
||||
existingDev.parserType = normalizeParserType(newType)
|
||||
}
|
||||
if dev.InfoName != "" {
|
||||
existingDev.InfoName = dev.InfoName
|
||||
}
|
||||
if dev.Protocol != "" {
|
||||
existingDev.Protocol = dev.Protocol
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
copyDev := *dev
|
||||
if prev := existingIndex[copyDev.Name]; prev != nil {
|
||||
preserveVerifiedType(©Dev, prev)
|
||||
} else if copyDev.Type != "" {
|
||||
copyDev.parserType = normalizeParserType(copyDev.Type)
|
||||
}
|
||||
|
||||
finalDevices = append(finalDevices, ©Dev)
|
||||
deviceIndex[copyDev.Name] = finalDevices[len(finalDevices)-1]
|
||||
}
|
||||
|
||||
return finalDevices
|
||||
}

// updateSmartDevices replaces the cached device list and prunes SMART data
// entries whose backing device no longer exists.
func (sm *SmartManager) updateSmartDevices(devices []*DeviceInfo) {
	sm.Lock()
	defer sm.Unlock()

	sm.SmartDevices = devices

	if len(sm.SmartDataMap) == 0 {
		return
	}

	validNames := make(map[string]struct{}, len(devices))
	for _, device := range devices {
		if device == nil || device.Name == "" {
			continue
		}
		validNames[device.Name] = struct{}{}
	}

	for key, data := range sm.SmartDataMap {
		if data == nil {
			delete(sm.SmartDataMap, key)
			continue
		}

		if _, ok := validNames[data.DiskName]; ok {
			continue
		}

		delete(sm.SmartDataMap, key)
	}
}
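
// Editor's note: pruning keys off DiskName rather than the serial-number map key, so a
// cached entry survives as long as some current device still reports the same /dev path
// (see TestParseScan below, where the "serial-stale" entry for /dev/sdb is dropped once
// the scan no longer lists that device).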

// isVirtualDevice checks if a device is a virtual disk that should be filtered out
func (sm *SmartManager) isVirtualDevice(data *smart.SmartInfoForSata) bool {
	vendorUpper := strings.ToUpper(data.ScsiVendor)
	productUpper := strings.ToUpper(data.ScsiProduct)
	modelUpper := strings.ToUpper(data.ModelName)

	return sm.isVirtualDeviceFromStrings(vendorUpper, productUpper, modelUpper)
}

// isVirtualDeviceNvme checks if an NVMe device is a virtual disk that should be filtered out
func (sm *SmartManager) isVirtualDeviceNvme(data *smart.SmartInfoForNvme) bool {
	modelUpper := strings.ToUpper(data.ModelName)

	return sm.isVirtualDeviceFromStrings(modelUpper)
}

// isVirtualDeviceScsi checks if a SCSI device is a virtual disk that should be filtered out
func (sm *SmartManager) isVirtualDeviceScsi(data *smart.SmartInfoForScsi) bool {
	vendorUpper := strings.ToUpper(data.ScsiVendor)
	productUpper := strings.ToUpper(data.ScsiProduct)
	modelUpper := strings.ToUpper(data.ScsiModelName)

	return sm.isVirtualDeviceFromStrings(vendorUpper, productUpper, modelUpper)
}

// isVirtualDeviceFromStrings checks if any of the provided strings indicate a virtual device
func (sm *SmartManager) isVirtualDeviceFromStrings(fields ...string) bool {
	for _, field := range fields {
		fieldUpper := strings.ToUpper(field)
		switch {
		case strings.Contains(fieldUpper, "IET"), // iSCSI Enterprise Target
			strings.Contains(fieldUpper, "VIRTUAL"),
			strings.Contains(fieldUpper, "QEMU"),
			strings.Contains(fieldUpper, "VBOX"),
			strings.Contains(fieldUpper, "VMWARE"),
			strings.Contains(fieldUpper, "MSFT"): // Microsoft Hyper-V
			return true
		}
	}
	return false
}
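
// Editor's note: these substring checks mean a model like "QEMU HARDDISK" or a vendor of
// "MSFT" (Hyper-V) is treated as virtual and skipped, while a physical model such as
// "Samsung SSD 970 EVO Plus 1TB" passes through; the table-driven tests below cover both cases.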
|
||||
|
||||
// parseSmartForSata parses the output of smartctl --all -j for SATA/ATA devices and updates the SmartDataMap
|
||||
// Returns hasValidData and exitStatus
|
||||
func (sm *SmartManager) parseSmartForSata(output []byte) (bool, int) {
|
||||
var data smart.SmartInfoForSata
|
||||
|
||||
if err := json.Unmarshal(output, &data); err != nil {
|
||||
return false, 0
|
||||
}
|
||||
|
||||
if data.SerialNumber == "" {
|
||||
slog.Debug("no serial number", "device", data.Device.Name)
|
||||
return false, data.Smartctl.ExitStatus
|
||||
}
|
||||
|
||||
// Skip virtual devices (e.g., Kubernetes PVCs, QEMU, VirtualBox, etc.)
|
||||
if sm.isVirtualDevice(&data) {
|
||||
slog.Debug("skipping smart", "device", data.Device.Name, "model", data.ModelName)
|
||||
return false, data.Smartctl.ExitStatus
|
||||
}
|
||||
|
||||
sm.Lock()
|
||||
defer sm.Unlock()
|
||||
|
||||
keyName := data.SerialNumber
|
||||
|
||||
// if device does not exist in SmartDataMap, initialize it
|
||||
if _, ok := sm.SmartDataMap[keyName]; !ok {
|
||||
sm.SmartDataMap[keyName] = &smart.SmartData{}
|
||||
}
|
||||
|
||||
// update SmartData
|
||||
smartData := sm.SmartDataMap[keyName]
|
||||
// smartData.ModelFamily = data.ModelFamily
|
||||
smartData.ModelName = data.ModelName
|
||||
smartData.SerialNumber = data.SerialNumber
|
||||
smartData.FirmwareVersion = data.FirmwareVersion
|
||||
smartData.Capacity = data.UserCapacity.Bytes
|
||||
smartData.Temperature = data.Temperature.Current
|
||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||
smartData.DiskName = data.Device.Name
|
||||
smartData.DiskType = data.Device.Type
|
||||
|
||||
// update SmartAttributes
|
||||
smartData.Attributes = make([]*smart.SmartAttribute, 0, len(data.AtaSmartAttributes.Table))
|
||||
for _, attr := range data.AtaSmartAttributes.Table {
|
||||
rawValue := uint64(attr.Raw.Value)
|
||||
if parsed, ok := smart.ParseSmartRawValueString(attr.Raw.String); ok {
|
||||
rawValue = parsed
|
||||
}
|
||||
smartAttr := &smart.SmartAttribute{
|
||||
ID: attr.ID,
|
||||
Name: attr.Name,
|
||||
Value: attr.Value,
|
||||
Worst: attr.Worst,
|
||||
Threshold: attr.Thresh,
|
||||
RawValue: rawValue,
|
||||
RawString: attr.Raw.String,
|
||||
WhenFailed: attr.WhenFailed,
|
||||
}
|
||||
smartData.Attributes = append(smartData.Attributes, smartAttr)
|
||||
}
|
||||
sm.SmartDataMap[keyName] = smartData
|
||||
|
||||
return true, data.Smartctl.ExitStatus
|
||||
}

func getSmartStatus(temperature uint8, passed bool) string {
	if passed {
		return "PASSED"
	} else if temperature > 0 {
		return "FAILED"
	} else {
		return "UNKNOWN"
	}
}
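
// Editor's note: a failed self-assessment is reported as "FAILED" only when a non-zero
// temperature shows that real data was read; otherwise the status falls back to "UNKNOWN",
// and any passing report is "PASSED".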
|
||||
|
||||
func (sm *SmartManager) parseSmartForScsi(output []byte) (bool, int) {
|
||||
var data smart.SmartInfoForScsi
|
||||
|
||||
if err := json.Unmarshal(output, &data); err != nil {
|
||||
return false, 0
|
||||
}
|
||||
|
||||
if data.SerialNumber == "" {
|
||||
slog.Debug("no serial number", "device", data.Device.Name)
|
||||
return false, data.Smartctl.ExitStatus
|
||||
}
|
||||
|
||||
// Skip virtual devices (e.g., Kubernetes PVCs, QEMU, VirtualBox, etc.)
|
||||
if sm.isVirtualDeviceScsi(&data) {
|
||||
slog.Debug("skipping smart", "device", data.Device.Name, "model", data.ScsiModelName)
|
||||
return false, data.Smartctl.ExitStatus
|
||||
}
|
||||
|
||||
sm.Lock()
|
||||
defer sm.Unlock()
|
||||
|
||||
keyName := data.SerialNumber
|
||||
if _, ok := sm.SmartDataMap[keyName]; !ok {
|
||||
sm.SmartDataMap[keyName] = &smart.SmartData{}
|
||||
}
|
||||
|
||||
smartData := sm.SmartDataMap[keyName]
|
||||
smartData.ModelName = data.ScsiModelName
|
||||
smartData.SerialNumber = data.SerialNumber
|
||||
smartData.FirmwareVersion = data.ScsiRevision
|
||||
smartData.Capacity = data.UserCapacity.Bytes
|
||||
smartData.Temperature = data.Temperature.Current
|
||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||
smartData.DiskName = data.Device.Name
|
||||
smartData.DiskType = data.Device.Type
|
||||
|
||||
attributes := make([]*smart.SmartAttribute, 0, 10)
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "PowerOnHours", RawValue: data.PowerOnTime.Hours})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "PowerOnMinutes", RawValue: data.PowerOnTime.Minutes})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "GrownDefectList", RawValue: data.ScsiGrownDefectList})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "StartStopCycles", RawValue: data.ScsiStartStopCycleCounter.AccumulatedStartStopCycles})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "LoadUnloadCycles", RawValue: data.ScsiStartStopCycleCounter.AccumulatedLoadUnloadCycles})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "StartStopSpecified", RawValue: data.ScsiStartStopCycleCounter.SpecifiedCycleCountOverDeviceLifetime})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "LoadUnloadSpecified", RawValue: data.ScsiStartStopCycleCounter.SpecifiedLoadUnloadCountOverDeviceLifetime})
|
||||
|
||||
readStats := data.ScsiErrorCounterLog.Read
|
||||
writeStats := data.ScsiErrorCounterLog.Write
|
||||
verifyStats := data.ScsiErrorCounterLog.Verify
|
||||
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "ReadTotalErrorsCorrected", RawValue: readStats.TotalErrorsCorrected})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "ReadTotalUncorrectedErrors", RawValue: readStats.TotalUncorrectedErrors})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "ReadCorrectionAlgorithmInvocations", RawValue: readStats.CorrectionAlgorithmInvocations})
|
||||
if val := parseScsiGigabytesProcessed(readStats.GigabytesProcessed); val >= 0 {
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "ReadGigabytesProcessed", RawValue: uint64(val)})
|
||||
}
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "WriteTotalErrorsCorrected", RawValue: writeStats.TotalErrorsCorrected})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "WriteTotalUncorrectedErrors", RawValue: writeStats.TotalUncorrectedErrors})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "WriteCorrectionAlgorithmInvocations", RawValue: writeStats.CorrectionAlgorithmInvocations})
|
||||
if val := parseScsiGigabytesProcessed(writeStats.GigabytesProcessed); val >= 0 {
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "WriteGigabytesProcessed", RawValue: uint64(val)})
|
||||
}
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "VerifyTotalErrorsCorrected", RawValue: verifyStats.TotalErrorsCorrected})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "VerifyTotalUncorrectedErrors", RawValue: verifyStats.TotalUncorrectedErrors})
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "VerifyCorrectionAlgorithmInvocations", RawValue: verifyStats.CorrectionAlgorithmInvocations})
|
||||
if val := parseScsiGigabytesProcessed(verifyStats.GigabytesProcessed); val >= 0 {
|
||||
attributes = append(attributes, &smart.SmartAttribute{Name: "VerifyGigabytesProcessed", RawValue: uint64(val)})
|
||||
}
|
||||
|
||||
smartData.Attributes = attributes
|
||||
sm.SmartDataMap[keyName] = smartData
|
||||
|
||||
return true, data.Smartctl.ExitStatus
|
||||
}

func parseScsiGigabytesProcessed(value string) int64 {
	if value == "" {
		return -1
	}
	normalized := strings.ReplaceAll(value, ",", "")
	parsed, err := strconv.ParseInt(normalized, 10, 64)
	if err != nil {
		return -1
	}
	return parsed
}
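
// Editor's illustration: the SCSI "gigabytes processed" counters arrive as strings that may
// contain comma separators, hence the ReplaceAll before parsing.
//
//	parseScsiGigabytesProcessed("2,124,590") // -> 2124590
//	parseScsiGigabytesProcessed("")          // -> -1 (callers then skip the attribute)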
|
||||
|
||||
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
||||
// Returns hasValidData and exitStatus
|
||||
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
||||
data := &smart.SmartInfoForNvme{}
|
||||
|
||||
if err := json.Unmarshal(output, &data); err != nil {
|
||||
return false, 0
|
||||
}
|
||||
|
||||
if data.SerialNumber == "" {
|
||||
slog.Debug("no serial number", "device", data.Device.Name)
|
||||
return false, data.Smartctl.ExitStatus
|
||||
}
|
||||
|
||||
// Skip virtual devices (e.g., Kubernetes PVCs, QEMU, VirtualBox, etc.)
|
||||
if sm.isVirtualDeviceNvme(data) {
|
||||
slog.Debug("skipping smart", "device", data.Device.Name, "model", data.ModelName)
|
||||
return false, data.Smartctl.ExitStatus
|
||||
}
|
||||
|
||||
sm.Lock()
|
||||
defer sm.Unlock()
|
||||
|
||||
keyName := data.SerialNumber
|
||||
|
||||
// if device does not exist in SmartDataMap, initialize it
|
||||
if _, ok := sm.SmartDataMap[keyName]; !ok {
|
||||
sm.SmartDataMap[keyName] = &smart.SmartData{}
|
||||
}
|
||||
|
||||
// update SmartData
|
||||
smartData := sm.SmartDataMap[keyName]
|
||||
smartData.ModelName = data.ModelName
|
||||
smartData.SerialNumber = data.SerialNumber
|
||||
smartData.FirmwareVersion = data.FirmwareVersion
|
||||
smartData.Capacity = data.UserCapacity.Bytes
|
||||
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||
smartData.DiskName = data.Device.Name
|
||||
smartData.DiskType = data.Device.Type
|
||||
|
||||
// nvme attributes does not follow the same format as ata attributes,
|
||||
// so we manually map each field to SmartAttributes
|
||||
log := data.NVMeSmartHealthInformationLog
|
||||
smartData.Attributes = []*smart.SmartAttribute{
|
||||
{Name: "CriticalWarning", RawValue: uint64(log.CriticalWarning)},
|
||||
{Name: "Temperature", RawValue: uint64(log.Temperature)},
|
||||
{Name: "AvailableSpare", RawValue: uint64(log.AvailableSpare)},
|
||||
{Name: "AvailableSpareThreshold", RawValue: uint64(log.AvailableSpareThreshold)},
|
||||
{Name: "PercentageUsed", RawValue: uint64(log.PercentageUsed)},
|
||||
{Name: "DataUnitsRead", RawValue: log.DataUnitsRead},
|
||||
{Name: "DataUnitsWritten", RawValue: log.DataUnitsWritten},
|
||||
{Name: "HostReads", RawValue: uint64(log.HostReads)},
|
||||
{Name: "HostWrites", RawValue: uint64(log.HostWrites)},
|
||||
{Name: "ControllerBusyTime", RawValue: uint64(log.ControllerBusyTime)},
|
||||
{Name: "PowerCycles", RawValue: uint64(log.PowerCycles)},
|
||||
{Name: "PowerOnHours", RawValue: uint64(log.PowerOnHours)},
|
||||
{Name: "UnsafeShutdowns", RawValue: uint64(log.UnsafeShutdowns)},
|
||||
{Name: "MediaErrors", RawValue: uint64(log.MediaErrors)},
|
||||
{Name: "NumErrLogEntries", RawValue: uint64(log.NumErrLogEntries)},
|
||||
{Name: "WarningTempTime", RawValue: uint64(log.WarningTempTime)},
|
||||
{Name: "CriticalCompTime", RawValue: uint64(log.CriticalCompTime)},
|
||||
}
|
||||
|
||||
sm.SmartDataMap[keyName] = smartData
|
||||
|
||||
return true, data.Smartctl.ExitStatus
|
||||
}

// detectSmartctl checks if smartctl is installed, returns an error if not
func (sm *SmartManager) detectSmartctl() (string, error) {
	isWindows := runtime.GOOS == "windows"

	// Load embedded smartctl.exe for Windows amd64 builds.
	if isWindows && runtime.GOARCH == "amd64" {
		if path, err := ensureEmbeddedSmartctl(); err == nil {
			return path, nil
		}
	}

	if path, err := exec.LookPath("smartctl"); err == nil {
		return path, nil
	}
	locations := []string{}
	if isWindows {
		locations = append(locations,
			"C:\\Program Files\\smartmontools\\bin\\smartctl.exe",
		)
	} else {
		locations = append(locations, "/opt/homebrew/bin/smartctl")
	}
	for _, location := range locations {
		if _, err := os.Stat(location); err == nil {
			return location, nil
		}
	}
	return "", errors.New("smartctl not found")
}

// NewSmartManager creates and initializes a new SmartManager
func NewSmartManager() (*SmartManager, error) {
	sm := &SmartManager{
		SmartDataMap: make(map[string]*smart.SmartData),
	}
	path, err := sm.detectSmartctl()
	if err != nil {
		slog.Debug(err.Error())
		return nil, err
	}
	slog.Debug("smartctl", "path", path)
	sm.binPath = path
	return sm, nil
}

agent/smart_nonwindows.go (new file, 9 lines)
@@ -0,0 +1,9 @@
//go:build !windows

package agent

import "errors"

func ensureEmbeddedSmartctl() (string, error) {
	return "", errors.ErrUnsupported
}
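
// Editor's note: this non-Windows stub exists only so the package builds without the
// embedded smartctl.exe asset; detectSmartctl never reaches it off Windows because of the
// runtime.GOOS/GOARCH guard, and instead goes straight to PATH lookup and the known install locations.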

agent/smart_test.go (new file, 590 lines)
@@ -0,0 +1,590 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseSmartForScsi(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "scsi.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed reading fixture: %v", err)
|
||||
}
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
hasData, exitStatus := sm.parseSmartForScsi(data)
|
||||
if !hasData {
|
||||
t.Fatalf("expected SCSI data to parse successfully")
|
||||
}
|
||||
if exitStatus != 0 {
|
||||
t.Fatalf("expected exit status 0, got %d", exitStatus)
|
||||
}
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["9YHSDH9B"]
|
||||
if !ok {
|
||||
t.Fatalf("expected smart data entry for serial 9YHSDH9B")
|
||||
}
|
||||
|
||||
assert.Equal(t, deviceData.ModelName, "YADRO WUH721414AL4204")
|
||||
assert.Equal(t, deviceData.SerialNumber, "9YHSDH9B")
|
||||
assert.Equal(t, deviceData.FirmwareVersion, "C240")
|
||||
assert.Equal(t, deviceData.DiskName, "/dev/sde")
|
||||
assert.Equal(t, deviceData.DiskType, "scsi")
|
||||
assert.EqualValues(t, deviceData.Temperature, 34)
|
||||
assert.Equal(t, deviceData.SmartStatus, "PASSED")
|
||||
assert.EqualValues(t, deviceData.Capacity, 14000519643136)
|
||||
|
||||
if len(deviceData.Attributes) == 0 {
|
||||
t.Fatalf("expected attributes to be populated")
|
||||
}
|
||||
|
||||
assertAttrValue(t, deviceData.Attributes, "PowerOnHours", 458)
|
||||
assertAttrValue(t, deviceData.Attributes, "PowerOnMinutes", 25)
|
||||
assertAttrValue(t, deviceData.Attributes, "GrownDefectList", 0)
|
||||
assertAttrValue(t, deviceData.Attributes, "StartStopCycles", 2)
|
||||
assertAttrValue(t, deviceData.Attributes, "LoadUnloadCycles", 418)
|
||||
assertAttrValue(t, deviceData.Attributes, "ReadGigabytesProcessed", 3641)
|
||||
assertAttrValue(t, deviceData.Attributes, "WriteGigabytesProcessed", 2124590)
|
||||
assertAttrValue(t, deviceData.Attributes, "VerifyGigabytesProcessed", 0)
|
||||
}
|
||||
|
||||
func TestParseSmartForSata(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "sda.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
hasData, exitStatus := sm.parseSmartForSata(data)
|
||||
require.True(t, hasData)
|
||||
assert.Equal(t, 64, exitStatus)
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["9C40918040082"]
|
||||
require.True(t, ok, "expected smart data entry for serial 9C40918040082")
|
||||
|
||||
assert.Equal(t, "P3-2TB", deviceData.ModelName)
|
||||
assert.Equal(t, "X0104A0", deviceData.FirmwareVersion)
|
||||
assert.Equal(t, "/dev/sda", deviceData.DiskName)
|
||||
assert.Equal(t, "sat", deviceData.DiskType)
|
||||
assert.Equal(t, uint8(31), deviceData.Temperature)
|
||||
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||
assert.Equal(t, uint64(2048408248320), deviceData.Capacity)
|
||||
if assert.NotEmpty(t, deviceData.Attributes) {
|
||||
assertAttrValue(t, deviceData.Attributes, "Temperature_Celsius", 31)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSmartForSataParentheticalRawValue(t *testing.T) {
|
||||
jsonPayload := []byte(`{
|
||||
"smartctl": {"exit_status": 0},
|
||||
"device": {"name": "/dev/sdz", "type": "sat"},
|
||||
"model_name": "Example",
|
||||
"serial_number": "PARENTHESES123",
|
||||
"firmware_version": "1.0",
|
||||
"user_capacity": {"bytes": 1024},
|
||||
"smart_status": {"passed": true},
|
||||
"temperature": {"current": 25},
|
||||
"ata_smart_attributes": {
|
||||
"table": [
|
||||
{
|
||||
"id": 9,
|
||||
"name": "Power_On_Hours",
|
||||
"value": 93,
|
||||
"worst": 55,
|
||||
"thresh": 0,
|
||||
"when_failed": "",
|
||||
"raw": {
|
||||
"value": 57891864217128,
|
||||
"string": "39925 (212 206 0)"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}`)
|
||||
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
|
||||
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
|
||||
require.True(t, hasData)
|
||||
assert.Equal(t, 0, exitStatus)
|
||||
|
||||
data, ok := sm.SmartDataMap["PARENTHESES123"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, data.Attributes, 1)
|
||||
|
||||
attr := data.Attributes[0]
|
||||
assert.Equal(t, uint64(39925), attr.RawValue)
|
||||
assert.Equal(t, "39925 (212 206 0)", attr.RawString)
|
||||
}
|
||||
|
||||
func TestParseSmartForNvme(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "nvme0.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
hasData, exitStatus := sm.parseSmartForNvme(data)
|
||||
require.True(t, hasData)
|
||||
assert.Equal(t, 0, exitStatus)
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["2024031600129"]
|
||||
require.True(t, ok, "expected smart data entry for serial 2024031600129")
|
||||
|
||||
assert.Equal(t, "PELADN 512GB", deviceData.ModelName)
|
||||
assert.Equal(t, "VC2S038E", deviceData.FirmwareVersion)
|
||||
assert.Equal(t, "/dev/nvme0", deviceData.DiskName)
|
||||
assert.Equal(t, "nvme", deviceData.DiskType)
|
||||
assert.Equal(t, uint8(61), deviceData.Temperature)
|
||||
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||
assert.Equal(t, uint64(512110190592), deviceData.Capacity)
|
||||
if assert.NotEmpty(t, deviceData.Attributes) {
|
||||
assertAttrValue(t, deviceData.Attributes, "PercentageUsed", 0)
|
||||
assertAttrValue(t, deviceData.Attributes, "DataUnitsWritten", 16040567)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasDataForDevice(t *testing.T) {
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: map[string]*smart.SmartData{
|
||||
"serial-1": {DiskName: "/dev/sda"},
|
||||
"serial-2": nil,
|
||||
},
|
||||
}
|
||||
|
||||
assert.True(t, sm.hasDataForDevice("/dev/sda"))
|
||||
assert.False(t, sm.hasDataForDevice("/dev/sdb"))
|
||||
}
|
||||
|
||||
func TestDevicesSnapshotReturnsCopy(t *testing.T) {
|
||||
originalDevice := &DeviceInfo{Name: "/dev/sda"}
|
||||
sm := &SmartManager{
|
||||
SmartDevices: []*DeviceInfo{
|
||||
originalDevice,
|
||||
{Name: "/dev/sdb"},
|
||||
},
|
||||
}
|
||||
|
||||
snapshot := sm.devicesSnapshot()
|
||||
require.Len(t, snapshot, 2)
|
||||
|
||||
sm.SmartDevices[0] = &DeviceInfo{Name: "/dev/sdz"}
|
||||
assert.Equal(t, "/dev/sda", snapshot[0].Name)
|
||||
|
||||
snapshot[1] = &DeviceInfo{Name: "/dev/nvme0"}
|
||||
assert.Equal(t, "/dev/sdb", sm.SmartDevices[1].Name)
|
||||
|
||||
sm.SmartDevices = append(sm.SmartDevices, &DeviceInfo{Name: "/dev/nvme1"})
|
||||
assert.Len(t, snapshot, 2)
|
||||
}
|
||||
|
||||
func TestScanDevicesWithEnvOverride(t *testing.T) {
|
||||
t.Setenv("SMART_DEVICES", "/dev/sda:sat, /dev/nvme0:nvme")
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
err := sm.ScanDevices(true)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, sm.SmartDevices, 2)
|
||||
assert.Equal(t, "/dev/sda", sm.SmartDevices[0].Name)
|
||||
assert.Equal(t, "sat", sm.SmartDevices[0].Type)
|
||||
assert.Equal(t, "/dev/nvme0", sm.SmartDevices[1].Name)
|
||||
assert.Equal(t, "nvme", sm.SmartDevices[1].Type)
|
||||
}
|
||||
|
||||
func TestScanDevicesWithEnvOverrideInvalid(t *testing.T) {
|
||||
t.Setenv("SMART_DEVICES", ":sat")
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
err := sm.ScanDevices(true)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestScanDevicesWithEnvOverrideEmpty(t *testing.T) {
|
||||
t.Setenv("SMART_DEVICES", " ")
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
err := sm.ScanDevices(true)
|
||||
assert.ErrorIs(t, err, errNoValidSmartData)
|
||||
assert.Empty(t, sm.SmartDevices)
|
||||
}
|
||||
|
||||
func TestSmartctlArgsWithoutType(t *testing.T) {
|
||||
device := &DeviceInfo{Name: "/dev/sda"}
|
||||
|
||||
sm := &SmartManager{}
|
||||
|
||||
args := sm.smartctlArgs(device, true)
|
||||
assert.Equal(t, []string{"-a", "--json=c", "-n", "standby", "/dev/sda"}, args)
|
||||
}
|
||||
|
||||
func TestSmartctlArgs(t *testing.T) {
|
||||
sm := &SmartManager{}
|
||||
|
||||
sataDevice := &DeviceInfo{Name: "/dev/sda", Type: "sat"}
|
||||
assert.Equal(t,
|
||||
[]string{"-d", "sat", "-a", "--json=c", "-n", "standby", "/dev/sda"},
|
||||
sm.smartctlArgs(sataDevice, true),
|
||||
)
|
||||
|
||||
assert.Equal(t,
|
||||
[]string{"-d", "sat", "-a", "--json=c", "/dev/sda"},
|
||||
sm.smartctlArgs(sataDevice, false),
|
||||
)
|
||||
|
||||
assert.Equal(t,
|
||||
[]string{"-a", "--json=c", "-n", "standby"},
|
||||
sm.smartctlArgs(nil, true),
|
||||
)
|
||||
}
|
||||
|
||||
func TestResolveRefreshError(t *testing.T) {
|
||||
scanErr := errors.New("scan failed")
|
||||
collectErr := errors.New("collect failed")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
devices []*DeviceInfo
|
||||
data map[string]*smart.SmartData
|
||||
scanErr error
|
||||
collectErr error
|
||||
expectedErr error
|
||||
expectNoErr bool
|
||||
}{
|
||||
{
|
||||
name: "no devices returns scan error",
|
||||
devices: nil,
|
||||
data: make(map[string]*smart.SmartData),
|
||||
scanErr: scanErr,
|
||||
expectedErr: scanErr,
|
||||
},
|
||||
{
|
||||
name: "has data ignores errors",
|
||||
devices: []*DeviceInfo{{Name: "/dev/sda"}},
|
||||
data: map[string]*smart.SmartData{"serial": {}},
|
||||
scanErr: scanErr,
|
||||
collectErr: collectErr,
|
||||
expectNoErr: true,
|
||||
},
|
||||
{
|
||||
name: "collect error preferred",
|
||||
devices: []*DeviceInfo{{Name: "/dev/sda"}},
|
||||
data: make(map[string]*smart.SmartData),
|
||||
collectErr: collectErr,
|
||||
expectedErr: collectErr,
|
||||
},
|
||||
{
|
||||
name: "scan error returned when no data",
|
||||
devices: []*DeviceInfo{{Name: "/dev/sda"}},
|
||||
data: make(map[string]*smart.SmartData),
|
||||
scanErr: scanErr,
|
||||
expectedErr: scanErr,
|
||||
},
|
||||
{
|
||||
name: "no errors returns sentinel",
|
||||
devices: []*DeviceInfo{{Name: "/dev/sda"}},
|
||||
data: make(map[string]*smart.SmartData),
|
||||
expectedErr: errNoValidSmartData,
|
||||
},
|
||||
{
|
||||
name: "no devices collect error",
|
||||
devices: nil,
|
||||
data: make(map[string]*smart.SmartData),
|
||||
collectErr: collectErr,
|
||||
expectedErr: collectErr,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sm := &SmartManager{
|
||||
SmartDevices: tt.devices,
|
||||
SmartDataMap: tt.data,
|
||||
}
|
||||
|
||||
err := sm.resolveRefreshError(tt.scanErr, tt.collectErr)
|
||||
if tt.expectNoErr {
|
||||
assert.NoError(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
if tt.expectedErr == nil {
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
assert.Equal(t, tt.expectedErr, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseScan(t *testing.T) {
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: map[string]*smart.SmartData{
|
||||
"serial-active": {DiskName: "/dev/sda"},
|
||||
"serial-stale": {DiskName: "/dev/sdb"},
|
||||
},
|
||||
}
|
||||
|
||||
scanJSON := []byte(`{
|
||||
"devices": [
|
||||
{"name": "/dev/sda", "type": "sat", "info_name": "/dev/sda [SAT]", "protocol": "ATA"},
|
||||
{"name": "/dev/nvme0", "type": "nvme", "info_name": "/dev/nvme0", "protocol": "NVMe"}
|
||||
]
|
||||
}`)
|
||||
|
||||
devices, hasData := sm.parseScan(scanJSON)
|
||||
assert.True(t, hasData)
|
||||
|
||||
sm.updateSmartDevices(devices)
|
||||
|
||||
require.Len(t, sm.SmartDevices, 2)
|
||||
assert.Equal(t, "/dev/sda", sm.SmartDevices[0].Name)
|
||||
assert.Equal(t, "sat", sm.SmartDevices[0].Type)
|
||||
assert.Equal(t, "/dev/nvme0", sm.SmartDevices[1].Name)
|
||||
assert.Equal(t, "nvme", sm.SmartDevices[1].Type)
|
||||
|
||||
_, activeExists := sm.SmartDataMap["serial-active"]
|
||||
assert.True(t, activeExists, "active smart data should be preserved when device path remains")
|
||||
|
||||
_, staleExists := sm.SmartDataMap["serial-stale"]
|
||||
assert.False(t, staleExists, "stale smart data entry should be removed when device path disappears")
|
||||
}
|
||||
|
||||
func TestMergeDeviceListsPrefersConfigured(t *testing.T) {
|
||||
scanned := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "sat", InfoName: "scan-info", Protocol: "ATA"},
|
||||
{Name: "/dev/nvme0", Type: "nvme"},
|
||||
}
|
||||
|
||||
configured := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "sat-override"},
|
||||
{Name: "/dev/sdb", Type: "sat"},
|
||||
}
|
||||
|
||||
merged := mergeDeviceLists(nil, scanned, configured)
|
||||
require.Len(t, merged, 3)
|
||||
|
||||
byName := make(map[string]*DeviceInfo, len(merged))
|
||||
for _, dev := range merged {
|
||||
byName[dev.Name] = dev
|
||||
}
|
||||
|
||||
require.Contains(t, byName, "/dev/sda")
|
||||
assert.Equal(t, "sat-override", byName["/dev/sda"].Type, "configured type should override scanned type")
|
||||
assert.Equal(t, "scan-info", byName["/dev/sda"].InfoName, "scan metadata should be preserved when config does not provide it")
|
||||
|
||||
require.Contains(t, byName, "/dev/nvme0")
|
||||
assert.Equal(t, "nvme", byName["/dev/nvme0"].Type)
|
||||
|
||||
require.Contains(t, byName, "/dev/sdb")
|
||||
assert.Equal(t, "sat", byName["/dev/sdb"].Type)
|
||||
}
|
||||
|
||||
func TestMergeDeviceListsPreservesVerification(t *testing.T) {
|
||||
existing := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "sat+megaraid", parserType: "sat", typeVerified: true},
|
||||
}
|
||||
|
||||
scanned := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "nvme"},
|
||||
}
|
||||
|
||||
merged := mergeDeviceLists(existing, scanned, nil)
|
||||
require.Len(t, merged, 1)
|
||||
|
||||
device := merged[0]
|
||||
assert.True(t, device.typeVerified)
|
||||
assert.Equal(t, "sat", device.parserType)
|
||||
assert.Equal(t, "sat+megaraid", device.Type)
|
||||
}
|
||||
|
||||
func TestMergeDeviceListsUpdatesTypeWhenUnverified(t *testing.T) {
|
||||
existing := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "sat", parserType: "sat", typeVerified: false},
|
||||
}
|
||||
|
||||
scanned := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "nvme"},
|
||||
}
|
||||
|
||||
merged := mergeDeviceLists(existing, scanned, nil)
|
||||
require.Len(t, merged, 1)
|
||||
|
||||
device := merged[0]
|
||||
assert.False(t, device.typeVerified)
|
||||
assert.Equal(t, "nvme", device.Type)
|
||||
assert.Equal(t, "", device.parserType)
|
||||
}
|
||||
|
||||
func TestParseSmartOutputMarksVerified(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "nvme0.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
device := &DeviceInfo{Name: "/dev/nvme0"}
|
||||
|
||||
require.True(t, sm.parseSmartOutput(device, data))
|
||||
assert.Equal(t, "nvme", device.Type)
|
||||
assert.Equal(t, "nvme", device.parserType)
|
||||
assert.True(t, device.typeVerified)
|
||||
}
|
||||
|
||||
func TestParseSmartOutputKeepsCustomType(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "sda.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
device := &DeviceInfo{Name: "/dev/sda", Type: "sat+megaraid"}
|
||||
|
||||
require.True(t, sm.parseSmartOutput(device, data))
|
||||
assert.Equal(t, "sat+megaraid", device.Type)
|
||||
assert.Equal(t, "sat", device.parserType)
|
||||
assert.True(t, device.typeVerified)
|
||||
}
|
||||
|
||||
func TestParseSmartOutputResetsVerificationOnFailure(t *testing.T) {
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
device := &DeviceInfo{Name: "/dev/sda", Type: "sat", parserType: "sat", typeVerified: true}
|
||||
|
||||
assert.False(t, sm.parseSmartOutput(device, []byte("not json")))
|
||||
assert.False(t, device.typeVerified)
|
||||
assert.Equal(t, "sat", device.parserType)
|
||||
}
|
||||
|
||||
func assertAttrValue(t *testing.T, attributes []*smart.SmartAttribute, name string, expected uint64) {
|
||||
t.Helper()
|
||||
attr := findAttr(attributes, name)
|
||||
if attr == nil {
|
||||
t.Fatalf("expected attribute %s to be present", name)
|
||||
}
|
||||
if attr.RawValue != expected {
|
||||
t.Fatalf("unexpected attribute %s value: got %d, want %d", name, attr.RawValue, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func findAttr(attributes []*smart.SmartAttribute, name string) *smart.SmartAttribute {
|
||||
for _, attr := range attributes {
|
||||
if attr != nil && attr.Name == name {
|
||||
return attr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestIsVirtualDevice(t *testing.T) {
|
||||
sm := &SmartManager{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
vendor string
|
||||
product string
|
||||
model string
|
||||
expected bool
|
||||
}{
|
||||
{"regular drive", "SEAGATE", "ST1000DM003", "ST1000DM003-1CH162", false},
|
||||
{"qemu virtual", "QEMU", "QEMU HARDDISK", "QEMU HARDDISK", true},
|
||||
{"virtualbox virtual", "VBOX", "HARDDISK", "VBOX HARDDISK", true},
|
||||
{"vmware virtual", "VMWARE", "Virtual disk", "VMWARE Virtual disk", true},
|
||||
{"virtual in model", "ATA", "VIRTUAL", "VIRTUAL DISK", true},
|
||||
{"iet virtual", "IET", "VIRTUAL-DISK", "VIRTUAL-DISK", true},
|
||||
{"hyper-v virtual", "MSFT", "VIRTUAL HD", "VIRTUAL HD", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data := &smart.SmartInfoForSata{
|
||||
ScsiVendor: tt.vendor,
|
||||
ScsiProduct: tt.product,
|
||||
ModelName: tt.model,
|
||||
}
|
||||
result := sm.isVirtualDevice(data)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsVirtualDeviceNvme(t *testing.T) {
|
||||
sm := &SmartManager{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
model string
|
||||
expected bool
|
||||
}{
|
||||
{"regular nvme", "Samsung SSD 970 EVO Plus 1TB", false},
|
||||
{"qemu virtual", "QEMU NVMe Ctrl", true},
|
||||
{"virtualbox virtual", "VBOX NVMe", true},
|
||||
{"vmware virtual", "VMWARE NVMe", true},
|
||||
{"virtual in model", "Virtual NVMe Device", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data := &smart.SmartInfoForNvme{
|
||||
ModelName: tt.model,
|
||||
}
|
||||
result := sm.isVirtualDeviceNvme(data)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsVirtualDeviceScsi(t *testing.T) {
|
||||
sm := &SmartManager{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
vendor string
|
||||
product string
|
||||
model string
|
||||
expected bool
|
||||
}{
|
||||
{"regular scsi", "SEAGATE", "ST1000DM003", "ST1000DM003-1CH162", false},
|
||||
{"qemu virtual", "QEMU", "QEMU HARDDISK", "QEMU HARDDISK", true},
|
||||
{"virtualbox virtual", "VBOX", "HARDDISK", "VBOX HARDDISK", true},
|
||||
{"vmware virtual", "VMWARE", "Virtual disk", "VMWARE Virtual disk", true},
|
||||
{"virtual in model", "ATA", "VIRTUAL", "VIRTUAL DISK", true},
|
||||
{"iet virtual", "IET", "VIRTUAL-DISK", "VIRTUAL-DISK", true},
|
||||
{"hyper-v virtual", "MSFT", "VIRTUAL HD", "VIRTUAL HD", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data := &smart.SmartInfoForScsi{
|
||||
ScsiVendor: tt.vendor,
|
||||
ScsiProduct: tt.product,
|
||||
ScsiModelName: tt.model,
|
||||
}
|
||||
result := sm.isVirtualDeviceScsi(data)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|

agent/smart_windows.go (new file, 40 lines)
@@ -0,0 +1,40 @@
//go:build windows

package agent

import (
	_ "embed"
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

//go:embed smartmontools/smartctl.exe
var embeddedSmartctl []byte

var (
	smartctlOnce sync.Once
	smartctlPath string
	smartctlErr  error
)

func ensureEmbeddedSmartctl() (string, error) {
	smartctlOnce.Do(func() {
		destDir := filepath.Join(os.TempDir(), "beszel", "smartmontools")
		if err := os.MkdirAll(destDir, 0o755); err != nil {
			smartctlErr = fmt.Errorf("failed to create smartctl directory: %w", err)
			return
		}

		destPath := filepath.Join(destDir, "smartctl.exe")
		if err := os.WriteFile(destPath, embeddedSmartctl, 0o755); err != nil {
			smartctlErr = fmt.Errorf("failed to write embedded smartctl: %w", err)
			return
		}

		smartctlPath = destPath
	})

	return smartctlPath, smartctlErr
}
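
// Editor's note: on Windows the embedded binary is extracted once (guarded by sync.Once) to
// <TempDir>/beszel/smartmontools/smartctl.exe and that path is reused on every later call;
// a failure from the first attempt is cached in smartctlErr and returned thereafter.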

agent/system.go (new file, 241 lines)
@@ -0,0 +1,241 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/agent/battery"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"github.com/shirou/gopsutil/v4/cpu"
|
||||
"github.com/shirou/gopsutil/v4/host"
|
||||
"github.com/shirou/gopsutil/v4/load"
|
||||
"github.com/shirou/gopsutil/v4/mem"
|
||||
)
|
||||
|
||||
// prevDisk stores previous per-device disk counters for a given cache interval
|
||||
type prevDisk struct {
|
||||
readBytes uint64
|
||||
writeBytes uint64
|
||||
at time.Time
|
||||
}
|
||||
|
||||
// Sets initial / non-changing values about the host system
|
||||
func (a *Agent) initializeSystemInfo() {
|
||||
a.systemInfo.AgentVersion = beszel.Version
|
||||
a.systemInfo.Hostname, _ = os.Hostname()
|
||||
|
||||
platform, _, version, _ := host.PlatformInformation()
|
||||
|
||||
if platform == "darwin" {
|
||||
a.systemInfo.KernelVersion = version
|
||||
a.systemInfo.Os = system.Darwin
|
||||
} else if strings.Contains(platform, "indows") {
|
||||
a.systemInfo.KernelVersion = fmt.Sprintf("%s %s", strings.Replace(platform, "Microsoft ", "", 1), version)
|
||||
a.systemInfo.Os = system.Windows
|
||||
} else if platform == "freebsd" {
|
||||
a.systemInfo.Os = system.Freebsd
|
||||
a.systemInfo.KernelVersion = version
|
||||
} else {
|
||||
a.systemInfo.Os = system.Linux
|
||||
}
|
||||
|
||||
if a.systemInfo.KernelVersion == "" {
|
||||
a.systemInfo.KernelVersion, _ = host.KernelVersion()
|
||||
}
|
||||
|
||||
// cpu model
|
||||
if info, err := cpu.Info(); err == nil && len(info) > 0 {
|
||||
a.systemInfo.CpuModel = info[0].ModelName
|
||||
}
|
||||
// cores / threads
|
||||
a.systemInfo.Cores, _ = cpu.Counts(false)
|
||||
if threads, err := cpu.Counts(true); err == nil {
|
||||
if threads > 0 && threads < a.systemInfo.Cores {
|
||||
// in lxc logical cores reflects container limits, so use that as cores if lower
|
||||
a.systemInfo.Cores = threads
|
||||
} else {
|
||||
a.systemInfo.Threads = threads
|
||||
}
|
||||
}
|
||||
|
||||
// zfs
|
||||
if _, err := getARCSize(); err != nil {
|
||||
slog.Debug("Not monitoring ZFS ARC", "err", err)
|
||||
} else {
|
||||
a.zfs = true
|
||||
}
|
||||
}
|
||||
|
||||
// Returns current info, stats about the host system
|
||||
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
||||
var systemStats system.Stats
|
||||
|
||||
// battery
|
||||
if batteryPercent, batteryState, err := battery.GetBatteryStats(); err == nil {
|
||||
systemStats.Battery[0] = batteryPercent
|
||||
systemStats.Battery[1] = batteryState
|
||||
}
|
||||
|
||||
// cpu metrics
|
||||
cpuMetrics, err := getCpuMetrics(cacheTimeMs)
|
||||
if err == nil {
|
||||
systemStats.Cpu = twoDecimals(cpuMetrics.Total)
|
||||
systemStats.CpuBreakdown = []float64{
|
||||
twoDecimals(cpuMetrics.User),
|
||||
twoDecimals(cpuMetrics.System),
|
||||
twoDecimals(cpuMetrics.Iowait),
|
||||
twoDecimals(cpuMetrics.Steal),
|
||||
twoDecimals(cpuMetrics.Idle),
|
||||
}
|
||||
} else {
|
||||
slog.Error("Error getting cpu metrics", "err", err)
|
||||
}
|
||||
|
||||
// per-core cpu usage
|
||||
if perCoreUsage, err := getPerCoreCpuUsage(cacheTimeMs); err == nil {
|
||||
systemStats.CpuCoresUsage = perCoreUsage
|
||||
}
|
||||
|
||||
// load average
|
||||
if avgstat, err := load.Avg(); err == nil {
|
||||
systemStats.LoadAvg[0] = avgstat.Load1
|
||||
systemStats.LoadAvg[1] = avgstat.Load5
|
||||
systemStats.LoadAvg[2] = avgstat.Load15
|
||||
slog.Debug("Load average", "5m", avgstat.Load5, "15m", avgstat.Load15)
|
||||
} else {
|
||||
slog.Error("Error getting load average", "err", err)
|
||||
}
|
||||
|
||||
// memory
|
||||
if v, err := mem.VirtualMemory(); err == nil {
|
||||
// swap
|
||||
systemStats.Swap = bytesToGigabytes(v.SwapTotal)
|
||||
systemStats.SwapUsed = bytesToGigabytes(v.SwapTotal - v.SwapFree - v.SwapCached)
|
||||
// cache + buffers value for default mem calculation
|
||||
// note: gopsutil automatically adds SReclaimable to v.Cached
|
||||
cacheBuff := v.Cached + v.Buffers - v.Shared
|
||||
if cacheBuff <= 0 {
|
||||
cacheBuff = max(v.Total-v.Free-v.Used, 0)
|
||||
}
|
||||
// htop memory calculation overrides (likely outdated as of mid 2025)
|
||||
if a.memCalc == "htop" {
|
||||
// cacheBuff = v.Cached + v.Buffers - v.Shared
|
||||
v.Used = v.Total - (v.Free + cacheBuff)
|
||||
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
||||
}
|
||||
// if a.memCalc == "legacy" {
|
||||
// v.Used = v.Total - v.Free - v.Buffers - v.Cached
|
||||
// cacheBuff = v.Total - v.Free - v.Used
|
||||
// v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
||||
// }
|
||||
// subtract ZFS ARC size from used memory and add as its own category
|
||||
if a.zfs {
|
||||
if arcSize, _ := getARCSize(); arcSize > 0 && arcSize < v.Used {
|
||||
v.Used = v.Used - arcSize
|
||||
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
||||
systemStats.MemZfsArc = bytesToGigabytes(arcSize)
|
||||
}
|
||||
}
|
||||
systemStats.Mem = bytesToGigabytes(v.Total)
|
||||
systemStats.MemBuffCache = bytesToGigabytes(cacheBuff)
|
||||
systemStats.MemUsed = bytesToGigabytes(v.Used)
|
||||
systemStats.MemPct = twoDecimals(v.UsedPercent)
|
||||
}
|
||||
|
||||
// disk usage
|
||||
a.updateDiskUsage(&systemStats)
|
||||
|
||||
// disk i/o (cache-aware per interval)
|
||||
a.updateDiskIo(cacheTimeMs, &systemStats)
|
||||
|
||||
// network stats (per cache interval)
|
||||
a.updateNetworkStats(cacheTimeMs, &systemStats)
|
||||
|
||||
// temperatures
|
||||
// TODO: maybe refactor to methods on systemStats
|
||||
a.updateTemperatures(&systemStats)
|
||||
|
||||
// GPU data
|
||||
if a.gpuManager != nil {
|
||||
// reset high gpu percent
|
||||
a.systemInfo.GpuPct = 0
|
||||
// get current GPU data
|
||||
if gpuData := a.gpuManager.GetCurrentData(cacheTimeMs); len(gpuData) > 0 {
|
||||
systemStats.GPUData = gpuData
|
||||
|
||||
// add temperatures
|
||||
if systemStats.Temperatures == nil {
|
||||
systemStats.Temperatures = make(map[string]float64, len(gpuData))
|
||||
}
|
||||
highestTemp := 0.0
|
||||
for _, gpu := range gpuData {
|
||||
if gpu.Temperature > 0 {
|
||||
systemStats.Temperatures[gpu.Name] = gpu.Temperature
|
||||
if a.sensorConfig.primarySensor == gpu.Name {
|
||||
a.systemInfo.DashboardTemp = gpu.Temperature
|
||||
}
|
||||
if gpu.Temperature > highestTemp {
|
||||
highestTemp = gpu.Temperature
|
||||
}
|
||||
}
|
||||
// update high gpu percent for dashboard
|
||||
a.systemInfo.GpuPct = max(a.systemInfo.GpuPct, gpu.Usage)
|
||||
}
|
||||
// use highest temp for dashboard temp if dashboard temp is unset
|
||||
if a.systemInfo.DashboardTemp == 0 {
|
||||
a.systemInfo.DashboardTemp = highestTemp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update base system info
|
||||
a.systemInfo.ConnectionType = a.connectionManager.ConnectionType
|
||||
a.systemInfo.Cpu = systemStats.Cpu
|
||||
a.systemInfo.LoadAvg = systemStats.LoadAvg
|
||||
// TODO: remove these in future release in favor of load avg array
|
||||
a.systemInfo.LoadAvg1 = systemStats.LoadAvg[0]
|
||||
a.systemInfo.LoadAvg5 = systemStats.LoadAvg[1]
|
||||
a.systemInfo.LoadAvg15 = systemStats.LoadAvg[2]
|
||||
a.systemInfo.MemPct = systemStats.MemPct
|
||||
a.systemInfo.DiskPct = systemStats.DiskPct
|
||||
a.systemInfo.Uptime, _ = host.Uptime()
|
||||
// TODO: in future release, remove MB bandwidth values in favor of bytes
|
||||
a.systemInfo.Bandwidth = twoDecimals(systemStats.NetworkSent + systemStats.NetworkRecv)
|
||||
a.systemInfo.BandwidthBytes = systemStats.Bandwidth[0] + systemStats.Bandwidth[1]
|
||||
slog.Debug("sysinfo", "data", a.systemInfo)
|
||||
|
||||
return systemStats
|
||||
}
|
||||
|
||||
// Returns the size of the ZFS ARC memory cache in bytes
|
||||
func getARCSize() (uint64, error) {
|
||||
file, err := os.Open("/proc/spl/kstat/zfs/arcstats")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Scan the lines
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.HasPrefix(line, "size") {
|
||||
// Example line: size 4 15032385536
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 3 {
|
||||
return 0, err
|
||||
}
|
||||
// Return the size as uint64
|
||||
return strconv.ParseUint(fields[2], 10, 64)
|
||||
}
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("failed to parse size field")
|
||||
}
|

agent/systemd.go (new file, 229 lines)
@@ -0,0 +1,229 @@
//go:build linux

package agent

import (
	"context"
	"errors"
	"log/slog"
	"maps"
	"math"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/coreos/go-systemd/v22/dbus"
	"github.com/henrygd/beszel/internal/entities/systemd"
)

var (
	errNoActiveTime = errors.New("no active time")
)

// systemdManager manages the collection of systemd service statistics.
type systemdManager struct {
	sync.Mutex
	serviceStatsMap map[string]*systemd.Service
	isRunning       bool
	hasFreshStats   bool
}

// newSystemdManager creates a new systemdManager.
func newSystemdManager() (*systemdManager, error) {
	conn, err := dbus.NewSystemConnectionContext(context.Background())
	if err != nil {
		slog.Warn("Error connecting to systemd", "err", err, "ref", "https://beszel.dev/guide/systemd")
		return nil, err
	}

	manager := &systemdManager{
		serviceStatsMap: make(map[string]*systemd.Service),
	}

	manager.startWorker(conn)

	return manager, nil
}

func (sm *systemdManager) startWorker(conn *dbus.Conn) {
	if sm.isRunning {
		return
	}
	sm.isRunning = true
	// prime the service stats map with the current services
	_ = sm.getServiceStats(conn, true)
	// update the services every 10 minutes
	go func() {
		for {
			time.Sleep(time.Minute * 10)
			_ = sm.getServiceStats(nil, true)
		}
	}()
}

// getServiceStats collects statistics for all running systemd services.
func (sm *systemdManager) getServiceStats(conn *dbus.Conn, refresh bool) []*systemd.Service {
	var services []*systemd.Service
	var err error

	if !refresh {
		sm.Lock()
		defer sm.Unlock()
		for _, service := range sm.serviceStatsMap {
			services = append(services, service)
		}
		sm.hasFreshStats = false
		return services
	}

	if conn == nil || !conn.Connected() {
		conn, err = dbus.NewSystemConnectionContext(context.Background())
		if err != nil {
			return nil
		}
		defer conn.Close()
	}

	units, err := conn.ListUnitsByPatternsContext(context.Background(), []string{"loaded"}, []string{"*.service"})
	if err != nil {
		slog.Error("Error listing systemd service units", "err", err)
		return nil
	}

	for _, unit := range units {
		service, err := sm.updateServiceStats(conn, unit)
		if err != nil {
			continue
		}
		services = append(services, service)
	}
	sm.hasFreshStats = true
	return services
}

// updateServiceStats updates the statistics for a single systemd service.
func (sm *systemdManager) updateServiceStats(conn *dbus.Conn, unit dbus.UnitStatus) (*systemd.Service, error) {
	sm.Lock()
	defer sm.Unlock()

	ctx := context.Background()

	// if service has never been active (no active since time), skip it
	if activeEnterTsProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Unit", "ActiveEnterTimestamp"); err == nil {
		if ts, ok := activeEnterTsProp.Value.Value().(uint64); !ok || ts == 0 || ts == math.MaxUint64 {
			return nil, errNoActiveTime
		}
	} else {
		return nil, err
	}

	service, serviceExists := sm.serviceStatsMap[unit.Name]
	if !serviceExists {
		service = &systemd.Service{Name: unescapeServiceName(strings.TrimSuffix(unit.Name, ".service"))}
		sm.serviceStatsMap[unit.Name] = service
	}

	memPeak := service.MemPeak
	if memPeakProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "MemoryPeak"); err == nil {
		// If memPeak is MaxUint64 the api is saying it's not available
		if v, ok := memPeakProp.Value.Value().(uint64); ok && v != math.MaxUint64 {
			memPeak = v
		}
	}

	var memUsage uint64
	if memProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "MemoryCurrent"); err == nil {
		// If memUsage is MaxUint64 the api is saying it's not available
		if v, ok := memProp.Value.Value().(uint64); ok && v != math.MaxUint64 {
			memUsage = v
		}
	}

	service.State = systemd.ParseServiceStatus(unit.ActiveState)
	service.Sub = systemd.ParseServiceSubState(unit.SubState)

	// some systems always return 0 for mem peak, so we should update the peak if the current usage is greater
	if memUsage > memPeak {
		memPeak = memUsage
	}

	var cpuUsage uint64
	if cpuProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "CPUUsageNSec"); err == nil {
		if v, ok := cpuProp.Value.Value().(uint64); ok {
			cpuUsage = v
		}
	}

	service.Mem = memUsage
	if memPeak > service.MemPeak {
		service.MemPeak = memPeak
	}
	service.UpdateCPUPercent(cpuUsage)

	return service, nil
}

// getServiceDetails collects extended information for a specific systemd service.
func (sm *systemdManager) getServiceDetails(serviceName string) (systemd.ServiceDetails, error) {
	conn, err := dbus.NewSystemConnectionContext(context.Background())
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	unitName := serviceName
	if !strings.HasSuffix(unitName, ".service") {
		unitName += ".service"
	}

	ctx := context.Background()
	props, err := conn.GetUnitPropertiesContext(ctx, unitName)
	if err != nil {
		return nil, err
	}

	// Start with all unit properties
	details := make(systemd.ServiceDetails)
	maps.Copy(details, props)

	// Add service-specific properties
	servicePropNames := []string{
		"MainPID", "ExecMainPID", "TasksCurrent", "TasksMax",
		"MemoryCurrent", "MemoryPeak", "MemoryLimit", "CPUUsageNSec",
		"NRestarts", "ExecMainStartTimestampRealtime", "Result",
	}

	for _, propName := range servicePropNames {
		if variant, err := conn.GetUnitTypePropertyContext(ctx, unitName, "Service", propName); err == nil {
			value := variant.Value.Value()
			// Check if the value is MaxUint64, which indicates unlimited/infinite
			if uint64Value, ok := value.(uint64); ok && uint64Value == math.MaxUint64 {
				// Set to nil to indicate unlimited - frontend will handle this appropriately
				details[propName] = nil
			} else {
				details[propName] = value
			}
		}
	}

	return details, nil
}

// unescapeServiceName unescapes systemd service names that contain C-style escape sequences like \x2d
func unescapeServiceName(name string) string {
	if !strings.Contains(name, "\\x") {
		return name
	}
	unescaped, err := strconv.Unquote("\"" + name + "\"")
	if err != nil {
		return name
	}
	return unescaped
}
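service.UpdateCPUPercent receives the cumulative CPUUsageNSec counter, so the percentage has to come from the delta between samples. A rough sketch of how such a method could work; the field and method internals here are assumptions, since the real systemd.Service type lives in internal/entities/systemd and is not part of this diff:

package systemd

import "time"

// Hypothetical fields and delta logic, shown only to illustrate the idea.
type Service struct {
	CpuPct      float64
	prevCPUNSec uint64
	prevSample  time.Time
}

func (s *Service) UpdateCPUPercent(cpuUsageNSec uint64) {
	now := time.Now()
	if !s.prevSample.IsZero() && cpuUsageNSec >= s.prevCPUNSec {
		if elapsed := now.Sub(s.prevSample).Nanoseconds(); elapsed > 0 {
			// fraction of one CPU consumed since the previous sample
			s.CpuPct = float64(cpuUsageNSec-s.prevCPUNSec) / float64(elapsed) * 100
		}
	}
	s.prevCPUNSec = cpuUsageNSec
	s.prevSample = now
}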
agent/systemd_nonlinux.go (new file, 28 lines)
@@ -0,0 +1,28 @@
//go:build !linux

package agent

import (
	"errors"

	"github.com/henrygd/beszel/internal/entities/systemd"
)

// systemdManager manages the collection of systemd service statistics.
type systemdManager struct {
	hasFreshStats bool
}

// newSystemdManager creates a new systemdManager.
func newSystemdManager() (*systemdManager, error) {
	return &systemdManager{}, nil
}

// getServiceStats returns nil for non-linux systems.
func (sm *systemdManager) getServiceStats(conn any, refresh bool) []*systemd.Service {
	return nil
}

func (sm *systemdManager) getServiceDetails(string) (systemd.ServiceDetails, error) {
	return nil, errors.New("systemd manager unavailable")
}
agent/systemd_nonlinux_test.go (new file, 53 lines)
@@ -0,0 +1,53 @@
//go:build !linux && testing

package agent

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewSystemdManager(t *testing.T) {
	manager, err := newSystemdManager()
	assert.NoError(t, err)
	assert.NotNil(t, manager)
}

func TestSystemdManagerGetServiceStats(t *testing.T) {
	manager, err := newSystemdManager()
	assert.NoError(t, err)

	// Test with refresh = true
	result := manager.getServiceStats(nil, true)
	assert.Nil(t, result)

	// Test with refresh = false
	result = manager.getServiceStats(nil, false)
	assert.Nil(t, result)
}

func TestSystemdManagerGetServiceDetails(t *testing.T) {
	manager, err := newSystemdManager()
	assert.NoError(t, err)

	result, err := manager.getServiceDetails("any-service")
	assert.Error(t, err)
	assert.Equal(t, "systemd manager unavailable", err.Error())
	assert.Nil(t, result)

	// Test with empty service name
	result, err = manager.getServiceDetails("")
	assert.Error(t, err)
	assert.Equal(t, "systemd manager unavailable", err.Error())
	assert.Nil(t, result)
}

func TestSystemdManagerFields(t *testing.T) {
	manager, err := newSystemdManager()
	assert.NoError(t, err)

	// The non-linux manager should be a simple struct with no special fields
	// We can't test private fields directly, but we can test the methods work
	assert.NotNil(t, manager)
}
agent/systemd_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
//go:build linux && testing

package agent

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestUnescapeServiceName(t *testing.T) {
	tests := []struct {
		input    string
		expected string
	}{
		{"nginx.service", "nginx.service"},                                     // No escaping needed
		{"test\\x2dwith\\x2ddashes.service", "test-with-dashes.service"},       // \x2d is dash
		{"service\\x20with\\x20spaces.service", "service with spaces.service"}, // \x20 is space
		{"mixed\\x2dand\\x2dnormal", "mixed-and-normal"},                       // Mixed escaped and normal
		{"no-escape-here", "no-escape-here"},                                   // No escape sequences
		{"", ""},                                                               // Empty string
		{"\\x2d\\x2d", "--"},                                                   // Multiple escapes
	}

	for _, test := range tests {
		t.Run(test.input, func(t *testing.T) {
			result := unescapeServiceName(test.input)
			assert.Equal(t, test.expected, result)
		})
	}
}

func TestUnescapeServiceNameInvalid(t *testing.T) {
	// Test invalid escape sequences - should return original string
	invalidInputs := []string{
		"invalid\\x",   // Incomplete escape
		"invalid\\xZZ", // Invalid hex
		"invalid\\x2",  // Incomplete hex
		"invalid\\xyz", // Not a valid escape
	}

	for _, input := range invalidInputs {
		t.Run(input, func(t *testing.T) {
			result := unescapeServiceName(input)
			assert.Equal(t, input, result, "Invalid escape sequences should return original string")
		})
	}
}
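These tests lean on strconv.Unquote doing the heavy lifting: wrapping the escaped unit name in quotes turns systemd's \xNN sequences into ordinary Go string escapes. A standalone illustration of that trick (a throwaway demo, not part of the test suite):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	name := "test\\x2dwith\\x2ddashes" // literal backslash-x-2-d, as systemd reports it
	unescaped, err := strconv.Unquote("\"" + name + "\"")
	fmt.Println(unescaped, err) // test-with-dashes <nil>
}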
agent/test-data/container.json (new file, 24 lines)
@@ -0,0 +1,24 @@
{
  "cpu_stats": {
    "cpu_usage": {
      "total_usage": 312055276000
    },
    "system_cpu_usage": 1366399830000000
  },
  "memory_stats": {
    "usage": 507400192,
    "stats": {
      "inactive_file": 165130240
    }
  },
  "networks": {
    "eth0": {
      "tx_bytes": 20376558,
      "rx_bytes": 537029455
    },
    "eth1": {
      "tx_bytes": 2003766,
      "rx_bytes": 6241
    }
  }
}
agent/test-data/container2.json (new file, 24 lines)
@@ -0,0 +1,24 @@
{
  "cpu_stats": {
    "cpu_usage": {
      "total_usage": 314891801000
    },
    "system_cpu_usage": 1368474900000000
  },
  "memory_stats": {
    "usage": 507400192,
    "stats": {
      "inactive_file": 165130240
    }
  },
  "networks": {
    "eth0": {
      "tx_bytes": 20376558,
      "rx_bytes": 537029455
    },
    "eth1": {
      "tx_bytes": 2003766,
      "rx_bytes": 6241
    }
  }
}
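container.json and container2.json read like two consecutive Docker stats snapshots for the same container, which is exactly what a CPU-percentage delta calculation needs. A rough sketch of the conventional formula applied to the numbers above (the agent's own docker code is not part of this excerpt, and some implementations additionally multiply by the number of online CPUs):

package main

import "fmt"

func main() {
	// deltas between container2.json and container.json
	cpuDelta := 314891801000.0 - 312055276000.0         // cpu_usage.total_usage (ns)
	sysDelta := 1368474900000000.0 - 1366399830000000.0 // system_cpu_usage (ns)
	cpuPct := cpuDelta / sysDelta * 100                 // ≈ 0.14 %
	// docker-style memory usage excludes inactive_file from the raw usage figure
	memBytes := 507400192.0 - 165130240.0 // ≈ 342 MB
	fmt.Printf("cpu %.2f%% mem %.0f bytes\n", cpuPct, memBytes)
}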
agent/test-data/smart/nvme0.json (new file, 272 lines)
@@ -0,0 +1,272 @@
|
||||
{
|
||||
"json_format_version": [
|
||||
1,
|
||||
0
|
||||
],
|
||||
"smartctl": {
|
||||
"version": [
|
||||
7,
|
||||
5
|
||||
],
|
||||
"pre_release": false,
|
||||
"svn_revision": "5714",
|
||||
"platform_info": "x86_64-linux-6.17.1-2-cachyos",
|
||||
"build_info": "(local build)",
|
||||
"argv": [
|
||||
"smartctl",
|
||||
"-aj",
|
||||
"/dev/nvme0"
|
||||
],
|
||||
"exit_status": 0
|
||||
},
|
||||
"local_time": {
|
||||
"time_t": 1761507494,
|
||||
"asctime": "Sun Oct 26 15:38:14 2025 EDT"
|
||||
},
|
||||
"device": {
|
||||
"name": "/dev/nvme0",
|
||||
"info_name": "/dev/nvme0",
|
||||
"type": "nvme",
|
||||
"protocol": "NVMe"
|
||||
},
|
||||
"model_name": "PELADN 512GB",
|
||||
"serial_number": "2024031600129",
|
||||
"firmware_version": "VC2S038E",
|
||||
"nvme_pci_vendor": {
|
||||
"id": 4332,
|
||||
"subsystem_id": 4332
|
||||
},
|
||||
"nvme_ieee_oui_identifier": 57420,
|
||||
"nvme_controller_id": 1,
|
||||
"nvme_version": {
|
||||
"string": "1.4",
|
||||
"value": 66560
|
||||
},
|
||||
"nvme_number_of_namespaces": 1,
|
||||
"nvme_namespaces": [
|
||||
{
|
||||
"id": 1,
|
||||
"size": {
|
||||
"blocks": 1000215216,
|
||||
"bytes": 512110190592
|
||||
},
|
||||
"capacity": {
|
||||
"blocks": 1000215216,
|
||||
"bytes": 512110190592
|
||||
},
|
||||
"utilization": {
|
||||
"blocks": 1000215216,
|
||||
"bytes": 512110190592
|
||||
},
|
||||
"formatted_lba_size": 512,
|
||||
"eui64": {
|
||||
"oui": 57420,
|
||||
"ext_id": 112094110470
|
||||
},
|
||||
"features": {
|
||||
"value": 0,
|
||||
"thin_provisioning": false,
|
||||
"na_fields": false,
|
||||
"dealloc_or_unwritten_block_error": false,
|
||||
"uid_reuse": false,
|
||||
"np_fields": false,
|
||||
"other": 0
|
||||
},
|
||||
"lba_formats": [
|
||||
{
|
||||
"formatted": true,
|
||||
"data_bytes": 512,
|
||||
"metadata_bytes": 0,
|
||||
"relative_performance": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"user_capacity": {
|
||||
"blocks": 1000215216,
|
||||
"bytes": 512110190592
|
||||
},
|
||||
"logical_block_size": 512,
|
||||
"smart_support": {
|
||||
"available": true,
|
||||
"enabled": true
|
||||
},
|
||||
"nvme_firmware_update_capabilities": {
|
||||
"value": 2,
|
||||
"slots": 1,
|
||||
"first_slot_is_read_only": false,
|
||||
"activiation_without_reset": false,
|
||||
"multiple_update_detection": false,
|
||||
"other": 0
|
||||
},
|
||||
"nvme_optional_admin_commands": {
|
||||
"value": 23,
|
||||
"security_send_receive": true,
|
||||
"format_nvm": true,
|
||||
"firmware_download": true,
|
||||
"namespace_management": false,
|
||||
"self_test": true,
|
||||
"directives": false,
|
||||
"mi_send_receive": false,
|
||||
"virtualization_management": false,
|
||||
"doorbell_buffer_config": false,
|
||||
"get_lba_status": false,
|
||||
"command_and_feature_lockdown": false,
|
||||
"other": 0
|
||||
},
|
||||
"nvme_optional_nvm_commands": {
|
||||
"value": 94,
|
||||
"compare": false,
|
||||
"write_uncorrectable": true,
|
||||
"dataset_management": true,
|
||||
"write_zeroes": true,
|
||||
"save_select_feature_nonzero": true,
|
||||
"reservations": false,
|
||||
"timestamp": true,
|
||||
"verify": false,
|
||||
"copy": false,
|
||||
"other": 0
|
||||
},
|
||||
"nvme_log_page_attributes": {
|
||||
"value": 2,
|
||||
"smart_health_per_namespace": false,
|
||||
"commands_effects_log": true,
|
||||
"extended_get_log_page_cmd": false,
|
||||
"telemetry_log": false,
|
||||
"persistent_event_log": false,
|
||||
"supported_log_pages_log": false,
|
||||
"telemetry_data_area_4": false,
|
||||
"other": 0
|
||||
},
|
||||
"nvme_maximum_data_transfer_pages": 32,
|
||||
"nvme_composite_temperature_threshold": {
|
||||
"warning": 100,
|
||||
"critical": 110
|
||||
},
|
||||
"temperature": {
|
||||
"op_limit_max": 100,
|
||||
"critical_limit_max": 110,
|
||||
"current": 61
|
||||
},
|
||||
"nvme_power_states": [
|
||||
{
|
||||
"non_operational_state": false,
|
||||
"relative_read_latency": 0,
|
||||
"relative_read_throughput": 0,
|
||||
"relative_write_latency": 0,
|
||||
"relative_write_throughput": 0,
|
||||
"entry_latency_us": 230000,
|
||||
"exit_latency_us": 50000,
|
||||
"max_power": {
|
||||
"value": 800,
|
||||
"scale": 2,
|
||||
"units_per_watt": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"non_operational_state": false,
|
||||
"relative_read_latency": 1,
|
||||
"relative_read_throughput": 1,
|
||||
"relative_write_latency": 1,
|
||||
"relative_write_throughput": 1,
|
||||
"entry_latency_us": 4000,
|
||||
"exit_latency_us": 50000,
|
||||
"max_power": {
|
||||
"value": 400,
|
||||
"scale": 2,
|
||||
"units_per_watt": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"non_operational_state": false,
|
||||
"relative_read_latency": 2,
|
||||
"relative_read_throughput": 2,
|
||||
"relative_write_latency": 2,
|
||||
"relative_write_throughput": 2,
|
||||
"entry_latency_us": 4000,
|
||||
"exit_latency_us": 250000,
|
||||
"max_power": {
|
||||
"value": 300,
|
||||
"scale": 2,
|
||||
"units_per_watt": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"non_operational_state": true,
|
||||
"relative_read_latency": 3,
|
||||
"relative_read_throughput": 3,
|
||||
"relative_write_latency": 3,
|
||||
"relative_write_throughput": 3,
|
||||
"entry_latency_us": 5000,
|
||||
"exit_latency_us": 10000,
|
||||
"max_power": {
|
||||
"value": 300,
|
||||
"scale": 1,
|
||||
"units_per_watt": 10000
|
||||
}
|
||||
},
|
||||
{
|
||||
"non_operational_state": true,
|
||||
"relative_read_latency": 4,
|
||||
"relative_read_throughput": 4,
|
||||
"relative_write_latency": 4,
|
||||
"relative_write_throughput": 4,
|
||||
"entry_latency_us": 54000,
|
||||
"exit_latency_us": 45000,
|
||||
"max_power": {
|
||||
"value": 50,
|
||||
"scale": 1,
|
||||
"units_per_watt": 10000
|
||||
}
|
||||
}
|
||||
],
|
||||
"smart_status": {
|
||||
"passed": true,
|
||||
"nvme": {
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"nvme_smart_health_information_log": {
|
||||
"nsid": -1,
|
||||
"critical_warning": 0,
|
||||
"temperature": 61,
|
||||
"available_spare": 100,
|
||||
"available_spare_threshold": 32,
|
||||
"percentage_used": 0,
|
||||
"data_units_read": 6573104,
|
||||
"data_units_written": 16040567,
|
||||
"host_reads": 63241130,
|
||||
"host_writes": 253050006,
|
||||
"controller_busy_time": 0,
|
||||
"power_cycles": 430,
|
||||
"power_on_hours": 4399,
|
||||
"unsafe_shutdowns": 44,
|
||||
"media_errors": 0,
|
||||
"num_err_log_entries": 0,
|
||||
"warning_temp_time": 0,
|
||||
"critical_comp_time": 0
|
||||
},
|
||||
"spare_available": {
|
||||
"current_percent": 100,
|
||||
"threshold_percent": 32
|
||||
},
|
||||
"endurance_used": {
|
||||
"current_percent": 0
|
||||
},
|
||||
"power_cycle_count": 430,
|
||||
"power_on_time": {
|
||||
"hours": 4399
|
||||
},
|
||||
"nvme_error_information_log": {
|
||||
"size": 8,
|
||||
"read": 8,
|
||||
"unread": 0
|
||||
},
|
||||
"nvme_self_test_log": {
|
||||
"nsid": -1,
|
||||
"current_self_test_operation": {
|
||||
"value": 0,
|
||||
"string": "No self-test in progress"
|
||||
}
|
||||
}
|
||||
}
|
||||
agent/test-data/smart/scan.json (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  "json_format_version": [
    1,
    0
  ],
  "smartctl": {
    "version": [
      7,
      5
    ],
    "pre_release": false,
    "svn_revision": "5714",
    "platform_info": "x86_64-linux-6.17.1-2-cachyos",
    "build_info": "(local build)",
    "argv": [
      "smartctl",
      "--scan",
      "-j"
    ],
    "exit_status": 0
  },
  "devices": [
    {
      "name": "/dev/sda",
      "info_name": "/dev/sda [SAT]",
      "type": "sat",
      "protocol": "ATA"
    },
    {
      "name": "/dev/nvme0",
      "info_name": "/dev/nvme0",
      "type": "nvme",
      "protocol": "NVMe"
    }
  ]
}
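scan.json mirrors the output of `smartctl --scan -j`, which the agent presumably parses to enumerate devices before querying each one with `smartctl -aj`. A minimal sketch of decoding it; the struct and field names here are chosen for illustration rather than taken from the agent:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Illustrative types; the agent's real parser may differ.
type scanOutput struct {
	Devices []struct {
		Name     string `json:"name"`
		InfoName string `json:"info_name"`
		Type     string `json:"type"`
		Protocol string `json:"protocol"`
	} `json:"devices"`
}

func main() {
	raw, err := os.ReadFile("agent/test-data/smart/scan.json")
	if err != nil {
		panic(err)
	}
	var scan scanOutput
	if err := json.Unmarshal(raw, &scan); err != nil {
		panic(err)
	}
	for _, d := range scan.Devices {
		fmt.Println(d.Name, d.Type, d.Protocol) // /dev/sda sat ATA, /dev/nvme0 nvme NVMe
	}
}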
agent/test-data/smart/scsi.json (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
{
|
||||
"json_format_version": [
|
||||
1,
|
||||
0
|
||||
],
|
||||
"smartctl": {
|
||||
"version": [
|
||||
7,
|
||||
3
|
||||
],
|
||||
"svn_revision": "5338",
|
||||
"platform_info": "x86_64-linux-6.12.43+deb12-amd64",
|
||||
"build_info": "(local build)",
|
||||
"argv": [
|
||||
"smartctl",
|
||||
"-aj",
|
||||
"/dev/sde"
|
||||
],
|
||||
"exit_status": 0
|
||||
},
|
||||
"local_time": {
|
||||
"time_t": 1761502142,
|
||||
"asctime": "Sun Oct 21 21:09:02 2025 MSK"
|
||||
},
|
||||
"device": {
|
||||
"name": "/dev/sde",
|
||||
"info_name": "/dev/sde",
|
||||
"type": "scsi",
|
||||
"protocol": "SCSI"
|
||||
},
|
||||
"scsi_vendor": "YADRO",
|
||||
"scsi_product": "WUH721414AL4204",
|
||||
"scsi_model_name": "YADRO WUH721414AL4204",
|
||||
"scsi_revision": "C240",
|
||||
"scsi_version": "SPC-4",
|
||||
"user_capacity": {
|
||||
"blocks": 3418095616,
|
||||
"bytes": 14000519643136
|
||||
},
|
||||
"logical_block_size": 4096,
|
||||
"scsi_lb_provisioning": {
|
||||
"name": "fully provisioned",
|
||||
"value": 0,
|
||||
"management_enabled": {
|
||||
"name": "LBPME",
|
||||
"value": 0
|
||||
},
|
||||
"read_zeros": {
|
||||
"name": "LBPRZ",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"rotation_rate": 7200,
|
||||
"form_factor": {
|
||||
"scsi_value": 2,
|
||||
"name": "3.5 inches"
|
||||
},
|
||||
"logical_unit_id": "0x5000cca29063dc00",
|
||||
"serial_number": "9YHSDH9B",
|
||||
"device_type": {
|
||||
"scsi_terminology": "Peripheral Device Type [PDT]",
|
||||
"scsi_value": 0,
|
||||
"name": "disk"
|
||||
},
|
||||
"scsi_transport_protocol": {
|
||||
"name": "SAS (SPL-4)",
|
||||
"value": 6
|
||||
},
|
||||
"smart_support": {
|
||||
"available": true,
|
||||
"enabled": true
|
||||
},
|
||||
"temperature_warning": {
|
||||
"enabled": true
|
||||
},
|
||||
"smart_status": {
|
||||
"passed": true
|
||||
},
|
||||
"temperature": {
|
||||
"current": 34,
|
||||
"drive_trip": 85
|
||||
},
|
||||
"power_on_time": {
|
||||
"hours": 458,
|
||||
"minutes": 25
|
||||
},
|
||||
"scsi_start_stop_cycle_counter": {
|
||||
"year_of_manufacture": "2022",
|
||||
"week_of_manufacture": "41",
|
||||
"specified_cycle_count_over_device_lifetime": 50000,
|
||||
"accumulated_start_stop_cycles": 2,
|
||||
"specified_load_unload_count_over_device_lifetime": 600000,
|
||||
"accumulated_load_unload_cycles": 418
|
||||
},
|
||||
"scsi_grown_defect_list": 0,
|
||||
"scsi_error_counter_log": {
|
||||
"read": {
|
||||
"errors_corrected_by_eccfast": 0,
|
||||
"errors_corrected_by_eccdelayed": 0,
|
||||
"errors_corrected_by_rereads_rewrites": 0,
|
||||
"total_errors_corrected": 0,
|
||||
"correction_algorithm_invocations": 346,
|
||||
"gigabytes_processed": "3,641",
|
||||
"total_uncorrected_errors": 0
|
||||
},
|
||||
"write": {
|
||||
"errors_corrected_by_eccfast": 0,
|
||||
"errors_corrected_by_eccdelayed": 0,
|
||||
"errors_corrected_by_rereads_rewrites": 0,
|
||||
"total_errors_corrected": 0,
|
||||
"correction_algorithm_invocations": 4052,
|
||||
"gigabytes_processed": "2124,590",
|
||||
"total_uncorrected_errors": 0
|
||||
},
|
||||
"verify": {
|
||||
"errors_corrected_by_eccfast": 0,
|
||||
"errors_corrected_by_eccdelayed": 0,
|
||||
"errors_corrected_by_rereads_rewrites": 0,
|
||||
"total_errors_corrected": 0,
|
||||
"correction_algorithm_invocations": 223,
|
||||
"gigabytes_processed": "0,000",
|
||||
"total_uncorrected_errors": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
agent/test-data/smart/sda.json (new file, 1013 lines)
File diff suppressed because it is too large
agent/tools/fetchsmartctl/main.go (new file, 130 lines)
@@ -0,0 +1,130 @@
package main

import (
	"crypto/sha1"
	"crypto/sha256"
	"encoding/hex"
	"flag"
	"fmt"
	"hash"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// Download smartctl.exe from the given URL and save it to the given destination.
// This is used to embed smartctl.exe in the Windows build.

func main() {
	url := flag.String("url", "", "URL to download smartctl.exe from (required)")
	out := flag.String("out", "", "Destination path for smartctl.exe (required)")
	sha := flag.String("sha", "", "Optional SHA1/SHA256 checksum for integrity validation")
	force := flag.Bool("force", false, "Force re-download even if destination exists")
	flag.Parse()

	if *url == "" || *out == "" {
		fatalf("-url and -out are required")
	}

	if !*force {
		if info, err := os.Stat(*out); err == nil && info.Size() > 0 {
			fmt.Println("smartctl.exe already present, skipping download")
			return
		}
	}

	if err := downloadFile(*url, *out, *sha); err != nil {
		fatalf("download failed: %v", err)
	}
}

func downloadFile(url, dest, shaHex string) error {
	// Prepare destination
	if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
		return fmt.Errorf("create dir: %w", err)
	}

	// HTTP client
	client := &http.Client{Timeout: 60 * time.Second}
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return fmt.Errorf("new request: %w", err)
	}
	req.Header.Set("User-Agent", "beszel-fetchsmartctl/1.0")

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("http get: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected HTTP status: %s", resp.Status)
	}

	tmp := dest + ".tmp"
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644)
	if err != nil {
		return fmt.Errorf("open tmp: %w", err)
	}

	// Determine hash algorithm based on length (SHA1=40, SHA256=64)
	var hasher hash.Hash
	if shaHex := strings.TrimSpace(shaHex); shaHex != "" {
		cleanSha := strings.ToLower(strings.ReplaceAll(shaHex, " ", ""))
		switch len(cleanSha) {
		case 40:
			hasher = sha1.New()
		case 64:
			hasher = sha256.New()
		default:
			f.Close()
			os.Remove(tmp)
			return fmt.Errorf("unsupported hash length: %d (expected 40 for SHA1 or 64 for SHA256)", len(cleanSha))
		}
	}

	var mw io.Writer = f
	if hasher != nil {
		mw = io.MultiWriter(f, hasher)
	}
	if _, err := io.Copy(mw, resp.Body); err != nil {
		f.Close()
		os.Remove(tmp)
		return fmt.Errorf("write tmp: %w", err)
	}
	if err := f.Close(); err != nil {
		os.Remove(tmp)
		return fmt.Errorf("close tmp: %w", err)
	}

	if hasher != nil && shaHex != "" {
		cleanSha := strings.ToLower(strings.ReplaceAll(strings.TrimSpace(shaHex), " ", ""))
		got := strings.ToLower(hex.EncodeToString(hasher.Sum(nil)))
		if got != cleanSha {
			os.Remove(tmp)
			return fmt.Errorf("hash mismatch: got %s want %s", got, cleanSha)
		}
	}

	// Make executable and move into place
	if err := os.Chmod(tmp, 0o755); err != nil {
		os.Remove(tmp)
		return fmt.Errorf("chmod: %w", err)
	}
	if err := os.Rename(tmp, dest); err != nil {
		os.Remove(tmp)
		return fmt.Errorf("rename: %w", err)
	}

	fmt.Println("smartctl.exe downloaded to", dest)
	return nil
}

func fatalf(format string, a ...any) {
	fmt.Fprintf(os.Stderr, format+"\n", a...)
	os.Exit(1)
}
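The tool is driven entirely by flags. One plausible way to wire it into the Windows build is a go:generate directive next to the code that embeds smartctl.exe; this is an assumption for illustration only, since the actual build scripts and release URL are not shown in this diff:

//go:generate go run ./tools/fetchsmartctl -url https://example.com/smartctl.exe -out ./smartctl/smartctl.exe -sha <sha256-of-release>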
agent/update.go (new file, 168 lines)
@@ -0,0 +1,168 @@
package agent

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"runtime"
	"strings"

	"github.com/henrygd/beszel/internal/ghupdate"
)

// restarter knows how to restart the beszel-agent service.
type restarter interface {
	Restart() error
}

type systemdRestarter struct{ cmd string }

func (s *systemdRestarter) Restart() error {
	// Only restart if the service is active
	if err := exec.Command(s.cmd, "is-active", "beszel-agent.service").Run(); err != nil {
		return nil
	}
	ghupdate.ColorPrint(ghupdate.ColorYellow, "Restarting beszel-agent.service via systemd…")
	return exec.Command(s.cmd, "restart", "beszel-agent.service").Run()
}

type openRCRestarter struct{ cmd string }

func (o *openRCRestarter) Restart() error {
	if err := exec.Command(o.cmd, "beszel-agent", "status").Run(); err != nil {
		return nil
	}
	ghupdate.ColorPrint(ghupdate.ColorYellow, "Restarting beszel-agent via OpenRC…")
	return exec.Command(o.cmd, "beszel-agent", "restart").Run()
}

type openWRTRestarter struct{ cmd string }

func (w *openWRTRestarter) Restart() error {
	// https://openwrt.org/docs/guide-user/base-system/managing_services?s[]=service
	if err := exec.Command("/etc/init.d/beszel-agent", "running").Run(); err != nil {
		return nil
	}
	ghupdate.ColorPrint(ghupdate.ColorYellow, "Restarting beszel-agent via procd…")
	return exec.Command("/etc/init.d/beszel-agent", "restart").Run()
}

type freeBSDRestarter struct{ cmd string }

func (f *freeBSDRestarter) Restart() error {
	if err := exec.Command(f.cmd, "beszel-agent", "status").Run(); err != nil {
		return nil
	}
	ghupdate.ColorPrint(ghupdate.ColorYellow, "Restarting beszel-agent via FreeBSD rc…")
	return exec.Command(f.cmd, "beszel-agent", "restart").Run()
}

func detectRestarter() restarter {
	if path, err := exec.LookPath("systemctl"); err == nil {
		return &systemdRestarter{cmd: path}
	}
	if path, err := exec.LookPath("rc-service"); err == nil {
		return &openRCRestarter{cmd: path}
	}
	if path, err := exec.LookPath("procd"); err == nil {
		return &openWRTRestarter{cmd: path}
	}
	if path, err := exec.LookPath("service"); err == nil {
		if runtime.GOOS == "freebsd" {
			return &freeBSDRestarter{cmd: path}
		}
	}
	return nil
}

// Update checks GitHub for a newer release of beszel-agent, applies it,
// fixes SELinux context if needed, and restarts the service.
func Update(useMirror bool) error {
	exePath, _ := os.Executable()

	dataDir, err := getDataDir()
	if err != nil {
		dataDir = os.TempDir()
	}
	updated, err := ghupdate.Update(ghupdate.Config{
		ArchiveExecutable: "beszel-agent",
		DataDir:           dataDir,
		UseMirror:         useMirror,
	})
	if err != nil {
		log.Fatal(err)
	}
	if !updated {
		return nil
	}

	// make sure the file is executable
	if err := os.Chmod(exePath, 0755); err != nil {
		ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: failed to set executable permissions: %v", err)
	}
	// set ownership to beszel:beszel if possible
	if chownPath, err := exec.LookPath("chown"); err == nil {
		if err := exec.Command(chownPath, "beszel:beszel", exePath).Run(); err != nil {
			ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: failed to set file ownership: %v", err)
		}
	}

	// Fix SELinux context if necessary
	if err := handleSELinuxContext(exePath); err != nil {
		ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: SELinux context handling: %v", err)
	}

	// Restart service if running under a recognised init system
	if r := detectRestarter(); r != nil {
		if err := r.Restart(); err != nil {
			ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: failed to restart service: %v", err)
			ghupdate.ColorPrint(ghupdate.ColorYellow, "Please restart the service manually.")
		} else {
			ghupdate.ColorPrint(ghupdate.ColorGreen, "Service restarted successfully")
		}
	} else {
		ghupdate.ColorPrint(ghupdate.ColorYellow, "No supported init system detected; please restart manually if needed.")
	}

	return nil
}

// handleSELinuxContext restores or applies the correct SELinux label to the binary.
func handleSELinuxContext(path string) error {
	out, err := exec.Command("getenforce").Output()
	if err != nil {
		// SELinux not enabled or getenforce not available
		return nil
	}
	state := strings.TrimSpace(string(out))
	if state == "Disabled" {
		return nil
	}

	ghupdate.ColorPrint(ghupdate.ColorYellow, "SELinux is enabled; applying context…")
	var errs []string

	// Try persistent context via semanage+restorecon
	if semanagePath, err := exec.LookPath("semanage"); err == nil {
		if err := exec.Command(semanagePath, "fcontext", "-a", "-t", "bin_t", path).Run(); err != nil {
			errs = append(errs, "semanage fcontext failed: "+err.Error())
		} else if restoreconPath, err := exec.LookPath("restorecon"); err == nil {
			if err := exec.Command(restoreconPath, "-v", path).Run(); err != nil {
				errs = append(errs, "restorecon failed: "+err.Error())
			}
		}
	}

	// Fallback to temporary context via chcon
	if chconPath, err := exec.LookPath("chcon"); err == nil {
		if err := exec.Command(chconPath, "-t", "bin_t", path).Run(); err != nil {
			errs = append(errs, "chcon failed: "+err.Error())
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("SELinux context errors: %s", strings.Join(errs, "; "))
	}
	return nil
}
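detectRestarter probes for an init system in a fixed order, so supporting another supervisor only means implementing the small restarter interface. A hypothetical example for runit-style systems, shown purely to illustrate the extension point (it is not part of this PR):

// Hypothetical runit support; r.cmd would be the path to `sv`.
type runitRestarter struct{ cmd string }

func (r *runitRestarter) Restart() error {
	if err := exec.Command(r.cmd, "status", "beszel-agent").Run(); err != nil {
		return nil // service not running; nothing to restart
	}
	return exec.Command(r.cmd, "restart", "beszel-agent").Run()
}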
beszel.go (new file, 18 lines)
@@ -0,0 +1,18 @@
// Package beszel provides core application constants and version information
// which are used throughout the application.
package beszel

import "github.com/blang/semver"

const (
	// Version is the current version of the application.
	Version = "0.16.0-beta.1"
	// AppName is the name of the application.
	AppName = "beszel"
)

// MinVersionCbor is the minimum supported version for CBOR compatibility.
var MinVersionCbor = semver.MustParse("0.12.0")

// MinVersionAgentResponse is the minimum supported version for AgentResponse compatibility.
var MinVersionAgentResponse = semver.MustParse("0.13.0")
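Both minimum-version constants are semver values, so callers can gate behaviour with ordinary comparisons. A small hypothetical check, assuming the root package is importable as github.com/henrygd/beszel (consistent with the import paths elsewhere in this diff); the hub's real negotiation code is not shown here:

package main

import (
	"fmt"

	"github.com/blang/semver"
	"github.com/henrygd/beszel"
)

func main() {
	agentVersion := semver.MustParse("0.12.5") // example value reported by a connecting agent
	useCbor := agentVersion.GTE(beszel.MinVersionCbor)
	useAgentResponse := agentVersion.GTE(beszel.MinVersionAgentResponse)
	fmt.Println(useCbor, useAgentResponse) // true false
}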
@@ -1,69 +0,0 @@
version: 2

project_name: beszel

before:
  hooks:
    - go mod tidy

builds:
  - id: beszel
    binary: beszel
    main: cmd/hub/hub.go
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - darwin
    goarch:
      - amd64
      - arm64
      - arm

  - id: beszel-agent
    binary: beszel-agent
    main: cmd/agent/agent.go
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - darwin
      - freebsd
    goarch:
      - amd64
      - arm64
      - arm
      - mips64
    ignore:
      - goos: freebsd
        goarch: arm

archives:
  - id: beszel
    format: tar.gz
    builds:
      - beszel-agent
    name_template: >-
      {{ .Binary }}_
      {{- .Os }}_
      {{- .Arch }}
  - id: beszel-agent
    format: tar.gz
    builds:
      - beszel
    name_template: >-
      {{ .Binary }}_
      {{- .Os }}_
      {{- .Arch }}

# use zip for windows archives
# format_overrides:
#   - goos: windows
#     format: zip

changelog:
  disable: true
  sort: asc
  filters:
    exclude:
      - '^docs:'
      - '^test:'
@@ -1,35 +0,0 @@
# Default OS/ARCH values
OS ?= $(shell go env GOOS)
ARCH ?= $(shell go env GOARCH)
# Skip building the web UI if true
SKIP_WEB ?= false

.PHONY: tidy build-agent build-hub build clean lint
.DEFAULT_GOAL := build

tidy:
	go mod tidy

build-web-ui:
	@if command -v bun >/dev/null 2>&1; then \
		bun install --cwd ./site && \
		bun run --cwd ./site build; \
	else \
		npm install --prefix ./site && \
		npm run --prefix ./site build; \
	fi

build-agent: tidy
	CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel-agent_$(OS)_$(ARCH) -ldflags "-w -s" beszel/cmd/agent

build-hub: tidy $(if $(filter false,$(SKIP_WEB)),build-web-ui)
	CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel_$(OS)_$(ARCH) -ldflags "-w -s" beszel/cmd/hub

build: build-agent build-hub

clean:
	go clean
	rm -rf ./build

lint:
	golangci-lint run
@@ -1,41 +0,0 @@
package main

import (
	"beszel"
	"beszel/internal/agent"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	// handle flags / subcommands
	if len(os.Args) > 1 {
		switch os.Args[1] {
		case "-v":
			fmt.Println(beszel.AppName+"-agent", beszel.Version)
		case "update":
			agent.Update()
		}
		os.Exit(0)
	}

	var pubKey []byte
	if pubKeyEnv, exists := os.LookupEnv("KEY"); exists {
		pubKey = []byte(pubKeyEnv)
	} else {
		log.Fatal("KEY environment variable is not set")
	}

	addr := ":45876"
	if portEnvVar, exists := os.LookupEnv("PORT"); exists {
		// allow passing an address in the form of "127.0.0.1:45876"
		if !strings.Contains(portEnvVar, ":") {
			portEnvVar = ":" + portEnvVar
		}
		addr = portEnvVar
	}

	agent.NewAgent().Run(pubKey, addr)
}
@@ -1,28 +0,0 @@
package main

import (
	"beszel"
	"beszel/internal/hub"
	_ "beszel/migrations"

	"github.com/pocketbase/pocketbase"
	"github.com/spf13/cobra"
)

func main() {
	app := pocketbase.NewWithConfig(pocketbase.Config{
		DefaultDataDir: beszel.AppName + "_data",
	})
	app.RootCmd.Version = beszel.Version
	app.RootCmd.Use = beszel.AppName
	app.RootCmd.Short = ""

	// add update command
	app.RootCmd.AddCommand(&cobra.Command{
		Use:   "update",
		Short: "Update " + beszel.AppName + " to the latest version",
		Run:   hub.Update,
	})

	hub.NewHub(app).Run()
}
@@ -1,20 +0,0 @@
FROM --platform=$BUILDPLATFORM golang:alpine AS builder

WORKDIR /app

COPY go.mod go.sum ./
# RUN go mod download
COPY *.go ./
COPY cmd ./cmd
COPY internal ./internal

# Build
ARG TARGETOS TARGETARCH
RUN CGO_ENABLED=0 GOGC=75 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -ldflags "-w -s" -o /agent ./cmd/agent

# ? -------------------------
FROM scratch

COPY --from=builder /agent /agent

ENTRYPOINT ["/agent"]
beszel/go.mod (104 lines removed)
@@ -1,104 +0,0 @@
|
||||
module beszel
|
||||
|
||||
go 1.22.4
|
||||
|
||||
require (
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
github.com/containrrr/shoutrrr v0.8.0
|
||||
github.com/gliderlabs/ssh v0.3.7
|
||||
github.com/goccy/go-json v0.10.3
|
||||
github.com/labstack/echo/v5 v5.0.0-20230722203903-ec5b858dab61
|
||||
github.com/pocketbase/dbx v1.10.1
|
||||
github.com/pocketbase/pocketbase v0.22.22
|
||||
github.com/rhysd/go-github-selfupdate v1.2.3
|
||||
github.com/shirou/gopsutil/v4 v4.24.9
|
||||
github.com/spf13/cast v1.7.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
golang.org/x/crypto v0.28.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.33 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect
|
||||
github.com/aws/smithy-go v1.22.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/disintegration/imaging v1.6.2 // indirect
|
||||
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.0 // indirect
|
||||
github.com/fatih/color v1.17.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.6 // indirect
|
||||
github.com/ganigeorgiev/fexpr v0.4.1 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/google/go-github/v30 v30.1.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.24 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/tcnksm/go-gitconfig v0.1.2 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.14 // indirect
|
||||
github.com/tklauser/numcpus v0.8.0 // indirect
|
||||
github.com/ulikunitz/xz v0.5.12 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
gocloud.dev v0.40.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
|
||||
golang.org/x/image v0.21.0 // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/oauth2 v0.23.0 // indirect
|
||||
golang.org/x/sync v0.8.0 // indirect
|
||||
golang.org/x/sys v0.26.0 // indirect
|
||||
golang.org/x/term v0.25.0 // indirect
|
||||
golang.org/x/text v0.19.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
||||
google.golang.org/api v0.201.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
|
||||
google.golang.org/grpc v1.67.1 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
modernc.org/gc/v3 v3.0.0-20241004144649-1aea3fae8852 // indirect
|
||||
modernc.org/libc v1.61.0 // indirect
|
||||
modernc.org/mathutil v1.6.0 // indirect
|
||||
modernc.org/memory v1.8.0 // indirect
|
||||
modernc.org/sqlite v1.33.1 // indirect
|
||||
modernc.org/strutil v1.2.0 // indirect
|
||||
modernc.org/token v1.1.0 // indirect
|
||||
)
|
||||
beszel/go.sum (437 lines removed)
@@ -1,437 +0,0 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
|
||||
cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
|
||||
cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8=
|
||||
cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
|
||||
cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
|
||||
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
|
||||
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
|
||||
cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4=
|
||||
cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus=
|
||||
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
|
||||
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.33 h1:X+4YY5kZRI/cOoSMVMGTqFXHAMg1bvvay7IBcqHpybQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.33/go.mod h1:DPynzu+cn92k5UQ6tZhX+wfTB4ah6QDU/NgdHqatmvk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 h1:4FMHqLfk0efmTqhXVRL5xYRqlEBNBiRI7N6w4jsEdd4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2/go.mod h1:LWoqeWlK9OZeJxsROW2RqrSPvQHKTpp69r/iDjwsSaw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 h1:t7iUP9+4wdc5lt3E41huP+GvQZJD38WLsgVp4iOtAjg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2/go.mod h1:/niFCtmuQNxqx9v8WAPq5qh7EH25U4BF6tjoyq9bObM=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.0 h1:xA6XhTF7PE89BCNHJbQi8VvPzcgMtmGC5dr8S8N7lHk=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.0/go.mod h1:cB6oAuus7YXRZhWCc1wIwPywwZ1XwweNp2TVAEGYeB8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo=
|
||||
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
|
||||
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/containrrr/shoutrrr v0.8.0 h1:mfG2ATzIS7NR2Ec6XL+xyoHzN97H8WPjir8aYzJUSec=
|
||||
github.com/containrrr/shoutrrr v0.8.0/go.mod h1:ioyQAyu1LJY6sILuNyKaQaw+9Ttik5QePU8atnAdO2o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
@@ -1,108 +0,0 @@
// Package agent handles the agent's SSH server and system stats collection.
package agent

import (
	"beszel"
	"beszel/internal/entities/system"
	"context"
	"log/slog"
	"os"
	"strings"

	"github.com/shirou/gopsutil/v4/common"
)

type Agent struct {
	debug            bool                       // true if LOG_LEVEL is set to debug
	zfs              bool                       // true if system has arcstats
	memCalc          string                     // Memory calculation formula
	fsNames          []string                   // List of filesystem device names being monitored
	fsStats          map[string]*system.FsStats // Keeps track of disk stats for each filesystem
	netInterfaces    map[string]struct{}        // Stores all valid network interfaces
	netIoStats       system.NetIoStats          // Keeps track of bandwidth usage
	dockerManager    *dockerManager             // Manages Docker API requests
	sensorsContext   context.Context            // Sensors context to override sys location
	sensorsWhitelist map[string]struct{}        // List of sensors to monitor
	systemInfo       system.Info                // Host system info
}

func NewAgent() *Agent {
	return &Agent{
		sensorsContext: context.Background(),
		memCalc:        os.Getenv("MEM_CALC"),
		fsStats:        make(map[string]*system.FsStats),
	}
}

func (a *Agent) Run(pubKey []byte, addr string) {
	// Set up slog with a log level determined by the LOG_LEVEL env var
	if logLevelStr, exists := os.LookupEnv("LOG_LEVEL"); exists {
		switch strings.ToLower(logLevelStr) {
		case "debug":
			a.debug = true
			slog.SetLogLoggerLevel(slog.LevelDebug)
		case "warn":
			slog.SetLogLoggerLevel(slog.LevelWarn)
		case "error":
			slog.SetLogLoggerLevel(slog.LevelError)
		}
	}

	slog.Debug(beszel.Version)

	// Set sensors context (allows overriding sys location for sensors)
	if sysSensors, exists := os.LookupEnv("SYS_SENSORS"); exists {
		slog.Info("SYS_SENSORS", "path", sysSensors)
		a.sensorsContext = context.WithValue(a.sensorsContext,
			common.EnvKey, common.EnvMap{common.HostSysEnvKey: sysSensors},
		)
	}

	// Set sensors whitelist
	if sensors, exists := os.LookupEnv("SENSORS"); exists {
		a.sensorsWhitelist = make(map[string]struct{})
		for _, sensor := range strings.Split(sensors, ",") {
			if sensor != "" {
				a.sensorsWhitelist[sensor] = struct{}{}
			}
		}
	}

	// initialize system info / docker manager
	a.initializeSystemInfo()
	a.initializeDiskInfo()
	a.initializeNetIoStats()
	a.dockerManager = newDockerManager()

	// if debugging, print stats
	if a.debug {
		slog.Debug("Stats", "data", a.gatherStats())
	}

	a.startServer(pubKey, addr)
}

func (a *Agent) gatherStats() system.CombinedData {
	slog.Debug("Getting stats")
	systemData := system.CombinedData{
		Stats: a.getSystemStats(),
		Info:  a.systemInfo,
	}
	slog.Debug("System stats", "data", systemData)
	// add docker stats
	if containerStats, err := a.dockerManager.getDockerStats(); err == nil {
		systemData.Containers = containerStats
		slog.Debug("Docker stats", "data", systemData.Containers)
	} else {
		slog.Debug("Error getting docker stats", "err", err)
	}
	// add extra filesystems
	systemData.Stats.ExtraFs = make(map[string]*system.FsStats)
	for name, stats := range a.fsStats {
		if !stats.Root && stats.DiskTotal > 0 {
			systemData.Stats.ExtraFs[name] = stats
		}
	}
	slog.Debug("Extra filesystems", "data", systemData.Stats.ExtraFs)
	return systemData
}
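
// Illustrative sketch, not from the repository: a minimal entrypoint showing how
// NewAgent and Run fit together. The real cmd package is not part of this section,
// so the env var names (AGENT_KEY_FILE, AGENT_LISTEN) and the default port are
// assumptions for the example only.
package main

import (
	"log"
	"os"

	"beszel/internal/agent"
)

func main() {
	// hub's SSH public key; startServer compares the connecting key against it
	pubKey, err := os.ReadFile(os.Getenv("AGENT_KEY_FILE"))
	if err != nil {
		log.Fatal(err)
	}
	addr := os.Getenv("AGENT_LISTEN")
	if addr == "" {
		addr = ":45876" // assumed default; check the project docs for the real value
	}
	agent.NewAgent().Run(pubKey, addr)
}
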
@@ -1,169 +0,0 @@
package agent

import (
	"beszel/internal/entities/system"
	"log/slog"
	"time"

	"os"
	"path/filepath"
	"strings"

	"github.com/shirou/gopsutil/v4/disk"
)

// Sets up the filesystems to monitor for disk usage and I/O.
func (a *Agent) initializeDiskInfo() {
	filesystem := os.Getenv("FILESYSTEM")
	efPath := "/extra-filesystems"
	hasRoot := false

	partitions, err := disk.Partitions(false)
	if err != nil {
		slog.Error("Error getting disk partitions", "err", err)
	}
	slog.Debug("Disk", "partitions", partitions)

	// ioContext := context.WithValue(a.sensorsContext,
	// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
	// )
	// diskIoCounters, err := disk.IOCountersWithContext(ioContext)

	diskIoCounters, err := disk.IOCounters()
	if err != nil {
		slog.Error("Error getting diskstats", "err", err)
	}
	slog.Debug("Disk I/O", "diskstats", diskIoCounters)

	// Helper function to add a filesystem to fsStats if it doesn't exist
	addFsStat := func(device, mountpoint string, root bool) {
		key := filepath.Base(device)
		if _, exists := a.fsStats[key]; !exists {
			if root {
				slog.Info("Detected root device", "name", key)
				// check if root device is in /proc/diskstats, use fallback if not
				if _, exists := diskIoCounters[key]; !exists {
					slog.Warn("Device not found in diskstats", "name", key)
					key = findFallbackIoDevice(filesystem, diskIoCounters, a.fsStats)
					slog.Info("Using I/O fallback", "name", key)
				}
			}
			a.fsStats[key] = &system.FsStats{Root: root, Mountpoint: mountpoint}
		}
	}

	// Use FILESYSTEM env var to find root filesystem
	if filesystem != "" {
		for _, p := range partitions {
			if strings.HasSuffix(p.Device, filesystem) || p.Mountpoint == filesystem {
				addFsStat(p.Device, p.Mountpoint, true)
				hasRoot = true
				break
			}
		}
		if !hasRoot {
			slog.Warn("Partition details not found", "filesystem", filesystem)
		}
	}

	// Add EXTRA_FILESYSTEMS env var values to fsStats
	if extraFilesystems, exists := os.LookupEnv("EXTRA_FILESYSTEMS"); exists {
		for _, fs := range strings.Split(extraFilesystems, ",") {
			found := false
			for _, p := range partitions {
				if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
					addFsStat(p.Device, p.Mountpoint, false)
					found = true
					break
				}
			}
			// if not in partitions, test if we can get disk usage
			if !found {
				if _, err := disk.Usage(fs); err == nil {
					addFsStat(filepath.Base(fs), fs, false)
				} else {
					slog.Error("Invalid filesystem", "name", fs, "err", err)
				}
			}
		}
	}

	// Process partitions for various mount points
	for _, p := range partitions {
		// fmt.Println(p.Device, p.Mountpoint)
		// Binary root fallback or docker root fallback
		if !hasRoot && (p.Mountpoint == "/" || (p.Mountpoint == "/etc/hosts" && strings.HasPrefix(p.Device, "/dev") && !strings.Contains(p.Device, "mapper"))) {
			addFsStat(p.Device, "/", true)
			hasRoot = true
		}

		// Check if device is in /extra-filesystems
		if strings.HasPrefix(p.Mountpoint, efPath) {
			addFsStat(p.Device, p.Mountpoint, false)
		}
	}

	// Check all folders in /extra-filesystems and add them if not already present
	if folders, err := os.ReadDir(efPath); err == nil {
		existingMountpoints := make(map[string]bool)
		for _, stats := range a.fsStats {
			existingMountpoints[stats.Mountpoint] = true
		}
		for _, folder := range folders {
			if folder.IsDir() {
				mountpoint := filepath.Join(efPath, folder.Name())
				slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
				if !existingMountpoints[mountpoint] {
					a.fsStats[folder.Name()] = &system.FsStats{Mountpoint: mountpoint}
				}
			}
		}
	}

	// If no root filesystem set, use fallback
	if !hasRoot {
		rootDevice := findFallbackIoDevice(filepath.Base(filesystem), diskIoCounters, a.fsStats)
		slog.Info("Root disk", "mountpoint", "/", "io", rootDevice)
		a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: "/"}
	}

	a.initializeDiskIoStats(diskIoCounters)
}

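// Illustrative sketch, not from the repository: a quick check of the matching rule
// used for FILESYSTEM and EXTRA_FILESYSTEMS above. A value matches a partition either
// as a device-name suffix or as an exact mountpoint. The devices and mountpoints
// below are made up for the example.
package agent_test

import (
	"strings"
	"testing"
)

func TestFilesystemMatching(t *testing.T) {
	type partition struct{ Device, Mountpoint string }
	partitions := []partition{
		{"/dev/nvme0n1p2", "/"},
		{"/dev/sda1", "/mnt/storage"},
	}
	matches := func(fs string) bool {
		for _, p := range partitions {
			if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
				return true
			}
		}
		return false
	}
	// FILESYSTEM=sda1 matches by device suffix; EXTRA_FILESYSTEMS=/mnt/storage by mountpoint
	if !matches("sda1") || !matches("/mnt/storage") {
		t.Fatal("expected both values to match a partition")
	}
	// an unknown value falls through to the disk.Usage check in initializeDiskInfo
	if matches("sdb1") {
		t.Fatal("did not expect a match for sdb1")
	}
}
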
// Returns the device with the most reads in /proc/diskstats,
// or the device specified by the filesystem argument if it exists
func findFallbackIoDevice(filesystem string, diskIoCounters map[string]disk.IOCountersStat, fsStats map[string]*system.FsStats) string {
	var maxReadBytes uint64
	maxReadDevice := "/"
	for _, d := range diskIoCounters {
		if d.Name == filesystem {
			return d.Name
		}
		if d.ReadBytes > maxReadBytes {
			// don't use if device already exists in fsStats
			if _, exists := fsStats[d.Name]; !exists {
				maxReadBytes = d.ReadBytes
				maxReadDevice = d.Name
			}
		}
	}
	return maxReadDevice
}

// Sets start values for disk I/O stats.
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
	for device, stats := range a.fsStats {
		// skip if not in diskIoCounters
		d, exists := diskIoCounters[device]
		if !exists {
			slog.Warn("Device not found in diskstats", "name", device)
			continue
		}
		// populate initial values
		stats.Time = time.Now()
		stats.TotalRead = d.ReadBytes
		stats.TotalWrite = d.WriteBytes
		// add to list of valid io device names
		a.fsNames = append(a.fsNames, device)
	}
}
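
// Illustrative sketch, not from the repository: a worked example of the fallback
// selection above, using fabricated diskstats counters. With no explicit FILESYSTEM
// match and an empty fsStats map, the device with the most bytes read wins.
package agent

import (
	"testing"

	"beszel/internal/entities/system"

	"github.com/shirou/gopsutil/v4/disk"
)

func TestFindFallbackIoDevice(t *testing.T) {
	counters := map[string]disk.IOCountersStat{
		"sda":     {Name: "sda", ReadBytes: 5_000},
		"nvme0n1": {Name: "nvme0n1", ReadBytes: 9_000},
	}
	fsStats := map[string]*system.FsStats{}

	// an explicit match on the filesystem argument takes priority
	if got := findFallbackIoDevice("sda", counters, fsStats); got != "sda" {
		t.Fatalf("expected sda, got %s", got)
	}
	// otherwise the busiest reader is chosen
	if got := findFallbackIoDevice("missing", counters, fsStats); got != "nvme0n1" {
		t.Fatalf("expected nvme0n1, got %s", got)
	}
	// devices already tracked in fsStats are skipped
	fsStats["nvme0n1"] = &system.FsStats{}
	if got := findFallbackIoDevice("missing", counters, fsStats); got != "sda" {
		t.Fatalf("expected sda, got %s", got)
	}
}
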
@@ -1,281 +0,0 @@
package agent

import (
	"beszel/internal/entities/container"
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"net"
	"net/http"
	"net/url"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/blang/semver"
)

type dockerManager struct {
	client              *http.Client                // Client to query Docker API
	wg                  sync.WaitGroup              // WaitGroup to wait for all goroutines to finish
	sem                 chan struct{}               // Semaphore to limit concurrent container requests
	containerStatsMutex sync.RWMutex                // Mutex to prevent concurrent access to containerStatsMap
	apiContainerList    *[]container.ApiInfo        // List of containers from Docker API
	containerStatsMap   map[string]*container.Stats // Keeps track of container stats
	validIds            map[string]struct{}         // Map of valid container ids, used to prune invalid containers from containerStatsMap
}

// Add goroutine to the queue
func (d *dockerManager) queue() {
	d.sem <- struct{}{}
	d.wg.Add(1)
}

// Remove goroutine from the queue
func (d *dockerManager) dequeue() {
	<-d.sem
	d.wg.Done()
}

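// Illustrative sketch, not from the repository: queue/dequeue implement a standard
// buffered-channel semaphore plus WaitGroup. At most cap(sem) container requests run
// at once, and wg.Wait() blocks until all of them are done. A standalone example of
// the same pattern:
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	sem := make(chan struct{}, 2) // concurrency limit, like dockerManager.sem
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		sem <- struct{}{} // queue(): blocks while two workers are already running
		wg.Add(1)
		go func(n int) {
			defer func() { <-sem; wg.Done() }() // dequeue()
			time.Sleep(100 * time.Millisecond)  // stand-in for a Docker API request
			fmt.Println("finished", n)
		}(i)
	}
	wg.Wait()
}
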
// Returns stats for all running containers
func (dm *dockerManager) getDockerStats() ([]*container.Stats, error) {
	resp, err := dm.client.Get("http://localhost/containers/json")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := json.NewDecoder(resp.Body).Decode(&dm.apiContainerList); err != nil {
		return nil, err
	}

	containersLength := len(*dm.apiContainerList)

	// store valid ids to clean up old container ids from map
	if dm.validIds == nil {
		dm.validIds = make(map[string]struct{}, containersLength)
	} else {
		clear(dm.validIds)
	}

	var failedContainers []container.ApiInfo

	for _, ctr := range *dm.apiContainerList {
		ctr.IdShort = ctr.Id[:12]
		dm.validIds[ctr.IdShort] = struct{}{}
		// check if container is less than 1 minute old (possible restart)
		// note: can't use Created field because it's not updated on restart
		if strings.Contains(ctr.Status, "second") {
			// if so, remove old container data
			dm.deleteContainerStatsSync(ctr.IdShort)
		}
		dm.queue()
		go func() {
			defer dm.dequeue()
			err := dm.updateContainerStats(ctr)
			if err != nil {
				dm.containerStatsMutex.Lock()
				delete(dm.containerStatsMap, ctr.IdShort)
				failedContainers = append(failedContainers, ctr)
				dm.containerStatsMutex.Unlock()
			}
		}()
	}

	dm.wg.Wait()

	// retry failed containers separately so we can run them in parallel (docker 24 bug)
	if len(failedContainers) > 0 {
		slog.Debug("Retrying failed containers", "count", len(failedContainers))
		// time.Sleep(time.Millisecond * 1100)
		for _, ctr := range failedContainers {
			dm.wg.Add(1)
			go func() {
				defer dm.wg.Done()
				if err := dm.updateContainerStats(ctr); err != nil {
					slog.Error("Error getting container stats", "err", err)
				}
			}()
		}
		dm.wg.Wait()
	}

	// populate final stats and remove old / invalid container stats
	stats := make([]*container.Stats, 0, containersLength)
	for id, v := range dm.containerStatsMap {
		if _, exists := dm.validIds[id]; !exists {
			delete(dm.containerStatsMap, id)
		} else {
			stats = append(stats, v)
		}
	}

	return stats, nil
}

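// Illustrative sketch, not from the repository: a trimmed example of the
// /containers/json payload decoded above, with the restart heuristic applied to it.
// The values are fabricated; only the Id, Names and Status fields matter here.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	payload := `[{"Id":"4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2","Names":["/beszel-agent"],"Status":"Up 12 seconds"}]`

	var containers []struct {
		Id     string
		Names  []string
		Status string
	}
	if err := json.Unmarshal([]byte(payload), &containers); err != nil {
		panic(err)
	}
	ctr := containers[0]
	fmt.Println(ctr.Id[:12], ctr.Names[0][1:]) // short id and name without the leading slash
	// "Up 12 seconds" is treated as a possible restart, so cached stats are dropped
	fmt.Println("possible restart:", strings.Contains(ctr.Status, "second"))
}
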
// Updates stats for individual container
func (dm *dockerManager) updateContainerStats(ctr container.ApiInfo) error {
	name := ctr.Names[0][1:]

	resp, err := dm.client.Get("http://localhost/containers/" + ctr.IdShort + "/stats?stream=0&one-shot=1")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	dm.containerStatsMutex.Lock()
	defer dm.containerStatsMutex.Unlock()

	// add empty values if they don't exist in the map
	stats, initialized := dm.containerStatsMap[ctr.IdShort]
	if !initialized {
		stats = &container.Stats{Name: name}
		dm.containerStatsMap[ctr.IdShort] = stats
	}

	// reset current stats
	stats.Cpu = 0
	stats.Mem = 0
	stats.NetworkSent = 0
	stats.NetworkRecv = 0

	// docker host container stats response
	var res container.ApiStats
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		return err
	}

	// check if container has valid data, otherwise may be in restart loop (#103)
	if res.MemoryStats.Usage == 0 {
		return fmt.Errorf("%s - no memory stats - see https://github.com/henrygd/beszel/issues/144", name)
	}

	// memory (https://docs.docker.com/reference/cli/docker/container/stats/)
	memCache := res.MemoryStats.Stats.InactiveFile
	if memCache == 0 {
		memCache = res.MemoryStats.Stats.Cache
	}
	usedMemory := res.MemoryStats.Usage - memCache

	// cpu
	cpuDelta := res.CPUStats.CPUUsage.TotalUsage - stats.PrevCpu[0]
	systemDelta := res.CPUStats.SystemUsage - stats.PrevCpu[1]
	cpuPct := float64(cpuDelta) / float64(systemDelta) * 100
	if cpuPct > 100 {
		return fmt.Errorf("%s cpu pct greater than 100: %+v", name, cpuPct)
	}
	stats.PrevCpu = [2]uint64{res.CPUStats.CPUUsage.TotalUsage, res.CPUStats.SystemUsage}

	// network
	var total_sent, total_recv uint64
	for _, v := range res.Networks {
		total_sent += v.TxBytes
		total_recv += v.RxBytes
	}
	var sent_delta, recv_delta float64
	// prevent first run from sending all prev sent/recv bytes
	if initialized {
		secondsElapsed := time.Since(stats.PrevNet.Time).Seconds()
		sent_delta = float64(total_sent-stats.PrevNet.Sent) / secondsElapsed
		recv_delta = float64(total_recv-stats.PrevNet.Recv) / secondsElapsed
	}
	stats.PrevNet.Sent = total_sent
	stats.PrevNet.Recv = total_recv
	stats.PrevNet.Time = time.Now()

	stats.Cpu = twoDecimals(cpuPct)
	stats.Mem = bytesToMegabytes(float64(usedMemory))
	stats.NetworkSent = bytesToMegabytes(sent_delta)
	stats.NetworkRecv = bytesToMegabytes(recv_delta)

	return nil
}

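// Illustrative sketch, not from the repository: a worked example of the deltas
// computed above, with fabricated counter values. CPU usage is the container's share
// of the host's total CPU time between two samples; network rates divide byte deltas
// by the elapsed seconds (the MB conversion here is a plain 1e6, not the package's
// bytesToMegabytes helper).
package main

import "fmt"

func main() {
	// cpu: previous sample -> current sample (nanoseconds of CPU time)
	prevCpu, curCpu := uint64(1_000_000_000), uint64(1_250_000_000)
	prevSys, curSys := uint64(40_000_000_000), uint64(50_000_000_000)
	cpuPct := float64(curCpu-prevCpu) / float64(curSys-prevSys) * 100
	fmt.Printf("cpu: %.2f%%\n", cpuPct) // 2.50%

	// network: 30 MB sent over a 60 s interval -> 0.5 MB/s
	prevSent, curSent := uint64(0), uint64(30_000_000)
	secondsElapsed := 60.0
	fmt.Printf("sent: %.2f MB/s\n", float64(curSent-prevSent)/secondsElapsed/1_000_000)
}
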
// Delete container stats from map using mutex
func (dm *dockerManager) deleteContainerStatsSync(id string) {
	dm.containerStatsMutex.Lock()
	defer dm.containerStatsMutex.Unlock()
	delete(dm.containerStatsMap, id)
}

// Creates a new http client for Docker API
func newDockerManager() *dockerManager {
	dockerHost := "unix:///var/run/docker.sock"
	if dockerHostEnv, exists := os.LookupEnv("DOCKER_HOST"); exists {
		slog.Info("DOCKER_HOST", "host", dockerHostEnv)
		dockerHost = dockerHostEnv
	}

	parsedURL, err := url.Parse(dockerHost)
	if err != nil {
		slog.Error("Error parsing DOCKER_HOST", "err", err)
		os.Exit(1)
	}

	transport := &http.Transport{
		DisableCompression: true,
		MaxConnsPerHost:    0,
	}

	switch parsedURL.Scheme {
	case "unix":
		transport.DialContext = func(ctx context.Context, proto, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", parsedURL.Path)
		}
	case "tcp", "http", "https":
		transport.DialContext = func(ctx context.Context, proto, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "tcp", parsedURL.Host)
		}
	default:
		slog.Error("Invalid DOCKER_HOST", "scheme", parsedURL.Scheme)
		os.Exit(1)
	}

	// configurable timeout
	timeout := time.Millisecond * 2100
	if t, set := os.LookupEnv("DOCKER_TIMEOUT"); set {
		timeout, err = time.ParseDuration(t)
		if err != nil {
			slog.Error(err.Error())
			os.Exit(1)
		}
		slog.Info("DOCKER_TIMEOUT", "timeout", timeout)
	}

	dockerClient := &dockerManager{
		client: &http.Client{
			Timeout:   timeout,
			Transport: transport,
		},
		containerStatsMap: make(map[string]*container.Stats),
	}

	// Make sure sem is initialized
	concurrency := 200
	defer func() { dockerClient.sem = make(chan struct{}, concurrency) }()

	// Check docker version
	// (versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch)
	var versionInfo struct {
		Version string `json:"Version"`
	}
	resp, err := dockerClient.client.Get("http://localhost/version")
	if err != nil {
		return dockerClient
	}
	defer resp.Body.Close()

	if err := json.NewDecoder(resp.Body).Decode(&versionInfo); err != nil {
		return dockerClient
	}

	// if version > 24, one-shot works correctly and we can limit concurrent operations
	if dockerVersion, err := semver.Parse(versionInfo.Version); err == nil && dockerVersion.Major > 24 {
		concurrency = 5
	}
	slog.Debug("Docker", "version", versionInfo.Version, "concurrency", concurrency)

	return dockerClient
}
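
// Illustrative sketch, not from the repository: how the DOCKER_HOST values accepted
// above map to dial targets. The two hosts here are examples only (the local socket
// default and a hypothetical socket proxy address).
package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, host := range []string{"unix:///var/run/docker.sock", "tcp://127.0.0.1:2375"} {
		u, _ := url.Parse(host)
		switch u.Scheme {
		case "unix":
			fmt.Println("dial unix socket:", u.Path) // /var/run/docker.sock
		case "tcp", "http", "https":
			fmt.Println("dial tcp address:", u.Host) // 127.0.0.1:2375
		}
	}
	// DOCKER_TIMEOUT accepts any time.ParseDuration string, e.g. "5s"
}
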
@@ -1,67 +0,0 @@
package agent

import (
	"log/slog"
	"os"
	"strings"
	"time"

	psutilNet "github.com/shirou/gopsutil/v4/net"
)

func (a *Agent) initializeNetIoStats() {
	// reset valid network interfaces
	a.netInterfaces = make(map[string]struct{}, 0)

	// map of network interface names passed in via NICS env var
	var nicsMap map[string]struct{}
	nics, nicsEnvExists := os.LookupEnv("NICS")
	if nicsEnvExists {
		nicsMap = make(map[string]struct{}, 0)
		for _, nic := range strings.Split(nics, ",") {
			nicsMap[nic] = struct{}{}
		}
	}

	// reset network I/O stats
	a.netIoStats.BytesSent = 0
	a.netIoStats.BytesRecv = 0

	// get initial network I/O stats
	if netIO, err := psutilNet.IOCounters(true); err == nil {
		a.netIoStats.Time = time.Now()
		for _, v := range netIO {
			switch {
			// skip if nics exists and the interface is not in the list
			case nicsEnvExists:
				if _, nameInNics := nicsMap[v.Name]; !nameInNics {
					continue
				}
			// otherwise run the interface name through the skipNetworkInterface function
			default:
				if a.skipNetworkInterface(v) {
					continue
				}
			}
			slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
			a.netIoStats.BytesSent += v.BytesSent
			a.netIoStats.BytesRecv += v.BytesRecv
			// store as a valid network interface
			a.netInterfaces[v.Name] = struct{}{}
		}
	}
}

func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
	switch {
	case strings.HasPrefix(v.Name, "lo"),
		strings.HasPrefix(v.Name, "docker"),
		strings.HasPrefix(v.Name, "br-"),
		strings.HasPrefix(v.Name, "veth"),
		v.BytesRecv == 0,
		v.BytesSent == 0:
		return true
	default:
		return false
	}
}
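
// Illustrative sketch, not from the repository: a quick check of the interface filter
// above. Loopback, docker bridges and veth pairs are ignored, as is anything with zero
// traffic. Counter values are made up.
package agent

import (
	"testing"

	psutilNet "github.com/shirou/gopsutil/v4/net"
)

func TestSkipNetworkInterface(t *testing.T) {
	a := &Agent{}
	skipped := []psutilNet.IOCountersStat{
		{Name: "lo", BytesSent: 10, BytesRecv: 10},
		{Name: "docker0", BytesSent: 10, BytesRecv: 10},
		{Name: "veth1a2b3c", BytesSent: 10, BytesRecv: 10},
		{Name: "eth0", BytesSent: 0, BytesRecv: 10}, // nothing sent yet
	}
	for _, v := range skipped {
		if !a.skipNetworkInterface(v) {
			t.Errorf("expected %s to be skipped", v.Name)
		}
	}
	kept := psutilNet.IOCountersStat{Name: "eth0", BytesSent: 10, BytesRecv: 10}
	if a.skipNetworkInterface(kept) {
		t.Error("expected eth0 with traffic to be kept")
	}
}
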
@@ -1,34 +0,0 @@
package agent

import (
	"encoding/json"
	"log/slog"
	"os"

	sshServer "github.com/gliderlabs/ssh"
)

func (a *Agent) startServer(pubKey []byte, addr string) {
	sshServer.Handle(a.handleSession)

	slog.Info("Starting SSH server", "address", addr)
	if err := sshServer.ListenAndServe(addr, nil, sshServer.NoPty(),
		sshServer.PublicKeyAuth(func(ctx sshServer.Context, key sshServer.PublicKey) bool {
			allowed, _, _, _, _ := sshServer.ParseAuthorizedKey(pubKey)
			return sshServer.KeysEqual(key, allowed)
		}),
	); err != nil {
		slog.Error("Error starting SSH server", "err", err)
		os.Exit(1)
	}
}

func (a *Agent) handleSession(s sshServer.Session) {
	stats := a.gatherStats()
	if err := json.NewEncoder(s).Encode(stats); err != nil {
		slog.Error("Error encoding stats", "err", err)
		s.Exit(1)
		return
	}
	s.Exit(0)
}
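
// Illustrative sketch, not from the repository: how a client such as the hub could
// consume the JSON written by handleSession. This is not the hub's actual
// implementation; the user name, the empty command string and the host key handling
// are assumptions for the example.
package hubclient

import (
	"encoding/json"

	"beszel/internal/entities/system"

	"golang.org/x/crypto/ssh"
)

func FetchStats(addr string, signer ssh.Signer) (system.CombinedData, error) {
	var data system.CombinedData
	config := &ssh.ClientConfig{
		User:            "beszel", // arbitrary here; the agent only checks the key
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // verify host keys in real code
	}
	client, err := ssh.Dial("tcp", addr, config)
	if err != nil {
		return data, err
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		return data, err
	}
	defer session.Close()

	out, err := session.Output("") // the agent writes stats regardless of the command
	if err != nil {
		return data, err
	}
	return data, json.Unmarshal(out, &data)
}
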
@@ -1,244 +0,0 @@
package agent

import (
	"beszel"
	"beszel/internal/entities/system"
	"bufio"
	"fmt"
	"log/slog"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/shirou/gopsutil/v4/cpu"
	"github.com/shirou/gopsutil/v4/disk"
	"github.com/shirou/gopsutil/v4/host"
	"github.com/shirou/gopsutil/v4/mem"
	psutilNet "github.com/shirou/gopsutil/v4/net"
	"github.com/shirou/gopsutil/v4/sensors"
)

// Sets initial / non-changing values about the host system
func (a *Agent) initializeSystemInfo() {
	a.systemInfo.AgentVersion = beszel.Version
	a.systemInfo.Hostname, _ = os.Hostname()
	a.systemInfo.KernelVersion, _ = host.KernelVersion()

	// cpu model
	if info, err := cpu.Info(); err == nil && len(info) > 0 {
		a.systemInfo.CpuModel = info[0].ModelName
	}
	// cores / threads
	a.systemInfo.Cores, _ = cpu.Counts(false)
	if threads, err := cpu.Counts(true); err == nil {
		if threads > 0 && threads < a.systemInfo.Cores {
			// in LXC, the logical core count reflects container limits, so use it as cores if lower
			a.systemInfo.Cores = threads
		} else {
			a.systemInfo.Threads = threads
		}
	}

	// zfs
	if _, err := getARCSize(); err == nil {
		a.zfs = true
	} else {
		slog.Debug("Not monitoring ZFS ARC", "err", err)
	}
}

// Returns current info, stats about the host system
|
||||
func (a *Agent) getSystemStats() system.Stats {
|
||||
systemStats := system.Stats{}
|
||||
|
||||
// cpu percent
|
||||
cpuPct, err := cpu.Percent(0, false)
|
||||
if err != nil {
|
||||
slog.Error("Error getting cpu percent", "err", err)
|
||||
} else if len(cpuPct) > 0 {
|
||||
systemStats.Cpu = twoDecimals(cpuPct[0])
|
||||
}
|
||||
|
||||
// memory
|
||||
if v, err := mem.VirtualMemory(); err == nil {
|
||||
// swap
|
||||
systemStats.Swap = bytesToGigabytes(v.SwapTotal)
|
||||
systemStats.SwapUsed = bytesToGigabytes(v.SwapTotal - v.SwapFree - v.SwapCached)
|
||||
// cache + buffers value for default mem calculation
|
||||
cacheBuff := v.Total - v.Free - v.Used
|
||||
// htop memory calculation overrides
|
||||
if a.memCalc == "htop" {
|
||||
// note: gopsutil automatically adds SReclaimable to v.Cached
|
||||
cacheBuff = v.Cached + v.Buffers - v.Shared
|
||||
v.Used = v.Total - (v.Free + cacheBuff)
|
||||
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
||||
}
|
||||
// subtract ZFS ARC size from used memory and add as its own category
|
||||
if a.zfs {
|
||||
if arcSize, _ := getARCSize(); arcSize > 0 && arcSize < v.Used {
|
||||
v.Used = v.Used - arcSize
|
||||
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
||||
systemStats.MemZfsArc = bytesToGigabytes(arcSize)
|
||||
}
|
||||
}
|
||||
systemStats.Mem = bytesToGigabytes(v.Total)
|
||||
systemStats.MemBuffCache = bytesToGigabytes(cacheBuff)
|
||||
systemStats.MemUsed = bytesToGigabytes(v.Used)
|
||||
systemStats.MemPct = twoDecimals(v.UsedPercent)
|
||||
}
|
||||
|
||||
// disk usage
|
||||
for _, stats := range a.fsStats {
|
||||
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
||||
stats.DiskTotal = bytesToGigabytes(d.Total)
|
||||
stats.DiskUsed = bytesToGigabytes(d.Used)
|
||||
if stats.Root {
|
||||
systemStats.DiskTotal = bytesToGigabytes(d.Total)
|
||||
systemStats.DiskUsed = bytesToGigabytes(d.Used)
|
||||
systemStats.DiskPct = twoDecimals(d.UsedPercent)
|
||||
}
|
||||
} else {
|
||||
// reset stats if error (likely unmounted)
|
||||
slog.Error("Error getting disk stats", "name", stats.Mountpoint, "err", err)
|
||||
stats.DiskTotal = 0
|
||||
stats.DiskUsed = 0
|
||||
stats.TotalRead = 0
|
||||
stats.TotalWrite = 0
|
||||
}
|
||||
}
|
||||
|
||||
// disk i/o
|
||||
if ioCounters, err := disk.IOCounters(a.fsNames...); err == nil {
|
||||
for _, d := range ioCounters {
|
||||
stats := a.fsStats[d.Name]
|
||||
if stats == nil {
|
||||
continue
|
||||
}
|
||||
secondsElapsed := time.Since(stats.Time).Seconds()
|
||||
readPerSecond := float64(d.ReadBytes-stats.TotalRead) / secondsElapsed
|
||||
writePerSecond := float64(d.WriteBytes-stats.TotalWrite) / secondsElapsed
|
||||
stats.Time = time.Now()
|
||||
stats.DiskReadPs = bytesToMegabytes(readPerSecond)
|
||||
stats.DiskWritePs = bytesToMegabytes(writePerSecond)
|
||||
stats.TotalRead = d.ReadBytes
|
||||
stats.TotalWrite = d.WriteBytes
|
||||
// if root filesystem, update system stats
|
||||
if stats.Root {
|
||||
systemStats.DiskReadPs = stats.DiskReadPs
|
||||
systemStats.DiskWritePs = stats.DiskWritePs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// network stats
|
||||
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
||||
secondsElapsed := time.Since(a.netIoStats.Time).Seconds()
|
||||
a.netIoStats.Time = time.Now()
|
||||
bytesSent := uint64(0)
|
||||
bytesRecv := uint64(0)
|
||||
// sum all bytes sent and received
|
||||
for _, v := range netIO {
|
||||
// skip if not in valid network interfaces list
|
||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||
continue
|
||||
}
|
||||
bytesSent += v.BytesSent
|
||||
bytesRecv += v.BytesRecv
|
||||
}
|
||||
// add to systemStats
|
||||
sentPerSecond := float64(bytesSent-a.netIoStats.BytesSent) / secondsElapsed
|
||||
recvPerSecond := float64(bytesRecv-a.netIoStats.BytesRecv) / secondsElapsed
|
||||
networkSentPs := bytesToMegabytes(sentPerSecond)
|
||||
networkRecvPs := bytesToMegabytes(recvPerSecond)
|
||||
// add check for issue (#150) where sent is a massive number
|
||||
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
|
||||
slog.Warn("Invalid network stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
|
||||
for _, v := range netIO {
|
||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||
continue
|
||||
}
|
||||
slog.Info(v.Name, "recv", v.BytesRecv, "sent", v.BytesSent)
|
||||
}
|
||||
// reset network I/O stats
|
||||
a.initializeNetIoStats()
|
||||
} else {
|
||||
systemStats.NetworkSent = networkSentPs
|
||||
systemStats.NetworkRecv = networkRecvPs
|
||||
// update netIoStats
|
||||
a.netIoStats.BytesSent = bytesSent
|
||||
a.netIoStats.BytesRecv = bytesRecv
|
||||
}
|
||||
}
|
||||
|
||||
// temperatures (skip if sensors whitelist is set to empty string)
|
||||
if a.sensorsWhitelist != nil && len(a.sensorsWhitelist) == 0 {
|
||||
slog.Debug("Skipping temperature collection")
|
||||
} else {
|
||||
temps, err := sensors.TemperaturesWithContext(a.sensorsContext)
|
||||
if err != nil {
|
||||
slog.Debug("Sensor error", "err", err)
|
||||
}
|
||||
slog.Debug("Temperature", "sensors", temps)
|
||||
if len(temps) > 0 {
|
||||
systemStats.Temperatures = make(map[string]float64, len(temps))
|
||||
for i, sensor := range temps {
|
||||
// skip if temperature is 0
|
||||
if sensor.Temperature <= 0 || sensor.Temperature >= 200 {
|
||||
continue
|
||||
}
|
||||
if _, ok := systemStats.Temperatures[sensor.SensorKey]; ok {
|
||||
// if key already exists, append int to key
|
||||
systemStats.Temperatures[sensor.SensorKey+"_"+strconv.Itoa(i)] = twoDecimals(sensor.Temperature)
|
||||
} else {
|
||||
systemStats.Temperatures[sensor.SensorKey] = twoDecimals(sensor.Temperature)
|
||||
}
|
||||
}
|
||||
// remove sensors from systemStats if whitelist exists and sensor is not in whitelist
|
||||
// (do this here instead of in initial loop so we have correct keys if int was appended)
|
||||
if a.sensorsWhitelist != nil {
|
||||
for key := range systemStats.Temperatures {
|
||||
if _, nameInWhitelist := a.sensorsWhitelist[key]; !nameInWhitelist {
|
||||
delete(systemStats.Temperatures, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update base system info
|
||||
a.systemInfo.Cpu = systemStats.Cpu
|
||||
a.systemInfo.MemPct = systemStats.MemPct
|
||||
a.systemInfo.DiskPct = systemStats.DiskPct
|
||||
a.systemInfo.Uptime, _ = host.Uptime()
|
||||
a.systemInfo.Bandwidth = twoDecimals(systemStats.NetworkSent + systemStats.NetworkRecv)
|
||||
slog.Debug("sysinfo", "data", a.systemInfo)
|
||||
|
||||
return systemStats
|
||||
}
|
||||
|
||||
// Returns the size of the ZFS ARC memory cache in bytes
|
||||
func getARCSize() (uint64, error) {
|
||||
file, err := os.Open("/proc/spl/kstat/zfs/arcstats")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Scan the lines
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.HasPrefix(line, "size") {
|
||||
// Example line: size 4 15032385536
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 3 {
|
||||
return 0, err
|
||||
}
|
||||
// Return the size as uint64
|
||||
return strconv.ParseUint(fields[2], 10, 64)
|
||||
}
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("failed to parse size field")
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"beszel"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/rhysd/go-github-selfupdate/selfupdate"
|
||||
)
|
||||
|
||||
// Update updates beszel-agent to the latest version
|
||||
func Update() {
|
||||
var latest *selfupdate.Release
|
||||
var found bool
|
||||
var err error
|
||||
currentVersion := semver.MustParse(beszel.Version)
|
||||
fmt.Println("beszel-agent", currentVersion)
|
||||
fmt.Println("Checking for updates...")
|
||||
updater, _ := selfupdate.NewUpdater(selfupdate.Config{
|
||||
Filters: []string{"beszel-agent"},
|
||||
})
|
||||
latest, found, err = updater.DetectLatest("henrygd/beszel")
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Error checking for updates:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !found {
|
||||
fmt.Println("No updates found")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
fmt.Println("Latest version:", latest.Version)
|
||||
|
||||
if latest.Version.LTE(currentVersion) {
|
||||
fmt.Println("You are up to date")
|
||||
return
|
||||
}
|
||||
|
||||
var binaryPath string
|
||||
fmt.Printf("Updating from %s to %s...\n", currentVersion, latest.Version)
|
||||
binaryPath, err = os.Executable()
|
||||
if err != nil {
|
||||
fmt.Println("Error getting binary path:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
err = selfupdate.UpdateTo(latest.AssetURL, binaryPath)
|
||||
if err != nil {
|
||||
fmt.Println("Please try rerunning with sudo. Error:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Successfully updated to %s\n\n%s\n", latest.Version, strings.TrimSpace(latest.ReleaseNotes))
|
||||
}
|
||||
@@ -1,529 +0,0 @@
|
||||
// Package alerts handles alert management and delivery.
|
||||
package alerts
|
||||
|
||||
import (
|
||||
"beszel/internal/entities/system"
|
||||
"fmt"
|
||||
"net/mail"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containrrr/shoutrrr"
|
||||
"github.com/goccy/go-json"
|
||||
"github.com/labstack/echo/v5"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase"
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
"github.com/pocketbase/pocketbase/models"
|
||||
"github.com/pocketbase/pocketbase/tools/mailer"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"github.com/spf13/cast"
|
||||
)
|
||||
|
||||
type AlertManager struct {
|
||||
app *pocketbase.PocketBase
|
||||
}
|
||||
|
||||
type AlertMessageData struct {
|
||||
UserID string
|
||||
Title string
|
||||
Message string
|
||||
Link string
|
||||
LinkText string
|
||||
}
|
||||
|
||||
type UserNotificationSettings struct {
|
||||
Emails []string `json:"emails"`
|
||||
Webhooks []string `json:"webhooks"`
|
||||
}
|
||||
|
||||
type SystemAlertStats struct {
|
||||
Cpu float64 `json:"cpu"`
|
||||
Mem float64 `json:"mp"`
|
||||
Disk float64 `json:"dp"`
|
||||
NetSent float64 `json:"ns"`
|
||||
NetRecv float64 `json:"nr"`
|
||||
Temperatures map[string]float32 `json:"t"`
|
||||
}
|
||||
|
||||
type SystemAlertData struct {
|
||||
systemRecord *models.Record
|
||||
alertRecord *models.Record
|
||||
name string
|
||||
unit string
|
||||
val float64
|
||||
threshold float64
|
||||
triggered bool
|
||||
time time.Time
|
||||
count uint8
|
||||
min uint8
|
||||
mapSums map[string]float32
|
||||
descriptor string // override descriptor in notification body (for temp sensor, disk partition, etc)
|
||||
}
|
||||
|
||||
func NewAlertManager(app *pocketbase.PocketBase) *AlertManager {
|
||||
return &AlertManager{
|
||||
app: app,
|
||||
}
|
||||
}
|
||||
|
||||
func (am *AlertManager) HandleSystemAlerts(systemRecord *models.Record, systemInfo system.Info, temperatures map[string]float64, extraFs map[string]*system.FsStats) error {
|
||||
// start := time.Now()
|
||||
// defer func() {
|
||||
// log.Println("alert stats took", time.Since(start))
|
||||
// }()
|
||||
alertRecords, err := am.app.Dao().FindRecordsByExpr("alerts",
|
||||
dbx.NewExp("system={:system}", dbx.Params{"system": systemRecord.Id}),
|
||||
)
|
||||
if err != nil || len(alertRecords) == 0 {
|
||||
// log.Println("no alerts found for system")
|
||||
return nil
|
||||
}
|
||||
|
||||
var validAlerts []SystemAlertData
|
||||
now := systemRecord.Updated.Time().UTC()
|
||||
oldestTime := now
|
||||
|
||||
for _, alertRecord := range alertRecords {
|
||||
name := alertRecord.GetString("name")
|
||||
var val float64
|
||||
unit := "%"
|
||||
|
||||
switch name {
|
||||
case "CPU":
|
||||
val = systemInfo.Cpu
|
||||
case "Memory":
|
||||
val = systemInfo.MemPct
|
||||
case "Bandwidth":
|
||||
val = systemInfo.Bandwidth
|
||||
unit = " MB/s"
|
||||
case "Disk":
|
||||
maxUsedPct := systemInfo.DiskPct
|
||||
for _, fs := range extraFs {
|
||||
usedPct := fs.DiskUsed / fs.DiskTotal * 100
|
||||
if usedPct > maxUsedPct {
|
||||
maxUsedPct = usedPct
|
||||
}
|
||||
}
|
||||
val = maxUsedPct
|
||||
case "Temperature":
|
||||
if temperatures == nil {
|
||||
continue
|
||||
}
|
||||
for _, temp := range temperatures {
|
||||
if temp > val {
|
||||
val = temp
|
||||
}
|
||||
}
|
||||
unit = "°C"
|
||||
}
|
||||
|
||||
triggered := alertRecord.GetBool("triggered")
|
||||
threshold := alertRecord.GetFloat("value")
|
||||
|
||||
// CONTINUE
|
||||
// IF alert is not triggered and curValue is less than threshold
|
||||
// OR alert is triggered and curValue is greater than threshold
|
||||
if (!triggered && val <= threshold) || (triggered && val > threshold) {
|
||||
// log.Printf("Skipping alert %s: val %f | threshold %f | triggered %v\n", name, val, threshold, triggered)
|
||||
continue
|
||||
}
|
||||
|
||||
min := max(1, cast.ToUint8(alertRecord.Get("min")))
|
||||
// add time to alert time to make sure it's slighty after record creation
|
||||
time := now.Add(-time.Duration(min) * time.Minute)
|
||||
if time.Before(oldestTime) {
|
||||
oldestTime = time
|
||||
}
|
||||
|
||||
validAlerts = append(validAlerts, SystemAlertData{
|
||||
systemRecord: systemRecord,
|
||||
alertRecord: alertRecord,
|
||||
name: name,
|
||||
unit: unit,
|
||||
val: val,
|
||||
threshold: threshold,
|
||||
triggered: triggered,
|
||||
time: time,
|
||||
min: min,
|
||||
})
|
||||
}
|
||||
|
||||
systemStats := []struct {
|
||||
Stats []byte `db:"stats"`
|
||||
Created types.DateTime `db:"created"`
|
||||
}{}
|
||||
|
||||
err = am.app.Dao().DB().
|
||||
Select("stats", "created").
|
||||
From("system_stats").
|
||||
Where(dbx.NewExp(
|
||||
"system={:system} AND type='1m' AND created > {:created}",
|
||||
dbx.Params{
|
||||
"system": systemRecord.Id,
|
||||
// subtract some time to give us a bit of buffer
|
||||
"created": oldestTime.Add(-time.Second * 90),
|
||||
},
|
||||
)).
|
||||
OrderBy("created").
|
||||
All(&systemStats)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get oldest record creation time from first record in the slice
|
||||
oldestRecordTime := systemStats[0].Created.Time()
|
||||
// log.Println("oldestRecordTime", oldestRecordTime.String())
|
||||
|
||||
// delete from validAlerts if time is older than oldestRecord
|
||||
for i := 0; i < len(validAlerts); i++ {
|
||||
if validAlerts[i].time.Before(oldestRecordTime) {
|
||||
// log.Println("deleting alert - time is older than oldestRecord", validAlerts[i].name, oldestRecordTime, validAlerts[i].time)
|
||||
validAlerts = append(validAlerts[:i], validAlerts[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(validAlerts) == 0 {
|
||||
// log.Println("no valid alerts found")
|
||||
return nil
|
||||
}
|
||||
|
||||
var stats SystemAlertStats
|
||||
|
||||
// we can skip the latest systemStats record since it's the current value
|
||||
for i := 0; i < len(systemStats); i++ {
|
||||
stat := systemStats[i]
|
||||
// subtract 10 seconds to give a small time buffer
|
||||
systemStatsCreation := stat.Created.Time().Add(-time.Second * 10)
|
||||
if err := json.Unmarshal(stat.Stats, &stats); err != nil {
|
||||
return err
|
||||
}
|
||||
// log.Println("stats", stats)
|
||||
for j := range validAlerts {
|
||||
alert := &validAlerts[j]
|
||||
// reset alert val on first iteration
|
||||
if i == 0 {
|
||||
alert.val = 0
|
||||
}
|
||||
// continue if system_stats is older than alert time range
|
||||
if systemStatsCreation.Before(alert.time) {
|
||||
continue
|
||||
}
|
||||
// add to alert value
|
||||
switch alert.name {
|
||||
case "CPU":
|
||||
alert.val += stats.Cpu
|
||||
case "Memory":
|
||||
alert.val += stats.Mem
|
||||
case "Bandwidth":
|
||||
alert.val += stats.NetSent + stats.NetRecv
|
||||
case "Disk":
|
||||
if alert.mapSums == nil {
|
||||
alert.mapSums = make(map[string]float32, len(extraFs)+1)
|
||||
}
|
||||
// add root disk
|
||||
if _, ok := alert.mapSums["root"]; !ok {
|
||||
alert.mapSums["root"] = 0.0
|
||||
}
|
||||
alert.mapSums["root"] += float32(stats.Disk)
|
||||
// add extra disks
|
||||
for key, fs := range extraFs {
|
||||
if _, ok := alert.mapSums[key]; !ok {
|
||||
alert.mapSums[key] = 0.0
|
||||
}
|
||||
alert.mapSums[key] += float32(fs.DiskUsed / fs.DiskTotal * 100)
|
||||
}
|
||||
case "Temperature":
|
||||
if alert.mapSums == nil {
|
||||
alert.mapSums = make(map[string]float32, len(stats.Temperatures))
|
||||
}
|
||||
for key, temp := range stats.Temperatures {
|
||||
if _, ok := alert.mapSums[key]; !ok {
|
||||
alert.mapSums[key] = float32(0)
|
||||
}
|
||||
alert.mapSums[key] += temp
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
alert.count++
|
||||
}
|
||||
}
|
||||
// sum up vals for each alert
|
||||
for _, alert := range validAlerts {
|
||||
switch alert.name {
|
||||
case "Disk":
|
||||
maxPct := float32(0)
|
||||
for key, value := range alert.mapSums {
|
||||
sumPct := float32(value)
|
||||
if sumPct > maxPct {
|
||||
maxPct = sumPct
|
||||
alert.descriptor = fmt.Sprintf("Usage of %s", key)
|
||||
}
|
||||
}
|
||||
alert.val = float64(maxPct / float32(alert.count))
|
||||
case "Temperature":
|
||||
maxTemp := float32(0)
|
||||
for key, value := range alert.mapSums {
|
||||
sumTemp := float32(value) / float32(alert.count)
|
||||
if sumTemp > maxTemp {
|
||||
maxTemp = sumTemp
|
||||
alert.descriptor = fmt.Sprintf("Highest sensor %s", key)
|
||||
}
|
||||
}
|
||||
alert.val = float64(maxTemp)
|
||||
default:
|
||||
alert.val = alert.val / float64(alert.count)
|
||||
}
|
||||
minCount := float32(alert.min) / 1.2
|
||||
// log.Println("alert", alert.name, "val", alert.val, "threshold", alert.threshold, "triggered", alert.triggered)
|
||||
// log.Printf("%s: val %f | count %d | min-count %f | threshold %f\n", alert.name, alert.val, alert.count, minCount, alert.threshold)
|
||||
// pass through alert if count is greater than or equal to minCount
|
||||
if float32(alert.count) >= minCount {
|
||||
if !alert.triggered && alert.val > alert.threshold {
|
||||
alert.triggered = true
|
||||
go am.sendSystemAlert(alert)
|
||||
} else if alert.triggered && alert.val <= alert.threshold {
|
||||
alert.triggered = false
|
||||
go am.sendSystemAlert(alert)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (am *AlertManager) sendSystemAlert(alert SystemAlertData) {
|
||||
// log.Printf("Sending alert %s: val %f | count %d | threshold %f\n", alert.name, alert.val, alert.count, alert.threshold)
|
||||
systemName := alert.systemRecord.GetString("name")
|
||||
|
||||
// change Disk to Disk usage
|
||||
if alert.name == "Disk" {
|
||||
alert.name += " usage"
|
||||
}
|
||||
|
||||
// make title alert name lowercase if not CPU
|
||||
titleAlertName := alert.name
|
||||
if titleAlertName != "CPU" {
|
||||
titleAlertName = strings.ToLower(titleAlertName)
|
||||
}
|
||||
|
||||
var subject string
|
||||
if alert.triggered {
|
||||
subject = fmt.Sprintf("%s %s above threshold", systemName, titleAlertName)
|
||||
} else {
|
||||
subject = fmt.Sprintf("%s %s below threshold", systemName, titleAlertName)
|
||||
}
|
||||
minutesLabel := "minute"
|
||||
if alert.min > 1 {
|
||||
minutesLabel += "s"
|
||||
}
|
||||
if alert.descriptor == "" {
|
||||
alert.descriptor = alert.name
|
||||
}
|
||||
body := fmt.Sprintf("%s averaged %.2f%s for the previous %v %s.", alert.descriptor, alert.val, alert.unit, alert.min, minutesLabel)
|
||||
|
||||
alert.alertRecord.Set("triggered", alert.triggered)
|
||||
if err := am.app.Dao().SaveRecord(alert.alertRecord); err != nil {
|
||||
// app.Logger().Error("failed to save alert record", "err", err.Error())
|
||||
return
|
||||
}
|
||||
// expand the user relation and send the alert
|
||||
if errs := am.app.Dao().ExpandRecord(alert.alertRecord, []string{"user"}, nil); len(errs) > 0 {
|
||||
// app.Logger().Error("failed to expand user relation", "errs", errs)
|
||||
return
|
||||
}
|
||||
if user := alert.alertRecord.ExpandedOne("user"); user != nil {
|
||||
am.sendAlert(AlertMessageData{
|
||||
UserID: user.GetId(),
|
||||
Title: subject,
|
||||
Message: body,
|
||||
Link: am.app.Settings().Meta.AppUrl + "/system/" + url.PathEscape(systemName),
|
||||
LinkText: "View " + systemName,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (am *AlertManager) HandleStatusAlerts(newStatus string, oldSystemRecord *models.Record) error {
|
||||
var alertStatus string
|
||||
switch newStatus {
|
||||
case "up":
|
||||
if oldSystemRecord.GetString("status") == "down" {
|
||||
alertStatus = "up"
|
||||
}
|
||||
case "down":
|
||||
if oldSystemRecord.GetString("status") == "up" {
|
||||
alertStatus = "down"
|
||||
}
|
||||
}
|
||||
if alertStatus == "" {
|
||||
return nil
|
||||
}
|
||||
// check if use
|
||||
alertRecords, err := am.app.Dao().FindRecordsByExpr("alerts",
|
||||
dbx.HashExp{
|
||||
"system": oldSystemRecord.GetId(),
|
||||
"name": "Status",
|
||||
},
|
||||
)
|
||||
if err != nil || len(alertRecords) == 0 {
|
||||
// log.Println("no alerts found for system")
|
||||
return nil
|
||||
}
|
||||
for _, alertRecord := range alertRecords {
|
||||
// expand the user relation
|
||||
if errs := am.app.Dao().ExpandRecord(alertRecord, []string{"user"}, nil); len(errs) > 0 {
|
||||
return fmt.Errorf("failed to expand: %v", errs)
|
||||
}
|
||||
user := alertRecord.ExpandedOne("user")
|
||||
if user == nil {
|
||||
return nil
|
||||
}
|
||||
emoji := "\U0001F534"
|
||||
if alertStatus == "up" {
|
||||
emoji = "\u2705"
|
||||
}
|
||||
// send alert
|
||||
systemName := oldSystemRecord.GetString("name")
|
||||
am.sendAlert(AlertMessageData{
|
||||
UserID: user.GetId(),
|
||||
Title: fmt.Sprintf("Connection to %s is %s %v", systemName, alertStatus, emoji),
|
||||
Message: fmt.Sprintf("Connection to %s is %s", systemName, alertStatus),
|
||||
Link: am.app.Settings().Meta.AppUrl + "/system/" + url.PathEscape(systemName),
|
||||
LinkText: "View " + systemName,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (am *AlertManager) sendAlert(data AlertMessageData) {
|
||||
// get user settings
|
||||
record, err := am.app.Dao().FindFirstRecordByFilter(
|
||||
"user_settings", "user={:user}",
|
||||
dbx.Params{"user": data.UserID},
|
||||
)
|
||||
if err != nil {
|
||||
am.app.Logger().Error("Failed to get user settings", "err", err.Error())
|
||||
return
|
||||
}
|
||||
// unmarshal user settings
|
||||
userAlertSettings := UserNotificationSettings{
|
||||
Emails: []string{},
|
||||
Webhooks: []string{},
|
||||
}
|
||||
if err := record.UnmarshalJSONField("settings", &userAlertSettings); err != nil {
|
||||
am.app.Logger().Error("Failed to unmarshal user settings", "err", err.Error())
|
||||
}
|
||||
// send alerts via webhooks
|
||||
for _, webhook := range userAlertSettings.Webhooks {
|
||||
if err := am.SendShoutrrrAlert(webhook, data.Title, data.Message, data.Link, data.LinkText); err != nil {
|
||||
am.app.Logger().Error("Failed to send shoutrrr alert", "err", err.Error())
|
||||
}
|
||||
}
|
||||
// send alerts via email
|
||||
if len(userAlertSettings.Emails) == 0 {
|
||||
// log.Println("No email addresses found")
|
||||
return
|
||||
}
|
||||
addresses := []mail.Address{}
|
||||
for _, email := range userAlertSettings.Emails {
|
||||
addresses = append(addresses, mail.Address{Address: email})
|
||||
}
|
||||
message := mailer.Message{
|
||||
To: addresses,
|
||||
Subject: data.Title,
|
||||
Text: data.Message + fmt.Sprintf("\n\n%s", data.Link),
|
||||
From: mail.Address{
|
||||
Address: am.app.Settings().Meta.SenderAddress,
|
||||
Name: am.app.Settings().Meta.SenderName,
|
||||
},
|
||||
}
|
||||
if err := am.app.NewMailClient().Send(&message); err != nil {
|
||||
am.app.Logger().Error("Failed to send alert: ", "err", err.Error())
|
||||
} else {
|
||||
am.app.Logger().Info("Sent email alert", "to", message.To, "subj", message.Subject)
|
||||
}
|
||||
}
|
||||
|
||||
// SendShoutrrrAlert sends an alert via a Shoutrrr URL
|
||||
func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link, linkText string) error {
|
||||
// services that support title param
|
||||
supportsTitle := []string{"bark", "discord", "gotify", "ifttt", "join", "matrix", "ntfy", "opsgenie", "pushbullet", "pushover", "slack", "teams", "telegram", "zulip"}
|
||||
|
||||
// Parse the URL
|
||||
parsedURL, err := url.Parse(notificationUrl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing URL: %v", err)
|
||||
}
|
||||
scheme := parsedURL.Scheme
|
||||
queryParams := parsedURL.Query()
|
||||
|
||||
// Add title
|
||||
if sliceContains(supportsTitle, scheme) {
|
||||
queryParams.Add("title", title)
|
||||
} else if scheme == "mattermost" {
|
||||
// use markdown title for mattermost
|
||||
message = "##### " + title + "\n\n" + message
|
||||
} else if scheme == "generic" && queryParams.Has("template") {
|
||||
// add title as property if using generic with template json
|
||||
titleKey := queryParams.Get("titlekey")
|
||||
if titleKey == "" {
|
||||
titleKey = "title"
|
||||
}
|
||||
queryParams.Add("$"+titleKey, title)
|
||||
} else {
|
||||
// otherwise just add title to message
|
||||
message = title + "\n\n" + message
|
||||
}
|
||||
|
||||
// Add link
|
||||
if scheme == "ntfy" {
|
||||
// if ntfy, add link to actions
|
||||
queryParams.Add("Actions", fmt.Sprintf("view, %s, %s", linkText, link))
|
||||
} else {
|
||||
// else add link directly to the message
|
||||
message += "\n\n" + link
|
||||
}
|
||||
|
||||
// Encode the modified query parameters back into the URL
|
||||
parsedURL.RawQuery = queryParams.Encode()
|
||||
// log.Println("URL after modification:", parsedURL.String())
|
||||
|
||||
err = shoutrrr.Send(parsedURL.String(), message)
|
||||
|
||||
if err == nil {
|
||||
am.app.Logger().Info("Sent shoutrrr alert", "title", title)
|
||||
} else {
|
||||
am.app.Logger().Error("Error sending shoutrrr alert", "err", err.Error())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Contains checks if a string is present in a slice of strings
|
||||
func sliceContains(slice []string, item string) bool {
|
||||
for _, v := range slice {
|
||||
if v == item {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (am *AlertManager) SendTestNotification(c echo.Context) error {
|
||||
requestData := apis.RequestInfo(c)
|
||||
if requestData.AuthRecord == nil {
|
||||
return apis.NewForbiddenError("Forbidden", nil)
|
||||
}
|
||||
url := c.QueryParam("url")
|
||||
// log.Println("url", url)
|
||||
if url == "" {
|
||||
return c.JSON(200, map[string]string{"err": "URL is required"})
|
||||
}
|
||||
err := am.SendShoutrrrAlert(url, "Test Alert", "This is a notification from Beszel.", am.app.Settings().Meta.AppUrl, "View Beszel")
|
||||
if err != nil {
|
||||
return c.JSON(200, map[string]string{"err": err.Error()})
|
||||
}
|
||||
return c.JSON(200, map[string]bool{"err": false})
|
||||
}
|
||||
@@ -1,134 +0,0 @@
|
||||
package container
|
||||
|
||||
import "time"
|
||||
|
||||
// Docker container info from /containers/json
|
||||
type ApiInfo struct {
|
||||
Id string
|
||||
IdShort string
|
||||
Names []string
|
||||
Status string
|
||||
// Image string
|
||||
// ImageID string
|
||||
// Command string
|
||||
// Created int64
|
||||
// Ports []Port
|
||||
// SizeRw int64 `json:",omitempty"`
|
||||
// SizeRootFs int64 `json:",omitempty"`
|
||||
// Labels map[string]string
|
||||
// State string
|
||||
// HostConfig struct {
|
||||
// NetworkMode string `json:",omitempty"`
|
||||
// Annotations map[string]string `json:",omitempty"`
|
||||
// }
|
||||
// NetworkSettings *SummaryNetworkSettings
|
||||
// Mounts []MountPoint
|
||||
}
|
||||
|
||||
// Docker container resources from /containers/{id}/stats
|
||||
type ApiStats struct {
|
||||
// Common stats
|
||||
// Read time.Time `json:"read"`
|
||||
// PreRead time.Time `json:"preread"`
|
||||
|
||||
// Linux specific stats, not populated on Windows.
|
||||
// PidsStats PidsStats `json:"pids_stats,omitempty"`
|
||||
// BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
|
||||
|
||||
// Windows specific stats, not populated on Linux.
|
||||
// NumProcs uint32 `json:"num_procs"`
|
||||
// StorageStats StorageStats `json:"storage_stats,omitempty"`
|
||||
// Networks request version >=1.21
|
||||
Networks map[string]NetworkStats
|
||||
|
||||
// Shared stats
|
||||
CPUStats CPUStats `json:"cpu_stats,omitempty"`
|
||||
// PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
|
||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
||||
}
|
||||
|
||||
type CPUStats struct {
|
||||
// CPU Usage. Linux and Windows.
|
||||
CPUUsage CPUUsage `json:"cpu_usage"`
|
||||
|
||||
// System Usage. Linux only.
|
||||
SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
|
||||
|
||||
// Online CPUs. Linux only.
|
||||
// OnlineCPUs uint32 `json:"online_cpus,omitempty"`
|
||||
|
||||
// Throttling Data. Linux only.
|
||||
// ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
|
||||
}
|
||||
|
||||
type CPUUsage struct {
|
||||
// Total CPU time consumed.
|
||||
// Units: nanoseconds (Linux)
|
||||
// Units: 100's of nanoseconds (Windows)
|
||||
TotalUsage uint64 `json:"total_usage"`
|
||||
|
||||
// Total CPU time consumed per core (Linux). Not used on Windows.
|
||||
// Units: nanoseconds.
|
||||
// PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
|
||||
|
||||
// Time spent by tasks of the cgroup in kernel mode (Linux).
|
||||
// Time spent by all container processes in kernel mode (Windows).
|
||||
// Units: nanoseconds (Linux).
|
||||
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
|
||||
// UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
|
||||
|
||||
// Time spent by tasks of the cgroup in user mode (Linux).
|
||||
// Time spent by all container processes in user mode (Windows).
|
||||
// Units: nanoseconds (Linux).
|
||||
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
|
||||
// UsageInUsermode uint64 `json:"usage_in_usermode"`
|
||||
}
|
||||
|
||||
type MemoryStats struct {
|
||||
// current res_counter usage for memory
|
||||
Usage uint64 `json:"usage,omitempty"`
|
||||
// all the stats exported via memory.stat.
|
||||
Stats MemoryStatsStats `json:"stats,omitempty"`
|
||||
// maximum usage ever recorded.
|
||||
// MaxUsage uint64 `json:"max_usage,omitempty"`
|
||||
// TODO(vishh): Export these as stronger types.
|
||||
// number of times memory usage hits limits.
|
||||
// Failcnt uint64 `json:"failcnt,omitempty"`
|
||||
// Limit uint64 `json:"limit,omitempty"`
|
||||
|
||||
// // committed bytes
|
||||
// Commit uint64 `json:"commitbytes,omitempty"`
|
||||
// // peak committed bytes
|
||||
// CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
|
||||
// // private working set
|
||||
// PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
|
||||
}
|
||||
|
||||
type MemoryStatsStats struct {
|
||||
Cache uint64 `json:"cache,omitempty"`
|
||||
InactiveFile uint64 `json:"inactive_file,omitempty"`
|
||||
}
|
||||
|
||||
type NetworkStats struct {
|
||||
// Bytes received. Windows and Linux.
|
||||
RxBytes uint64 `json:"rx_bytes"`
|
||||
// Bytes sent. Windows and Linux.
|
||||
TxBytes uint64 `json:"tx_bytes"`
|
||||
}
|
||||
|
||||
type prevNetStats struct {
|
||||
Sent uint64
|
||||
Recv uint64
|
||||
Time time.Time
|
||||
}
|
||||
|
||||
// Docker container stats
|
||||
type Stats struct {
|
||||
Name string `json:"n"`
|
||||
Cpu float64 `json:"c"`
|
||||
Mem float64 `json:"m"`
|
||||
NetworkSent float64 `json:"ns"`
|
||||
NetworkRecv float64 `json:"nr"`
|
||||
PrevCpu [2]uint64 `json:"-"`
|
||||
PrevNet prevNetStats `json:"-"`
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
package system
|
||||
|
||||
import (
|
||||
"beszel/internal/entities/container"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Stats struct {
|
||||
Cpu float64 `json:"cpu"`
|
||||
MaxCpu float64 `json:"cpum,omitempty"`
|
||||
Mem float64 `json:"m"`
|
||||
MemUsed float64 `json:"mu"`
|
||||
MemPct float64 `json:"mp"`
|
||||
MemBuffCache float64 `json:"mb"`
|
||||
MemZfsArc float64 `json:"mz,omitempty"` // ZFS ARC memory
|
||||
Swap float64 `json:"s,omitempty"`
|
||||
SwapUsed float64 `json:"su,omitempty"`
|
||||
DiskTotal float64 `json:"d"`
|
||||
DiskUsed float64 `json:"du"`
|
||||
DiskPct float64 `json:"dp"`
|
||||
DiskReadPs float64 `json:"dr"`
|
||||
DiskWritePs float64 `json:"dw"`
|
||||
MaxDiskReadPs float64 `json:"drm,omitempty"`
|
||||
MaxDiskWritePs float64 `json:"dwm,omitempty"`
|
||||
NetworkSent float64 `json:"ns"`
|
||||
NetworkRecv float64 `json:"nr"`
|
||||
MaxNetworkSent float64 `json:"nsm,omitempty"`
|
||||
MaxNetworkRecv float64 `json:"nrm,omitempty"`
|
||||
Temperatures map[string]float64 `json:"t,omitempty"`
|
||||
ExtraFs map[string]*FsStats `json:"efs,omitempty"`
|
||||
}
|
||||
|
||||
type FsStats struct {
|
||||
Time time.Time `json:"-"`
|
||||
Root bool `json:"-"`
|
||||
Mountpoint string `json:"-"`
|
||||
DiskTotal float64 `json:"d"`
|
||||
DiskUsed float64 `json:"du"`
|
||||
TotalRead uint64 `json:"-"`
|
||||
TotalWrite uint64 `json:"-"`
|
||||
DiskReadPs float64 `json:"r"`
|
||||
DiskWritePs float64 `json:"w"`
|
||||
MaxDiskReadPS float64 `json:"rm,omitempty"`
|
||||
MaxDiskWritePS float64 `json:"wm,omitempty"`
|
||||
}
|
||||
|
||||
type NetIoStats struct {
|
||||
BytesRecv uint64
|
||||
BytesSent uint64
|
||||
Time time.Time
|
||||
Name string
|
||||
}
|
||||
|
||||
type Info struct {
|
||||
Hostname string `json:"h"`
|
||||
KernelVersion string `json:"k,omitempty"`
|
||||
Cores int `json:"c"`
|
||||
Threads int `json:"t,omitempty"`
|
||||
CpuModel string `json:"m"`
|
||||
Uptime uint64 `json:"u"`
|
||||
Cpu float64 `json:"cpu"`
|
||||
MemPct float64 `json:"mp"`
|
||||
DiskPct float64 `json:"dp"`
|
||||
Bandwidth float64 `json:"b"`
|
||||
AgentVersion string `json:"v"`
|
||||
}
|
||||
|
||||
// Final data structure to return to the hub
|
||||
type CombinedData struct {
|
||||
Stats Stats `json:"stats"`
|
||||
Info Info `json:"info"`
|
||||
Containers []*container.Stats `json:"container"`
|
||||
}
|
||||
@@ -1,222 +0,0 @@
|
||||
package hub
|
||||
|
||||
import (
|
||||
"beszel/internal/entities/system"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/labstack/echo/v5"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
"github.com/pocketbase/pocketbase/models"
|
||||
"github.com/spf13/cast"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Systems []SystemConfig `yaml:"systems"`
|
||||
}
|
||||
|
||||
type SystemConfig struct {
|
||||
Name string `yaml:"name"`
|
||||
Host string `yaml:"host"`
|
||||
Port uint16 `yaml:"port"`
|
||||
Users []string `yaml:"users"`
|
||||
}
|
||||
|
||||
// Syncs systems with the config.yml file
|
||||
func (h *Hub) syncSystemsWithConfig() error {
|
||||
configPath := filepath.Join(h.app.DataDir(), "config.yml")
|
||||
configData, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var config Config
|
||||
err = yaml.Unmarshal(configData, &config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse config.yml: %v", err)
|
||||
}
|
||||
|
||||
if len(config.Systems) == 0 {
|
||||
log.Println("No systems defined in config.yml.")
|
||||
return nil
|
||||
}
|
||||
|
||||
var firstUser *models.Record
|
||||
|
||||
// Create a map of email to user ID
|
||||
userEmailToID := make(map[string]string)
|
||||
users, err := h.app.Dao().FindRecordsByExpr("users", dbx.NewExp("id != ''"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(users) > 0 {
|
||||
firstUser = users[0]
|
||||
for _, user := range users {
|
||||
userEmailToID[user.GetString("email")] = user.Id
|
||||
}
|
||||
}
|
||||
|
||||
// add default settings for systems if not defined in config
|
||||
for i := range config.Systems {
|
||||
system := &config.Systems[i]
|
||||
if system.Port == 0 {
|
||||
system.Port = 45876
|
||||
}
|
||||
if len(users) > 0 && len(system.Users) == 0 {
|
||||
// default to first user if none are defined
|
||||
system.Users = []string{firstUser.Id}
|
||||
} else {
|
||||
// Convert email addresses to user IDs
|
||||
userIDs := make([]string, 0, len(system.Users))
|
||||
for _, email := range system.Users {
|
||||
if id, ok := userEmailToID[email]; ok {
|
||||
userIDs = append(userIDs, id)
|
||||
} else {
|
||||
log.Printf("User %s not found", email)
|
||||
}
|
||||
}
|
||||
system.Users = userIDs
|
||||
}
|
||||
}
|
||||
|
||||
// Get existing systems
|
||||
existingSystems, err := h.app.Dao().FindRecordsByExpr("systems", dbx.NewExp("id != ''"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a map of existing systems for easy lookup
|
||||
existingSystemsMap := make(map[string]*models.Record)
|
||||
for _, system := range existingSystems {
|
||||
key := system.GetString("host") + ":" + system.GetString("port")
|
||||
existingSystemsMap[key] = system
|
||||
}
|
||||
|
||||
// Process systems from config
|
||||
for _, sysConfig := range config.Systems {
|
||||
key := sysConfig.Host + ":" + strconv.Itoa(int(sysConfig.Port))
|
||||
if existingSystem, ok := existingSystemsMap[key]; ok {
|
||||
// Update existing system
|
||||
existingSystem.Set("name", sysConfig.Name)
|
||||
existingSystem.Set("users", sysConfig.Users)
|
||||
existingSystem.Set("port", sysConfig.Port)
|
||||
if err := h.app.Dao().SaveRecord(existingSystem); err != nil {
|
||||
return err
|
||||
}
|
||||
delete(existingSystemsMap, key)
|
||||
} else {
|
||||
// Create new system
|
||||
systemsCollection, err := h.app.Dao().FindCollectionByNameOrId("systems")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find systems collection: %v", err)
|
||||
}
|
||||
newSystem := models.NewRecord(systemsCollection)
|
||||
newSystem.Set("name", sysConfig.Name)
|
||||
newSystem.Set("host", sysConfig.Host)
|
||||
newSystem.Set("port", sysConfig.Port)
|
||||
newSystem.Set("users", sysConfig.Users)
|
||||
newSystem.Set("info", system.Info{})
|
||||
newSystem.Set("status", "pending")
|
||||
if err := h.app.Dao().SaveRecord(newSystem); err != nil {
|
||||
return fmt.Errorf("failed to create new system: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete systems not in config
|
||||
for _, system := range existingSystemsMap {
|
||||
if err := h.app.Dao().DeleteRecord(system); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Println("Systems synced with config.yml")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Generates content for the config.yml file as a YAML string
|
||||
func (h *Hub) generateConfigYAML() (string, error) {
|
||||
// Fetch all systems from the database
|
||||
systems, err := h.app.Dao().FindRecordsByFilter("systems", "id != ''", "name", -1, 0)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Create a Config struct to hold the data
|
||||
config := Config{
|
||||
Systems: make([]SystemConfig, 0, len(systems)),
|
||||
}
|
||||
|
||||
// Fetch all users at once
|
||||
allUserIDs := make([]string, 0)
|
||||
for _, system := range systems {
|
||||
allUserIDs = append(allUserIDs, system.GetStringSlice("users")...)
|
||||
}
|
||||
userEmailMap, err := h.getUserEmailMap(allUserIDs)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Populate the Config struct with system data
|
||||
for _, system := range systems {
|
||||
userIDs := system.GetStringSlice("users")
|
||||
userEmails := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
if email, ok := userEmailMap[userID]; ok {
|
||||
userEmails = append(userEmails, email)
|
||||
}
|
||||
}
|
||||
|
||||
sysConfig := SystemConfig{
|
||||
Name: system.GetString("name"),
|
||||
Host: system.GetString("host"),
|
||||
Port: cast.ToUint16(system.Get("port")),
|
||||
Users: userEmails,
|
||||
}
|
||||
config.Systems = append(config.Systems, sysConfig)
|
||||
}
|
||||
|
||||
// Marshal the Config struct to YAML
|
||||
yamlData, err := yaml.Marshal(&config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Add a header to the YAML
|
||||
yamlData = append([]byte("# Values for port and users are optional.\n# Defaults are port 45876 and the first created user.\n\n"), yamlData...)
|
||||
|
||||
return string(yamlData), nil
|
||||
}
|
||||
|
||||
// New helper function to get a map of user IDs to emails
|
||||
func (h *Hub) getUserEmailMap(userIDs []string) (map[string]string, error) {
|
||||
users, err := h.app.Dao().FindRecordsByIds("users", userIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
userEmailMap := make(map[string]string, len(users))
|
||||
for _, user := range users {
|
||||
userEmailMap[user.Id] = user.GetString("email")
|
||||
}
|
||||
|
||||
return userEmailMap, nil
|
||||
}
|
||||
|
||||
// Returns the current config.yml file as a JSON object
|
||||
func (h *Hub) getYamlConfig(c echo.Context) error {
|
||||
requestData := apis.RequestInfo(c)
|
||||
if requestData.AuthRecord == nil || requestData.AuthRecord.GetString("role") != "admin" {
|
||||
return apis.NewForbiddenError("Forbidden", nil)
|
||||
}
|
||||
configContent, err := h.generateConfigYAML()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.JSON(200, map[string]string{"config": configContent})
|
||||
}
|
||||
@@ -1,528 +0,0 @@
|
||||
// Package hub handles updating systems and serving the web UI.
|
||||
package hub
|
||||
|
||||
import (
|
||||
"beszel"
|
||||
"beszel/internal/alerts"
|
||||
"beszel/internal/entities/system"
|
||||
"beszel/internal/records"
|
||||
"beszel/internal/users"
|
||||
"beszel/site"
|
||||
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
"github.com/labstack/echo/v5"
|
||||
"github.com/pocketbase/pocketbase"
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/models"
|
||||
"github.com/pocketbase/pocketbase/plugins/migratecmd"
|
||||
"github.com/pocketbase/pocketbase/tools/cron"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
type Hub struct {
|
||||
app *pocketbase.PocketBase
|
||||
connectionLock *sync.Mutex
|
||||
systemConnections map[string]*ssh.Client
|
||||
sshClientConfig *ssh.ClientConfig
|
||||
pubKey string
|
||||
am *alerts.AlertManager
|
||||
um *users.UserManager
|
||||
rm *records.RecordManager
|
||||
systemStats *models.Collection
|
||||
containerStats *models.Collection
|
||||
}
|
||||
|
||||
func NewHub(app *pocketbase.PocketBase) *Hub {
|
||||
return &Hub{
|
||||
app: app,
|
||||
connectionLock: &sync.Mutex{},
|
||||
systemConnections: make(map[string]*ssh.Client),
|
||||
am: alerts.NewAlertManager(app),
|
||||
um: users.NewUserManager(app),
|
||||
rm: records.NewRecordManager(app),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hub) Run() {
|
||||
// loosely check if it was executed using "go run"
|
||||
isGoRun := strings.HasPrefix(os.Args[0], os.TempDir())
|
||||
|
||||
// enable auto creation of migration files when making collection changes in the Admin UI
|
||||
migratecmd.MustRegister(h.app, h.app.RootCmd, migratecmd.Config{
|
||||
// (the isGoRun check is to enable it only during development)
|
||||
Automigrate: isGoRun,
|
||||
Dir: "../../migrations",
|
||||
})
|
||||
|
||||
// initial setup
|
||||
h.app.OnBeforeServe().Add(func(e *core.ServeEvent) error {
|
||||
// create ssh client config
|
||||
err := h.createSSHClientConfig()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// set auth settings
|
||||
usersCollection, err := h.app.Dao().FindCollectionByNameOrId("users")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
usersAuthOptions := usersCollection.AuthOptions()
|
||||
usersAuthOptions.AllowUsernameAuth = false
|
||||
if os.Getenv("DISABLE_PASSWORD_AUTH") == "true" {
|
||||
usersAuthOptions.AllowEmailAuth = false
|
||||
} else {
|
||||
usersAuthOptions.AllowEmailAuth = true
|
||||
}
|
||||
usersCollection.SetOptions(usersAuthOptions)
|
||||
if err := h.app.Dao().SaveCollection(usersCollection); err != nil {
|
||||
return err
|
||||
}
|
||||
// sync systems with config
|
||||
return h.syncSystemsWithConfig()
|
||||
})
|
||||
|
||||
// serve web ui
|
||||
h.app.OnBeforeServe().Add(func(e *core.ServeEvent) error {
|
||||
switch isGoRun {
|
||||
case true:
|
||||
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "localhost:5173",
|
||||
})
|
||||
e.Router.Any("/*", echo.WrapHandler(proxy))
|
||||
default:
|
||||
csp, cspExists := os.LookupEnv("CSP")
|
||||
e.Router.Any("/*", func(c echo.Context) error {
|
||||
if cspExists {
|
||||
c.Response().Header().Del("X-Frame-Options")
|
||||
c.Response().Header().Set("Content-Security-Policy", csp)
|
||||
}
|
||||
indexFallback := !strings.HasPrefix(c.Request().URL.Path, "/static/")
|
||||
return apis.StaticDirectoryHandler(site.Dist, indexFallback)(c)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// set up scheduled jobs / ticker for system updates
|
||||
h.app.OnBeforeServe().Add(func(e *core.ServeEvent) error {
|
||||
// 15 second ticker for system updates
|
||||
go h.startSystemUpdateTicker()
|
||||
// set up cron jobs
|
||||
scheduler := cron.New()
|
||||
// delete old records once every hour
|
||||
scheduler.MustAdd("delete old records", "8 * * * *", h.rm.DeleteOldRecords)
|
||||
// create longer records every 10 minutes
|
||||
scheduler.MustAdd("create longer records", "*/10 * * * *", func() {
|
||||
if systemStats, containerStats, err := h.getCollections(); err == nil {
|
||||
h.rm.CreateLongerRecords([]*models.Collection{systemStats, containerStats})
|
||||
}
|
||||
})
|
||||
scheduler.Start()
|
||||
return nil
|
||||
})
|
||||
|
||||
// custom api routes
|
||||
h.app.OnBeforeServe().Add(func(e *core.ServeEvent) error {
|
||||
// returns public key
|
||||
e.Router.GET("/api/beszel/getkey", func(c echo.Context) error {
|
||||
requestData := apis.RequestInfo(c)
|
||||
if requestData.AuthRecord == nil {
|
||||
return apis.NewForbiddenError("Forbidden", nil)
|
||||
}
|
||||
return c.JSON(http.StatusOK, map[string]string{"key": h.pubKey, "v": beszel.Version})
|
||||
})
|
||||
// check if first time setup on login page
|
||||
e.Router.GET("/api/beszel/first-run", func(c echo.Context) error {
|
||||
adminNum, err := h.app.Dao().TotalAdmins()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.JSON(http.StatusOK, map[string]bool{"firstRun": adminNum == 0})
|
||||
})
|
||||
// send test notification
|
||||
e.Router.GET("/api/beszel/send-test-notification", h.am.SendTestNotification)
|
||||
// API endpoint to get config.yml content
|
||||
e.Router.GET("/api/beszel/config-yaml", h.getYamlConfig)
|
||||
return nil
|
||||
})
|
||||
|
||||
// system creation defaults
|
||||
h.app.OnModelBeforeCreate("systems").Add(func(e *core.ModelEvent) error {
|
||||
record := e.Model.(*models.Record)
|
||||
record.Set("info", system.Info{})
|
||||
record.Set("status", "pending")
|
||||
return nil
|
||||
})
|
||||
|
||||
// immediately create connection for new systems
|
||||
h.app.OnModelAfterCreate("systems").Add(func(e *core.ModelEvent) error {
|
||||
go h.updateSystem(e.Model.(*models.Record))
|
||||
return nil
|
||||
})
|
||||
|
||||
// handle default values for user / user_settings creation
|
||||
h.app.OnModelBeforeCreate("users").Add(h.um.InitializeUserRole)
|
||||
h.app.OnModelBeforeCreate("user_settings").Add(h.um.InitializeUserSettings)
|
||||
|
||||
// empty info for systems that are paused
|
||||
h.app.OnModelBeforeUpdate("systems").Add(func(e *core.ModelEvent) error {
|
||||
if e.Model.(*models.Record).GetString("status") == "paused" {
|
||||
e.Model.(*models.Record).Set("info", system.Info{})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// do things after a systems record is updated
|
||||
h.app.OnModelAfterUpdate("systems").Add(func(e *core.ModelEvent) error {
|
||||
newRecord := e.Model.(*models.Record)
|
||||
oldRecord := newRecord.OriginalCopy()
|
||||
newStatus := newRecord.GetString("status")
|
||||
|
||||
// if system is disconnected and connection exists, remove it
|
||||
if newStatus == "down" || newStatus == "paused" {
|
||||
h.deleteSystemConnection(newRecord)
|
||||
}
|
||||
|
||||
// if system is set to pending (unpause), try to connect immediately
|
||||
if newStatus == "pending" {
|
||||
go h.updateSystem(newRecord)
|
||||
} else {
|
||||
h.am.HandleStatusAlerts(newStatus, oldRecord)
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// do things after a systems record is deleted
|
||||
h.app.OnModelAfterDelete("systems").Add(func(e *core.ModelEvent) error {
|
||||
// if system connection exists, close it
|
||||
h.deleteSystemConnection(e.Model.(*models.Record))
|
||||
return nil
|
||||
})
|
||||
|
||||
if err := h.app.Start(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hub) startSystemUpdateTicker() {
|
||||
ticker := time.NewTicker(15 * time.Second)
|
||||
for range ticker.C {
|
||||
h.updateSystems()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hub) updateSystems() {
|
||||
records, err := h.app.Dao().FindRecordsByFilter(
|
||||
"2hz5ncl8tizk5nx", // systems collection
|
||||
"status != 'paused'", // filter
|
||||
"updated", // sort
|
||||
-1, // limit
|
||||
0, // offset
|
||||
)
|
||||
// log.Println("records", len(records))
|
||||
if err != nil || len(records) == 0 {
|
||||
// h.app.Logger().Error("Failed to query systems")
|
||||
return
|
||||
}
|
||||
fiftySecondsAgo := time.Now().UTC().Add(-50 * time.Second)
|
||||
batchSize := len(records)/4 + 1
|
||||
done := 0
|
||||
for _, record := range records {
|
||||
// break if batch size reached or if the system was updated less than 50 seconds ago
|
||||
if done >= batchSize || record.GetDateTime("updated").Time().After(fiftySecondsAgo) {
|
||||
break
|
||||
}
|
||||
// don't increment for down systems to avoid them jamming the queue
|
||||
// because they're always first when sorted by least recently updated
|
||||
if record.GetString("status") != "down" {
|
||||
done++
|
||||
}
|
||||
go h.updateSystem(record)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hub) updateSystem(record *models.Record) {
|
||||
var client *ssh.Client
|
||||
var err error
|
||||
|
||||
// check if system connection data exists
|
||||
if _, ok := h.systemConnections[record.Id]; ok {
|
||||
client = h.systemConnections[record.Id]
|
||||
} else {
|
||||
// create system connection
|
||||
client, err = h.createSystemConnection(record)
|
||||
if err != nil {
|
||||
if record.GetString("status") != "down" {
|
||||
h.app.Logger().Error("Failed to connect:", "err", err.Error(), "system", record.GetString("host"), "port", record.GetString("port"))
|
||||
h.updateSystemStatus(record, "down")
|
||||
}
|
||||
return
|
||||
}
|
||||
h.connectionLock.Lock()
|
||||
h.systemConnections[record.Id] = client
|
||||
h.connectionLock.Unlock()
|
||||
}
|
||||
// get system stats from agent
|
||||
var systemData system.CombinedData
|
||||
if err := h.requestJsonFromAgent(client, &systemData); err != nil {
|
||||
if err.Error() == "bad client" {
|
||||
// if previous connection was closed, try again
|
||||
h.app.Logger().Error("Existing SSH connection closed. Retrying...", "host", record.GetString("host"), "port", record.GetString("port"))
|
||||
h.deleteSystemConnection(record)
|
||||
h.updateSystem(record)
|
||||
return
|
||||
}
|
||||
h.app.Logger().Error("Failed to get system stats: ", "err", err.Error())
|
||||
h.updateSystemStatus(record, "down")
|
||||
return
|
||||
}
|
||||
// update system record
|
||||
dao := h.app.Dao()
|
||||
record.Set("status", "up")
|
||||
record.Set("info", systemData.Info)
|
||||
if err := dao.SaveRecord(record); err != nil {
|
||||
h.app.Logger().Error("Failed to update record: ", "err", err.Error())
|
||||
}
|
||||
// add system_stats and container_stats records
|
||||
if systemStats, containerStats, err := h.getCollections(); err != nil {
|
||||
h.app.Logger().Error("Failed to get collections: ", "err", err.Error())
|
||||
} else {
|
||||
// add new system_stats record
|
||||
systemStatsRecord := models.NewRecord(systemStats)
|
||||
systemStatsRecord.Set("system", record.Id)
|
||||
systemStatsRecord.Set("stats", systemData.Stats)
|
||||
systemStatsRecord.Set("type", "1m")
|
||||
if err := dao.SaveRecord(systemStatsRecord); err != nil {
|
||||
h.app.Logger().Error("Failed to save record: ", "err", err.Error())
|
||||
}
|
||||
// add new container_stats record
|
||||
if len(systemData.Containers) > 0 {
|
||||
containerStatsRecord := models.NewRecord(containerStats)
|
||||
containerStatsRecord.Set("system", record.Id)
|
||||
containerStatsRecord.Set("stats", systemData.Containers)
|
||||
containerStatsRecord.Set("type", "1m")
|
||||
			if err := dao.SaveRecord(containerStatsRecord); err != nil {
				h.app.Logger().Error("Failed to save record: ", "err", err.Error())
			}
		}
	}

	// system info alerts (todo: extra fs alerts)
	if err := h.am.HandleSystemAlerts(record, systemData.Info, systemData.Stats.Temperatures, systemData.Stats.ExtraFs); err != nil {
		h.app.Logger().Error("System alerts error", "err", err.Error())
	}
}

// return system_stats and container_stats collections
func (h *Hub) getCollections() (*models.Collection, *models.Collection, error) {
	if h.systemStats == nil {
		systemStats, err := h.app.Dao().FindCollectionByNameOrId("system_stats")
		if err != nil {
			return nil, nil, err
		}
		h.systemStats = systemStats
	}
	if h.containerStats == nil {
		containerStats, err := h.app.Dao().FindCollectionByNameOrId("container_stats")
		if err != nil {
			return nil, nil, err
		}
		h.containerStats = containerStats
	}
	return h.systemStats, h.containerStats, nil
}

// set system to specified status and save record
func (h *Hub) updateSystemStatus(record *models.Record, status string) {
	if record.GetString("status") != status {
		record.Set("status", status)
		if err := h.app.Dao().SaveRecord(record); err != nil {
			h.app.Logger().Error("Failed to update record: ", "err", err.Error())
		}
	}
}

func (h *Hub) deleteSystemConnection(record *models.Record) {
	if _, ok := h.systemConnections[record.Id]; ok {
		if h.systemConnections[record.Id] != nil {
			h.systemConnections[record.Id].Close()
		}
		h.connectionLock.Lock()
		defer h.connectionLock.Unlock()
		delete(h.systemConnections, record.Id)
	}
}

func (h *Hub) createSystemConnection(record *models.Record) (*ssh.Client, error) {
	client, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", record.GetString("host"), record.GetString("port")), h.sshClientConfig)
	if err != nil {
		return nil, err
	}
	return client, nil
}

func (h *Hub) createSSHClientConfig() error {
	key, err := h.getSSHKey()
	if err != nil {
		h.app.Logger().Error("Failed to get SSH key: ", "err", err.Error())
		return err
	}

	// Create the Signer for this private key.
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		return err
	}

	h.sshClientConfig = &ssh.ClientConfig{
		User: "u",
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(signer),
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		Timeout: 5 * time.Second,
	}
	return nil
}

// Fetches system stats from the agent and decodes the json data into the provided struct
func (h *Hub) requestJsonFromAgent(client *ssh.Client, systemData *system.CombinedData) error {
	session, err := newSessionWithTimeout(client, 5*time.Second)
	if err != nil {
		return fmt.Errorf("bad client")
	}
	defer session.Close()

	stdout, err := session.StdoutPipe()
	if err != nil {
		return err
	}

	if err := session.Shell(); err != nil {
		return err
	}

	if err := json.NewDecoder(stdout).Decode(systemData); err != nil {
		return err
	}

	// wait for the session to complete
	if err := session.Wait(); err != nil {
		return err
	}

	return nil
}

// Adds timeout to SSH session creation to avoid hanging in case of network issues
func newSessionWithTimeout(client *ssh.Client, timeout time.Duration) (*ssh.Session, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	// use goroutine to create the session
	sessionChan := make(chan *ssh.Session, 1)
	errChan := make(chan error, 1)
	go func() {
		if session, err := client.NewSession(); err != nil {
			errChan <- err
		} else {
			sessionChan <- session
		}
	}()

	select {
	case session := <-sessionChan:
		return session, nil
	case err := <-errChan:
		return nil, err
	case <-ctx.Done():
		return nil, fmt.Errorf("session creation timed out")
	}
}

func (h *Hub) getSSHKey() ([]byte, error) {
	dataDir := h.app.DataDir()
	// check if the key pair already exists
	existingKey, err := os.ReadFile(dataDir + "/id_ed25519")
	if err == nil {
		if pubKey, err := os.ReadFile(h.app.DataDir() + "/id_ed25519.pub"); err == nil {
			h.pubKey = strings.TrimSuffix(string(pubKey), "\n")
		}
		// return existing private key
		return existingKey, nil
	}

	// Generate the Ed25519 key pair
	pubKey, privKey, err := ed25519.GenerateKey(nil)
	if err != nil {
		// h.app.Logger().Error("Error generating key pair:", "err", err.Error())
		return nil, err
	}

	// Get the private key in OpenSSH format
	privKeyBytes, err := ssh.MarshalPrivateKey(privKey, "")
	if err != nil {
		// h.app.Logger().Error("Error marshaling private key:", "err", err.Error())
		return nil, err
	}

	// Save the private key to a file
	privateFile, err := os.Create(dataDir + "/id_ed25519")
	if err != nil {
		// h.app.Logger().Error("Error creating private key file:", "err", err.Error())
		return nil, err
	}
	defer privateFile.Close()

	if err := pem.Encode(privateFile, privKeyBytes); err != nil {
		// h.app.Logger().Error("Error writing private key to file:", "err", err.Error())
		return nil, err
	}

	// Generate the public key in OpenSSH format
	publicKey, err := ssh.NewPublicKey(pubKey)
	if err != nil {
		return nil, err
	}

	pubKeyBytes := ssh.MarshalAuthorizedKey(publicKey)
	h.pubKey = strings.TrimSuffix(string(pubKeyBytes), "\n")

	// Save the public key to a file
	publicFile, err := os.Create(dataDir + "/id_ed25519.pub")
	if err != nil {
		return nil, err
	}
	defer publicFile.Close()

	if _, err := publicFile.Write(pubKeyBytes); err != nil {
		return nil, err
	}

	h.app.Logger().Info("ed25519 SSH key pair generated successfully.")
	h.app.Logger().Info("Private key saved to: " + dataDir + "/id_ed25519")
	h.app.Logger().Info("Public key saved to: " + dataDir + "/id_ed25519.pub")

	existingKey, err = os.ReadFile(dataDir + "/id_ed25519")
	if err == nil {
		return existingKey, nil
	}
	return nil, err
}
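The key handling above can be exercised outside the hub with the same golang.org/x/crypto/ssh helpers. A minimal standalone sketch, assuming the key pair was already written to ./beszel_data by getSSHKey (the path and the comparison are illustrative assumptions, not part of this diff):

package main

import (
	"bytes"
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	// read the private key that getSSHKey is assumed to have written
	privBytes, err := os.ReadFile("./beszel_data/id_ed25519")
	if err != nil {
		log.Fatal(err)
	}
	// parse it with the same helper the hub uses for its ClientConfig
	signer, err := ssh.ParsePrivateKey(privBytes)
	if err != nil {
		log.Fatal(err)
	}
	// derive the authorized_keys line from the private key
	derived := ssh.MarshalAuthorizedKey(signer.PublicKey())
	// compare against the .pub file written next to it
	pubBytes, err := os.ReadFile("./beszel_data/id_ed25519.pub")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("public key matches private key:", bytes.Equal(bytes.TrimSpace(derived), bytes.TrimSpace(pubBytes)))
}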
@@ -1,57 +0,0 @@
package hub

import (
	"beszel"
	"fmt"
	"os"
	"strings"

	"github.com/blang/semver"
	"github.com/rhysd/go-github-selfupdate/selfupdate"
	"github.com/spf13/cobra"
)

// Update updates beszel to the latest version
func Update(_ *cobra.Command, _ []string) {
	var latest *selfupdate.Release
	var found bool
	var err error
	currentVersion := semver.MustParse(beszel.Version)
	fmt.Println("beszel", currentVersion)
	fmt.Println("Checking for updates...")
	updater, _ := selfupdate.NewUpdater(selfupdate.Config{
		Filters: []string{"beszel_"},
	})
	latest, found, err = updater.DetectLatest("henrygd/beszel")

	if err != nil {
		fmt.Println("Error checking for updates:", err)
		os.Exit(1)
	}

	if !found {
		fmt.Println("No updates found")
		os.Exit(0)
	}

	fmt.Println("Latest version:", latest.Version)

	if latest.Version.LTE(currentVersion) {
		fmt.Println("You are up to date")
		return
	}

	var binaryPath string
	fmt.Printf("Updating from %s to %s...\n", currentVersion, latest.Version)
	binaryPath, err = os.Executable()
	if err != nil {
		fmt.Println("Error getting binary path:", err)
		os.Exit(1)
	}
	err = selfupdate.UpdateTo(latest.AssetURL, binaryPath)
	if err != nil {
		fmt.Println("Please try rerunning with sudo. Error:", err)
		os.Exit(1)
	}
	fmt.Printf("Successfully updated to %s\n\n%s\n", latest.Version, strings.TrimSpace(latest.ReleaseNotes))
}
@@ -1,337 +0,0 @@
// Package records handles creating longer records and deleting old records.
package records

import (
	"beszel/internal/entities/container"
	"beszel/internal/entities/system"
	"log"
	"math"
	"time"

	"github.com/goccy/go-json"
	"github.com/pocketbase/dbx"
	"github.com/pocketbase/pocketbase"
	"github.com/pocketbase/pocketbase/daos"
	"github.com/pocketbase/pocketbase/models"
	"github.com/pocketbase/pocketbase/tools/types"
)

type RecordManager struct {
	app *pocketbase.PocketBase
}

type LongerRecordData struct {
	shorterType string
	longerType string
	longerTimeDuration time.Duration
	minShorterRecords int
}

type RecordDeletionData struct {
	recordType string
	retention time.Duration
}

type RecordStats []struct {
	Stats []byte `db:"stats"`
}

func NewRecordManager(app *pocketbase.PocketBase) *RecordManager {
	return &RecordManager{app}
}

// Create longer records by averaging shorter records
func (rm *RecordManager) CreateLongerRecords(collections []*models.Collection) {
	// start := time.Now()
	longerRecordData := []LongerRecordData{
		{
			shorterType: "1m",
			// change to 9 from 10 to allow edge case timing or short pauses
			minShorterRecords: 9,
			longerType: "10m",
			longerTimeDuration: -10 * time.Minute,
		},
		{
			shorterType: "10m",
			minShorterRecords: 2,
			longerType: "20m",
			longerTimeDuration: -20 * time.Minute,
		},
		{
			shorterType: "20m",
			minShorterRecords: 6,
			longerType: "120m",
			longerTimeDuration: -120 * time.Minute,
		},
		{
			shorterType: "120m",
			minShorterRecords: 4,
			longerType: "480m",
			longerTimeDuration: -480 * time.Minute,
		},
	}
	// wrap the operations in a transaction
	rm.app.Dao().RunInTransaction(func(txDao *daos.Dao) error {
		activeSystems, err := txDao.FindRecordsByExpr("systems", dbx.NewExp("status = 'up'"))
		if err != nil {
			log.Println("failed to get active systems", "err", err.Error())
			return err
		}

		// loop through all active systems, time periods, and collections
		for _, system := range activeSystems {
			// log.Println("processing system", system.GetString("name"))
			for i := range longerRecordData {
				recordData := longerRecordData[i]
				// log.Println("processing longer record type", recordData.longerType)
				// add one minute padding for longer records because they are created slightly later than the job start time
				longerRecordPeriod := time.Now().UTC().Add(recordData.longerTimeDuration + time.Minute)
				// shorter records are created independently of longer records, so we shouldn't need to add padding
				shorterRecordPeriod := time.Now().UTC().Add(recordData.longerTimeDuration)
				// loop through both collections
				for _, collection := range collections {
					// check creation time of last longer record if not 10m, since 10m is created every run
					if recordData.longerType != "10m" {
						lastLongerRecord, err := txDao.FindFirstRecordByFilter(
							collection.Id,
							"type = {:type} && system = {:system} && created > {:created}",
							dbx.Params{"type": recordData.longerType, "system": system.Id, "created": longerRecordPeriod},
						)
						// continue if longer record exists
						if err == nil || lastLongerRecord != nil {
							// log.Println("longer record found. continuing")
							continue
						}
					}
					// get shorter records from the past x minutes
					var stats RecordStats

					err := txDao.DB().
						Select("stats").
						From(collection.Name).
						AndWhere(dbx.NewExp(
							"type={:type} AND system={:system} AND created > {:created}",
							dbx.Params{
								"type": recordData.shorterType,
								"system": system.Id,
								"created": shorterRecordPeriod,
							},
						)).
						All(&stats)

					// continue if not enough shorter records
					if err != nil || len(stats) < recordData.minShorterRecords {
						// log.Println("not enough shorter records. continue.", len(allShorterRecords), recordData.expectedShorterRecords)
						continue
					}
					// average the shorter records and create longer record
					longerRecord := models.NewRecord(collection)
					longerRecord.Set("system", system.Id)
					longerRecord.Set("type", recordData.longerType)
					switch collection.Name {
					case "system_stats":
						longerRecord.Set("stats", rm.AverageSystemStats(stats))
					case "container_stats":
						longerRecord.Set("stats", rm.AverageContainerStats(stats))
					}
					if err := txDao.SaveRecord(longerRecord); err != nil {
						log.Println("failed to save longer record", "err", err.Error())
					}
				}
			}
		}

		return nil
	})

	// log.Println("finished creating longer records", "time (ms)", time.Since(start).Milliseconds())
}

// Calculate the average stats of a list of system_stats records without reflect
func (rm *RecordManager) AverageSystemStats(records RecordStats) system.Stats {
	sum := system.Stats{
		Temperatures: make(map[string]float64),
		ExtraFs: make(map[string]*system.FsStats),
	}

	count := float64(len(records))
	// use different counter for temps in case some records don't have them
	tempCount := float64(0)

	var stats system.Stats
	for i := range records {
		json.Unmarshal(records[i].Stats, &stats)
		sum.Cpu += stats.Cpu
		sum.Mem += stats.Mem
		sum.MemUsed += stats.MemUsed
		sum.MemPct += stats.MemPct
		sum.MemBuffCache += stats.MemBuffCache
		sum.MemZfsArc += stats.MemZfsArc
		sum.Swap += stats.Swap
		sum.SwapUsed += stats.SwapUsed
		sum.DiskTotal += stats.DiskTotal
		sum.DiskUsed += stats.DiskUsed
		sum.DiskPct += stats.DiskPct
		sum.DiskReadPs += stats.DiskReadPs
		sum.DiskWritePs += stats.DiskWritePs
		sum.NetworkSent += stats.NetworkSent
		sum.NetworkRecv += stats.NetworkRecv
		// set peak values
		sum.MaxCpu = max(sum.MaxCpu, stats.MaxCpu, stats.Cpu)
		sum.MaxNetworkSent = max(sum.MaxNetworkSent, stats.MaxNetworkSent, stats.NetworkSent)
		sum.MaxNetworkRecv = max(sum.MaxNetworkRecv, stats.MaxNetworkRecv, stats.NetworkRecv)
		sum.MaxDiskReadPs = max(sum.MaxDiskReadPs, stats.MaxDiskReadPs, stats.DiskReadPs)
		sum.MaxDiskWritePs = max(sum.MaxDiskWritePs, stats.MaxDiskWritePs, stats.DiskWritePs)
		// add temps to sum
		if stats.Temperatures != nil {
			tempCount++
			for key, value := range stats.Temperatures {
				if _, ok := sum.Temperatures[key]; !ok {
					sum.Temperatures[key] = 0
				}
				sum.Temperatures[key] += value
			}
		}
		// add extra fs to sum
		if stats.ExtraFs != nil {
			for key, value := range stats.ExtraFs {
				if _, ok := sum.ExtraFs[key]; !ok {
					sum.ExtraFs[key] = &system.FsStats{}
				}
				sum.ExtraFs[key].DiskTotal += value.DiskTotal
				sum.ExtraFs[key].DiskUsed += value.DiskUsed
				sum.ExtraFs[key].DiskWritePs += value.DiskWritePs
				sum.ExtraFs[key].DiskReadPs += value.DiskReadPs
				// peak values
				sum.ExtraFs[key].MaxDiskReadPS = max(sum.ExtraFs[key].MaxDiskReadPS, value.MaxDiskReadPS, value.DiskReadPs)
				sum.ExtraFs[key].MaxDiskWritePS = max(sum.ExtraFs[key].MaxDiskWritePS, value.MaxDiskWritePS, value.DiskWritePs)
			}
		}
	}

	stats = system.Stats{
		Cpu: twoDecimals(sum.Cpu / count),
		Mem: twoDecimals(sum.Mem / count),
		MemUsed: twoDecimals(sum.MemUsed / count),
		MemPct: twoDecimals(sum.MemPct / count),
		MemBuffCache: twoDecimals(sum.MemBuffCache / count),
		MemZfsArc: twoDecimals(sum.MemZfsArc / count),
		Swap: twoDecimals(sum.Swap / count),
		SwapUsed: twoDecimals(sum.SwapUsed / count),
		DiskTotal: twoDecimals(sum.DiskTotal / count),
		DiskUsed: twoDecimals(sum.DiskUsed / count),
		DiskPct: twoDecimals(sum.DiskPct / count),
		DiskReadPs: twoDecimals(sum.DiskReadPs / count),
		DiskWritePs: twoDecimals(sum.DiskWritePs / count),
		NetworkSent: twoDecimals(sum.NetworkSent / count),
		NetworkRecv: twoDecimals(sum.NetworkRecv / count),
		MaxCpu: sum.MaxCpu,
		MaxDiskReadPs: sum.MaxDiskReadPs,
		MaxDiskWritePs: sum.MaxDiskWritePs,
		MaxNetworkSent: sum.MaxNetworkSent,
		MaxNetworkRecv: sum.MaxNetworkRecv,
	}

	if len(sum.Temperatures) != 0 {
		stats.Temperatures = make(map[string]float64, len(sum.Temperatures))
		for key, value := range sum.Temperatures {
			stats.Temperatures[key] = twoDecimals(value / tempCount)
		}
	}

	if len(sum.ExtraFs) != 0 {
		stats.ExtraFs = make(map[string]*system.FsStats, len(sum.ExtraFs))
		for key, value := range sum.ExtraFs {
			stats.ExtraFs[key] = &system.FsStats{
				DiskTotal: twoDecimals(value.DiskTotal / count),
				DiskUsed: twoDecimals(value.DiskUsed / count),
				DiskWritePs: twoDecimals(value.DiskWritePs / count),
				DiskReadPs: twoDecimals(value.DiskReadPs / count),
				MaxDiskReadPS: value.MaxDiskReadPS,
				MaxDiskWritePS: value.MaxDiskWritePS,
			}
		}
	}

	return stats
}

// Calculate the average stats of a list of container_stats records
func (rm *RecordManager) AverageContainerStats(records RecordStats) []container.Stats {
	sums := make(map[string]*container.Stats)
	count := float64(len(records))

	var containerStats []container.Stats
	for i := range records {
		// Reset the slice length to 0, but keep the capacity
		containerStats = containerStats[:0]
		if err := json.Unmarshal(records[i].Stats, &containerStats); err != nil {
			return []container.Stats{}
		}
		for i := range containerStats {
			stat := containerStats[i]
			if _, ok := sums[stat.Name]; !ok {
				sums[stat.Name] = &container.Stats{Name: stat.Name}
			}
			sums[stat.Name].Cpu += stat.Cpu
			sums[stat.Name].Mem += stat.Mem
			sums[stat.Name].NetworkSent += stat.NetworkSent
			sums[stat.Name].NetworkRecv += stat.NetworkRecv
		}
	}

	result := make([]container.Stats, 0, len(sums))
	for _, value := range sums {
		result = append(result, container.Stats{
			Name: value.Name,
			Cpu: twoDecimals(value.Cpu / count),
			Mem: twoDecimals(value.Mem / count),
			NetworkSent: twoDecimals(value.NetworkSent / count),
			NetworkRecv: twoDecimals(value.NetworkRecv / count),
		})
	}
	return result
}

// Deletes records older than what is displayed in the UI
func (rm *RecordManager) DeleteOldRecords() {
	collections := []string{"system_stats", "container_stats"}
	recordData := []RecordDeletionData{
		{
			recordType: "1m",
			retention: time.Hour,
		},
		{
			recordType: "10m",
			retention: 12 * time.Hour,
		},
		{
			recordType: "20m",
			retention: 24 * time.Hour,
		},
		{
			recordType: "120m",
			retention: 7 * 24 * time.Hour,
		},
		{
			recordType: "480m",
			retention: 30 * 24 * time.Hour,
		},
	}
	db := rm.app.Dao().NonconcurrentDB()
	for _, recordData := range recordData {
		for _, collectionSlug := range collections {
			formattedDate := time.Now().UTC().Add(-recordData.retention).Format(types.DefaultDateLayout)
			expr := dbx.NewExp("[[created]] < {:date} AND [[type]] = {:type}", dbx.Params{"date": formattedDate, "type": recordData.recordType})
			_, err := db.Delete(collectionSlug, expr).Execute()
			if err != nil {
				rm.app.Logger().Error("Failed to delete records", "err", err.Error())
			}
		}
	}
}

/* Round float to two decimals */
func twoDecimals(value float64) float64 {
	return math.Round(value*100) / 100
}
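The roll-up in CreateLongerRecords is easier to see on a single metric. A simplified, self-contained sketch of the same averaging idea; the Sample type and the sample values are assumptions for illustration, not the RecordStats/system.Stats types above:

package main

import (
	"fmt"
	"math"
)

// Sample stands in for one short-interval stats record.
type Sample struct {
	Cpu float64
}

// rollUp averages a batch of shorter samples into one longer sample,
// mirroring the minShorterRecords guard used by CreateLongerRecords.
func rollUp(shorter []Sample, minShorter int) (Sample, bool) {
	if len(shorter) < minShorter {
		return Sample{}, false // not enough shorter records yet, skip this run
	}
	var sum float64
	for _, s := range shorter {
		sum += s.Cpu
	}
	avg := math.Round(sum/float64(len(shorter))*100) / 100 // same rounding as twoDecimals
	return Sample{Cpu: avg}, true
}

func main() {
	oneMinute := []Sample{{12.1}, {14.9}, {9.7}, {11.3}, {10.0}, {13.2}, {12.8}, {15.5}, {11.1}}
	if tenMinute, ok := rollUp(oneMinute, 9); ok { // 1m -> 10m requires at least 9 records
		fmt.Printf("10m average CPU: %.2f\n", tenMinute.Cpu)
	}
}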
@@ -1,65 +0,0 @@
// Package users handles user-related custom functionality.
package users

import (
	"log"

	"github.com/pocketbase/pocketbase"
	"github.com/pocketbase/pocketbase/core"
	"github.com/pocketbase/pocketbase/models"
)

type UserManager struct {
	app *pocketbase.PocketBase
}

type UserSettings struct {
	ChartTime string `json:"chartTime"`
	NotificationEmails []string `json:"emails"`
	NotificationWebhooks []string `json:"webhooks"`
	// Language string `json:"lang"`
}

func NewUserManager(app *pocketbase.PocketBase) *UserManager {
	return &UserManager{
		app: app,
	}
}

func (um *UserManager) InitializeUserRole(e *core.ModelEvent) error {
	user := e.Model.(*models.Record)
	if user.GetString("role") == "" {
		user.Set("role", "user")
	}
	return nil
}

func (um *UserManager) InitializeUserSettings(e *core.ModelEvent) error {
	record := e.Model.(*models.Record)
	// initialize settings with defaults
	settings := UserSettings{
		// Language: "en",
		ChartTime: "1h",
		NotificationEmails: []string{},
		NotificationWebhooks: []string{},
	}
	record.UnmarshalJSONField("settings", &settings)
	if len(settings.NotificationEmails) == 0 {
		// get user email from auth record
		if errs := um.app.Dao().ExpandRecord(record, []string{"user"}, nil); len(errs) == 0 {
			// app.Logger().Error("failed to expand user relation", "errs", errs)
			if user := record.ExpandedOne("user"); user != nil {
				settings.NotificationEmails = []string{user.GetString("email")}
			} else {
				log.Println("Failed to get user email from auth record")
			}
		} else {
			log.Println("failed to expand user relation", "errs", errs)
		}
	}
	// if len(settings.NotificationWebhooks) == 0 {
	// settings.NotificationWebhooks = []string{""}
	// }
	record.Set("settings", settings)
	return nil
}
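For reference, the defaults set by InitializeUserSettings serialize into a small JSON blob stored on the user_settings record. A quick sketch of that serialization; the email address is a placeholder, not data from the diff:

package main

import (
	"encoding/json"
	"fmt"
)

// mirrors the UserSettings struct above
type UserSettings struct {
	ChartTime            string   `json:"chartTime"`
	NotificationEmails   []string `json:"emails"`
	NotificationWebhooks []string `json:"webhooks"`
}

func main() {
	// defaults, with the auth record's email filled in as the first notification target
	settings := UserSettings{
		ChartTime:            "1h",
		NotificationEmails:   []string{"user@example.com"}, // placeholder address
		NotificationWebhooks: []string{},
	}
	out, _ := json.Marshal(settings)
	fmt.Println(string(out)) // {"chartTime":"1h","emails":["user@example.com"],"webhooks":[]}
}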
@@ -1,481 +0,0 @@
package migrations

import (
	"encoding/json"

	"github.com/pocketbase/dbx"
	"github.com/pocketbase/pocketbase/daos"
	m "github.com/pocketbase/pocketbase/migrations"
	"github.com/pocketbase/pocketbase/models"
)

func init() {
	m.Register(func(db dbx.Builder) error {
		jsonData := `[
			{
				"id": "2hz5ncl8tizk5nx",
				"created": "2024-07-07 16:08:20.979Z",
				"updated": "2024-10-12 18:55:51.623Z",
				"name": "systems",
				"type": "base",
				"system": false,
				"schema": [
					{
						"system": false,
						"id": "7xloxkwk",
						"name": "name",
						"type": "text",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"min": null,
							"max": null,
							"pattern": ""
						}
					},
					{
						"system": false,
						"id": "waj7seaf",
						"name": "status",
						"type": "select",
						"required": false,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSelect": 1,
							"values": [
								"up",
								"down",
								"paused",
								"pending"
							]
						}
					},
					{
						"system": false,
						"id": "ve781smf",
						"name": "host",
						"type": "text",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"min": null,
							"max": null,
							"pattern": ""
						}
					},
					{
						"system": false,
						"id": "pij0k2jk",
						"name": "port",
						"type": "text",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"min": null,
							"max": null,
							"pattern": ""
						}
					},
					{
						"system": false,
						"id": "qoq64ntl",
						"name": "info",
						"type": "json",
						"required": false,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSize": 2000000
						}
					},
					{
						"system": false,
						"id": "jcarjnjj",
						"name": "users",
						"type": "relation",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"collectionId": "_pb_users_auth_",
							"cascadeDelete": true,
							"minSelect": null,
							"maxSelect": null,
							"displayFields": null
						}
					}
				],
				"indexes": [],
				"listRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id",
				"viewRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id",
				"createRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id && @request.auth.role != \"readonly\"",
				"updateRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id && @request.auth.role != \"readonly\"",
				"deleteRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id && @request.auth.role != \"readonly\"",
				"options": {}
			},
			{
				"id": "ej9oowivz8b2mht",
				"created": "2024-07-07 16:09:09.179Z",
				"updated": "2024-10-12 18:55:51.623Z",
				"name": "system_stats",
				"type": "base",
				"system": false,
				"schema": [
					{
						"system": false,
						"id": "h9sg148r",
						"name": "system",
						"type": "relation",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"collectionId": "2hz5ncl8tizk5nx",
							"cascadeDelete": true,
							"minSelect": null,
							"maxSelect": 1,
							"displayFields": null
						}
					},
					{
						"system": false,
						"id": "azftn0be",
						"name": "stats",
						"type": "json",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSize": 2000000
						}
					},
					{
						"system": false,
						"id": "m1ekhli3",
						"name": "type",
						"type": "select",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSelect": 1,
							"values": [
								"1m",
								"10m",
								"20m",
								"120m",
								"480m"
							]
						}
					}
				],
				"indexes": [
					"CREATE INDEX ` + "`" + `idx_GxIee0j` + "`" + ` ON ` + "`" + `system_stats` + "`" + ` (` + "`" + `system` + "`" + `)"
				],
				"listRule": "@request.auth.id != \"\"",
				"viewRule": null,
				"createRule": null,
				"updateRule": null,
				"deleteRule": null,
				"options": {}
			},
			{
				"id": "juohu4jipgc13v7",
				"created": "2024-07-07 16:09:57.976Z",
				"updated": "2024-10-12 18:55:51.623Z",
				"name": "container_stats",
				"type": "base",
				"system": false,
				"schema": [
					{
						"system": false,
						"id": "hutcu6ps",
						"name": "system",
						"type": "relation",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"collectionId": "2hz5ncl8tizk5nx",
							"cascadeDelete": true,
							"minSelect": null,
							"maxSelect": 1,
							"displayFields": null
						}
					},
					{
						"system": false,
						"id": "r39hhnil",
						"name": "stats",
						"type": "json",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSize": 2000000
						}
					},
					{
						"system": false,
						"id": "vo7iuj96",
						"name": "type",
						"type": "select",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSelect": 1,
							"values": [
								"1m",
								"10m",
								"20m",
								"120m",
								"480m"
							]
						}
					}
				],
				"indexes": [],
				"listRule": "@request.auth.id != \"\"",
				"viewRule": null,
				"createRule": null,
				"updateRule": null,
				"deleteRule": null,
				"options": {}
			},
			{
				"id": "_pb_users_auth_",
				"created": "2024-07-14 16:25:18.226Z",
				"updated": "2024-10-12 22:27:19.081Z",
				"name": "users",
				"type": "auth",
				"system": false,
				"schema": [
					{
						"system": false,
						"id": "qkbp58ae",
						"name": "role",
						"type": "select",
						"required": false,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSelect": 1,
							"values": [
								"user",
								"admin",
								"readonly"
							]
						}
					},
					{
						"system": false,
						"id": "users_avatar",
						"name": "avatar",
						"type": "file",
						"required": false,
						"presentable": false,
						"unique": false,
						"options": {
							"mimeTypes": [
								"image/jpeg",
								"image/png",
								"image/svg+xml",
								"image/gif",
								"image/webp"
							],
							"thumbs": null,
							"maxSelect": 1,
							"maxSize": 5242880,
							"protected": false
						}
					}
				],
				"indexes": [],
				"listRule": "id = @request.auth.id",
				"viewRule": "id = @request.auth.id",
				"createRule": null,
				"updateRule": null,
				"deleteRule": null,
				"options": {
					"allowEmailAuth": true,
					"allowOAuth2Auth": true,
					"allowUsernameAuth": false,
					"exceptEmailDomains": null,
					"manageRule": null,
					"minPasswordLength": 8,
					"onlyEmailDomains": null,
					"onlyVerified": true,
					"requireEmail": false
				}
			},
			{
				"id": "elngm8x1l60zi2v",
				"created": "2024-07-15 01:16:04.044Z",
				"updated": "2024-10-12 22:27:29.128Z",
				"name": "alerts",
				"type": "base",
				"system": false,
				"schema": [
					{
						"system": false,
						"id": "hn5ly3vi",
						"name": "user",
						"type": "relation",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"collectionId": "_pb_users_auth_",
							"cascadeDelete": true,
							"minSelect": null,
							"maxSelect": 1,
							"displayFields": null
						}
					},
					{
						"system": false,
						"id": "g5sl3jdg",
						"name": "system",
						"type": "relation",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"collectionId": "2hz5ncl8tizk5nx",
							"cascadeDelete": true,
							"minSelect": null,
							"maxSelect": 1,
							"displayFields": null
						}
					},
					{
						"system": false,
						"id": "zj3ingrv",
						"name": "name",
						"type": "select",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSelect": 1,
							"values": [
								"Status",
								"CPU",
								"Memory",
								"Disk",
								"Temperature",
								"Bandwidth"
							]
						}
					},
					{
						"system": false,
						"id": "o2ablxvn",
						"name": "value",
						"type": "number",
						"required": false,
						"presentable": false,
						"unique": false,
						"options": {
							"min": null,
							"max": null,
							"noDecimal": false
						}
					},
					{
						"system": false,
						"id": "fstdehcq",
						"name": "min",
						"type": "number",
						"required": false,
						"presentable": false,
						"unique": false,
						"options": {
							"min": null,
							"max": 60,
							"noDecimal": true
						}
					},
					{
						"system": false,
						"id": "6hgdf6hs",
						"name": "triggered",
						"type": "bool",
						"required": false,
						"presentable": false,
						"unique": false,
						"options": {}
					}
				],
				"indexes": [],
				"listRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
				"viewRule": "",
				"createRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
				"updateRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
				"deleteRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
				"options": {}
			},
			{
				"id": "4afacsdnlu8q8r2",
				"created": "2024-09-12 17:42:55.324Z",
				"updated": "2024-10-12 18:55:51.624Z",
				"name": "user_settings",
				"type": "base",
				"system": false,
				"schema": [
					{
						"system": false,
						"id": "d5vztyxa",
						"name": "user",
						"type": "relation",
						"required": true,
						"presentable": false,
						"unique": false,
						"options": {
							"collectionId": "_pb_users_auth_",
							"cascadeDelete": false,
							"minSelect": null,
							"maxSelect": 1,
							"displayFields": null
						}
					},
					{
						"system": false,
						"id": "xcx4qgqq",
						"name": "settings",
						"type": "json",
						"required": false,
						"presentable": false,
						"unique": false,
						"options": {
							"maxSize": 2000000
						}
					}
				],
				"indexes": [
					"CREATE UNIQUE INDEX ` + "`" + `idx_30Lwgf2` + "`" + ` ON ` + "`" + `user_settings` + "`" + ` (` + "`" + `user` + "`" + `)"
				],
				"listRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
				"viewRule": null,
				"createRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
				"updateRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
				"deleteRule": null,
				"options": {}
			}
		]`

		collections := []*models.Collection{}
		if err := json.Unmarshal([]byte(jsonData), &collections); err != nil {
			return err
		}

		return daos.New(db).ImportCollections(collections, true, nil)
	}, func(db dbx.Builder) error {
		return nil
	})
}
@@ -1,19 +0,0 @@
package migrations

import (
	"github.com/pocketbase/dbx"
	"github.com/pocketbase/pocketbase/daos"
	m "github.com/pocketbase/pocketbase/migrations"
)

func init() {
	m.Register(func(db dbx.Builder) error {
		dao := daos.New(db)

		settings, _ := dao.FindSettings()
		settings.Meta.AppName = "Beszel"
		settings.Meta.HideControls = true

		return dao.SaveSettings(settings)
	}, nil)
}
Binary file not shown.
@@ -1,13 +0,0 @@
// Package site handles the Beszel frontend embedding.
package site

import (
	"embed"

	"github.com/labstack/echo/v5"
)

//go:embed all:dist
var assets embed.FS

var Dist = echo.MustSubFS(assets, "dist")
@@ -1,13 +0,0 @@
<!DOCTYPE html>
<html lang="en" dir="ltr">
	<head>
		<meta charset="UTF-8" />
		<link rel="icon" type="image/svg+xml" href="/static/favicon.svg" />
		<meta name="viewport" content="width=device-width, initial-scale=1.0" />
		<title>Beszel</title>
	</head>
	<body>
		<div id="app"></div>
		<script type="module" src="/src/main.tsx"></script>
	</body>
</html>
@@ -1,15 +0,0 @@
import type { LinguiConfig } from "@lingui/conf"

const config: LinguiConfig = {
	locales: ["en", "ar", "de", "es", "fr", "it", "ja", "ko", "pt", "tr", "ru", "uk", "vi", "zh-CN", "zh-HK"],
	sourceLocale: "en",
	compileNamespace: "ts",
	catalogs: [
		{
			path: "<rootDir>/src/locales/{locale}/{locale}",
			include: ["src"],
		},
	],
}

export default config
beszel/site/package-lock.json (generated, 8240 lines changed): diff suppressed because it is too large. Some files were not shown because too many files have changed in this diff.