mirror of https://github.com/henrygd/beszel.git (synced 2025-11-25 14:33:25 +00:00)
Compare commits
51 Commits
| Author | SHA1 | Date |
|---|---|---|
| | a213b70a1c | |
| | 66cc0a4b24 | |
| | f051f6a5f8 | |
| | b9f142c28c | |
| | 45e1283b83 | |
| | 94cb5f2798 | |
| | 2883467b2b | |
| | 0c77190161 | |
| | 8d4d072343 | |
| | d6e0daf52a | |
| | 22e9ede766 | |
| | 9ab359d3cf | |
| | 5447ccad47 | |
| | 3e51d79c37 | |
| | 0996d60224 | |
| | 7a5ec067f5 | |
| | 98563d643d | |
| | 268e364bd4 | |
| | dd84a9fd35 | |
| | 2f4e537f72 | |
| | 9637363cf3 | |
| | 73d0dd25ec | |
| | 2ecf5572ba | |
| | 5e97167ee0 | |
| | 1a4862ecd9 | |
| | 6235d15fa2 | |
| | 4694642674 | |
| | 56c0b86025 | |
| | 82e3f3c7c1 | |
| | 38a9c535b8 | |
| | 34c83e7c17 | |
| | fe5732d75a | |
| | cc32b50d82 | |
| | 764e043e83 | |
| | cec9339f6d | |
| | f96f04f876 | |
| | 06b1c2200b | |
| | e88e2bf3dc | |
| | 8621a45383 | |
| | f2ddee9216 | |
| | f350d61ee2 | |
| | 2d670c585d | |
| | 55d1c00903 | |
| | 78a9086b55 | |
| | 4ee169fea5 | |
| | a286bed54c | |
| | 314cee081a | |
| | e287124632 | |
| | 9cccefd3fa | |
| | ec95f63806 | |
| | 812fe20df7 | |
4 .github/workflows/docker-images.yml vendored
@@ -3,7 +3,7 @@ name: Make docker images
on:
push:
tags:
- '*'
- 'v*'
jobs:
build:
@@ -71,5 +71,3 @@ jobs:
push: ${{ github.ref_type == 'tag' }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
cache-from: type=gha
cache-to: type=gha

1 .gitignore vendored
@@ -11,4 +11,3 @@ dist
beszel/cmd/hub/hub
beszel/cmd/agent/agent
node_modules
package-lock.json
@@ -38,5 +38,5 @@ func main() {
		addr = portEnvVar
	}

	agent.NewAgent(pubKey, addr).Run()
	agent.NewAgent().Run(pubKey, addr)
}
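The hunk above only moves where the public key and listen address are supplied: from the constructor to Run. A minimal sketch of the resulting entrypoint, assuming the agent package import path from this repo; the env-var names and default address below are placeholders, not the project's actual configuration:

package main

import (
	"os"

	"beszel/internal/agent" // assumed import path for the agent package
)

func main() {
	// Placeholder sources; the real main resolves these from its own env vars.
	pubKey := []byte(os.Getenv("KEY"))
	addr := ":45876"
	if portEnvVar, exists := os.LookupEnv("PORT"); exists {
		addr = portEnvVar
	}

	// Old API: agent.NewAgent(pubKey, addr).Run()
	// New API: the constructor takes no arguments; Run receives the config.
	agent.NewAgent().Run(pubKey, addr)
}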
@@ -9,9 +9,9 @@ require (
	github.com/goccy/go-json v0.10.3
	github.com/labstack/echo/v5 v5.0.0-20230722203903-ec5b858dab61
	github.com/pocketbase/dbx v1.10.1
	github.com/pocketbase/pocketbase v0.22.20
	github.com/pocketbase/pocketbase v0.22.21
	github.com/rhysd/go-github-selfupdate v1.2.3
	github.com/shirou/gopsutil/v4 v4.24.8
	github.com/shirou/gopsutil/v4 v4.24.9
	github.com/spf13/cobra v1.8.1
	golang.org/x/crypto v0.27.0
)
@@ -20,29 +20,30 @@ require (
	github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/aws/aws-sdk-go-v2 v1.30.5 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.27.33 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
	github.com/aws/aws-sdk-go-v2 v1.31.0 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.27.39 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.17.37 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.26 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 // indirect
	github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect
	github.com/aws/smithy-go v1.20.4 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 // indirect
	github.com/aws/aws-sdk-go-v2/service/s3 v1.64.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.23.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.31.3 // indirect
	github.com/aws/smithy-go v1.21.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/disintegration/imaging v1.6.2 // indirect
	github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/ebitengine/purego v0.8.0 // indirect
	github.com/fatih/color v1.17.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.5 // indirect
	github.com/ganigeorgiev/fexpr v0.4.1 // indirect
@@ -67,7 +68,6 @@ require (
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
	github.com/shoenig/go-m1cpu v0.1.6 // indirect
	github.com/spf13/cast v1.7.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/tcnksm/go-gitconfig v0.1.2 // indirect
@@ -89,9 +89,9 @@ require (
	golang.org/x/text v0.18.0 // indirect
	golang.org/x/time v0.6.0 // indirect
	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
	google.golang.org/api v0.197.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
	google.golang.org/grpc v1.66.2 // indirect
	google.golang.org/api v0.199.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f // indirect
	google.golang.org/grpc v1.67.1 // indirect
	google.golang.org/protobuf v1.34.2 // indirect
	modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a // indirect
	modernc.org/libc v1.61.0 // indirect
110 beszel/go.sum
@@ -1,13 +1,13 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ=
|
||||
cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
|
||||
cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
|
||||
cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
|
||||
cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
|
||||
cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
|
||||
cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
|
||||
cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
|
||||
cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
|
||||
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
|
||||
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
|
||||
cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4=
|
||||
cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus=
|
||||
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
|
||||
@@ -26,44 +26,44 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g=
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18 h1:9DIp7vhmOPmueCDwpXa45bEbLHHTt1kcxChdTJWWxvI=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18/go.mod h1:aJv/Fwz8r56ozwYFRC4bzoeL1L17GYQYemfblOBux1M=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U=
|
||||
github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 h1:xDAuZTn4IMm8o1LnBZvmrL8JA1io4o3YWNXgohbf20g=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5/go.mod h1:wYSv6iDS621sEFLfKvpPE2ugjTuGlAG7iROg0hLOkfc=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.39 h1:FCylu78eTGzW1ynHcongXK9YHtoXD5AiiUqq3YfJYjU=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.39/go.mod h1:wczj2hbyskP4LjMKBEZwPRO1shXY+GsQleab+ZXT2ik=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.37 h1:G2aOH01yW8X373JK419THj5QVqu9vKEwxSEsGxihoW0=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.37/go.mod h1:0ecCjlb7htYCptRD45lXJ6aJDQac6D2NlKGpZqyTG6A=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.26 h1:BTfwWNFVGLxW2bih/V2xhgCsYDQwG1cAWhWoW9Jx7wE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.26/go.mod h1:LA1/FxoEFFmv7XpkB8KKqLAUz8AePdK9H0Ec7PUKazs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 h1:Roo69qTpfu8OlJ2Tb7pAYVuF0CpuUMB0IYWwYP/4DZM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17/go.mod h1:NcWPxQzGM1USQggaTVwz6VpqMZPX1CvDJLDh6jnOCa4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 h1:FLMkfEiRjhgeDTCjjLoc3URo/TBkgeQbocA78lfkzSI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19/go.mod h1:Vx+GucNSsdhaxs3aZIKfSUjKVGsxN25nX2SRcdhuw08=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 h1:u+EfGmksnJc/x5tq3A+OD7LrMbSSR/5TrKLvkdy/fhY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17/go.mod h1:VaMx6302JHax2vHJWgRo+5n9zvbacs3bLU/23DNQrTY=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 h1:Kp6PWAlXwP1UvIflkIP6MFZYBNDCa4mFCGtxrpICVOg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2/go.mod h1:5FmD/Dqq57gP+XwaUnd5WFPipAuzrf0HmupX27Gvjvc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o=
|
||||
github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
|
||||
github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 h1:OWYvKL53l1rbsUmW7bQyJVsYU/Ii3bbAAQIIFNbM0Tk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18/go.mod h1:CUx0G1v3wG6l01tUB+j7Y8kclA8NSqK4ef0YG79a4cg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 h1:rTWjG6AvWekO2B1LHeM3ktU7MqyX9rzWQ7hgzneZW7E=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20/go.mod h1:RGW2DDpVc8hu6Y6yG8G5CHVmVOAn1oV8rNKOHRJyswg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 h1:eb+tFOIl9ZsUe2259/BKPeniKuz4/02zZFH/i4Nf8Rg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18/go.mod h1:GVCC2IJNJTmdlyEsSmofEy7EfJncP7DNnXDzRjJ5Keg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.64.0 h1:I0p8knB/IDYSQ3dbanaCr4UhiYQ96bvKRhGYxvLyiD8=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.64.0/go.mod h1:NLTqRLe3pUNu3nTEHI6XlHLKYmc8fbHUdMxAB6+s41Q=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.23.3 h1:rs4JCczF805+FDv2tRhZ1NU0RB2H6ryAvsWPanAr72Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.23.3/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3 h1:S7EPdMVZod8BGKQQPTBK+FcX9g7bKR7c4+HxWqHP7Vg=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.31.3 h1:VzudTFrDCIDakXtemR7l6Qzt2+JYsVqo2MxBPt5k8T8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.31.3/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI=
|
||||
github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA=
|
||||
github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@@ -84,6 +84,8 @@ github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCO
|
||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE=
|
||||
github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
@@ -215,8 +217,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pocketbase/dbx v1.10.1 h1:cw+vsyfCJD8YObOVeqb93YErnlxwYMkNZ4rwN0G0AaA=
|
||||
github.com/pocketbase/dbx v1.10.1/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||
github.com/pocketbase/pocketbase v0.22.20 h1:yUkhO5bTPWlzD4ZK6EQlS4R3AcHKDlBD+DxxU2BR83I=
|
||||
github.com/pocketbase/pocketbase v0.22.20/go.mod h1:Cw5E4uoGhKItBIE2lJL3NfmiUr9Syk2xaNJ2G7Dssow=
|
||||
github.com/pocketbase/pocketbase v0.22.21 h1:DGPCxn6co8VuTV0mton4NFO/ON49XiFMszRr+Mysy48=
|
||||
github.com/pocketbase/pocketbase v0.22.21/go.mod h1:Cw5E4uoGhKItBIE2lJL3NfmiUr9Syk2xaNJ2G7Dssow=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -227,12 +229,8 @@ github.com/rhysd/go-github-selfupdate v1.2.3/go.mod h1:mp/N8zj6jFfBQy/XMYoWsmfzx
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shirou/gopsutil/v4 v4.24.8 h1:pVQjIenQkIhqO81mwTaXjTzOMT7d3TZkf43PlVFHENI=
|
||||
github.com/shirou/gopsutil/v4 v4.24.8/go.mod h1:wE0OrJtj4dG+hYkxqDH3QiBICdKSf04/npcvLLc/oRg=
|
||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
||||
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
|
||||
github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI=
|
||||
github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q=
|
||||
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
|
||||
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
@@ -366,8 +364,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
|
||||
google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ=
|
||||
google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw=
|
||||
google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs=
|
||||
google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -377,17 +375,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
|
||||
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988 h1:+/tmTy5zAieooKIXfzDm9KiA3Bv6JBwriRN9LY+yayk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988/go.mod h1:4+X6GvPs+25wZKbQq9qyAXrwIRExv7w0Ea6MgZLZiDM=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f h1:cUMEy+8oS78BWIH9OWazBkzbr090Od9tWBNtZHkOhf0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
|
||||
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
|
||||
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
|
||||
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
||||
@@ -2,395 +2,87 @@
package agent

import (
	"beszel"
	"beszel/internal/entities/container"
	"beszel/internal/entities/system"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"math"
	"net"
	"net/http"
	"net/url"
	"log/slog"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/shirou/gopsutil/v4/common"
	"github.com/shirou/gopsutil/v4/cpu"
	"github.com/shirou/gopsutil/v4/disk"
	"github.com/shirou/gopsutil/v4/host"
	"github.com/shirou/gopsutil/v4/mem"
	"github.com/shirou/gopsutil/v4/sensors"

	sshServer "github.com/gliderlabs/ssh"
	psutilNet "github.com/shirou/gopsutil/v4/net"
)

type Agent struct {
	addr string
	pubKey []byte
	sem chan struct{}
	containerStatsMap map[string]*container.PrevContainerStats
	containerStatsMutex *sync.Mutex
	fsNames []string
	fsStats map[string]*system.FsStats
	netInterfaces map[string]struct{}
	netIoStats *system.NetIoStats
	dockerClient *http.Client
	sensorsContext context.Context
	bufferPool *sync.Pool
	debug bool // true if LOG_LEVEL is set to debug
	fsNames []string // List of filesystem device names being monitored
	fsStats map[string]*system.FsStats // Keeps track of disk stats for each filesystem
	netInterfaces map[string]struct{} // Stores all valid network interfaces
	netIoStats system.NetIoStats // Keeps track of bandwidth usage
	dockerManager *dockerManager // Manages Docker API requests
	sensorsContext context.Context // Sensors context to override sys location
	sensorsWhitelist map[string]struct{} // List of sensors to monitor
	systemInfo system.Info // Host system info
}

func NewAgent(pubKey []byte, addr string) *Agent {
func NewAgent() *Agent {
	return &Agent{
		addr: addr,
		pubKey: pubKey,
		sem: make(chan struct{}, 15),
		containerStatsMap: make(map[string]*container.PrevContainerStats),
		containerStatsMutex: &sync.Mutex{},
		netIoStats: &system.NetIoStats{},
		dockerClient: newDockerClient(),
		sensorsContext: context.Background(),
		bufferPool: &sync.Pool{
			New: func() interface{} {
				return new(bytes.Buffer)
			},
		},
		sensorsContext: context.Background(),
	}
}
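NewAgent above seeds a sync.Pool of bytes.Buffer values (bufferPool) that the container stats path reuses instead of allocating a new buffer per response. A self-contained sketch of that Get/Reset/Put pattern, with illustrative names:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// bufPool mirrors the Agent's bufferPool field: it hands out reusable
// *bytes.Buffer values instead of allocating one per request.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func readBody(r io.Reader) (string, error) {
	buf := bufPool.Get().(*bytes.Buffer)
	defer bufPool.Put(buf) // return the buffer for reuse
	buf.Reset()            // clear data left over from a previous use
	if _, err := io.Copy(buf, r); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	s, _ := readBody(strings.NewReader(`{"ok":true}`))
	fmt.Println(s)
}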
func (a *Agent) acquireSemaphore() {
|
||||
a.sem <- struct{}{}
|
||||
}
|
||||
|
||||
func (a *Agent) releaseSemaphore() {
|
||||
<-a.sem
|
||||
}
|
||||
|
||||
func (a *Agent) getSystemStats() (system.Info, system.Stats) {
|
||||
systemStats := system.Stats{}
|
||||
|
||||
// cpu percent
|
||||
cpuPct, err := cpu.Percent(0, false)
|
||||
if err != nil {
|
||||
log.Println("Error getting cpu percent:", err)
|
||||
} else if len(cpuPct) > 0 {
|
||||
systemStats.Cpu = twoDecimals(cpuPct[0])
|
||||
}
|
||||
|
||||
// memory
|
||||
if v, err := mem.VirtualMemory(); err == nil {
|
||||
systemStats.Mem = bytesToGigabytes(v.Total)
|
||||
systemStats.MemUsed = bytesToGigabytes(v.Used)
|
||||
systemStats.MemBuffCache = bytesToGigabytes(v.Total - v.Free - v.Used)
|
||||
systemStats.MemPct = twoDecimals(v.UsedPercent)
|
||||
systemStats.Swap = bytesToGigabytes(v.SwapTotal)
|
||||
systemStats.SwapUsed = bytesToGigabytes(v.SwapTotal - v.SwapFree)
|
||||
}
|
||||
|
||||
// disk usage
|
||||
for _, stats := range a.fsStats {
|
||||
// log.Println("Reading filesystem:", fs.Mountpoint)
|
||||
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
||||
stats.DiskTotal = bytesToGigabytes(d.Total)
|
||||
stats.DiskUsed = bytesToGigabytes(d.Used)
|
||||
if stats.Root {
|
||||
systemStats.DiskTotal = bytesToGigabytes(d.Total)
|
||||
systemStats.DiskUsed = bytesToGigabytes(d.Used)
|
||||
systemStats.DiskPct = twoDecimals(d.UsedPercent)
|
||||
}
|
||||
} else {
|
||||
// reset stats if error (likely unmounted)
|
||||
log.Printf("Error reading %s: %+v\n", stats.Mountpoint, err)
|
||||
stats.DiskTotal = 0
|
||||
stats.DiskUsed = 0
|
||||
stats.TotalRead = 0
|
||||
stats.TotalWrite = 0
|
||||
func (a *Agent) Run(pubKey []byte, addr string) {
|
||||
// Set up slog with a log level determined by the LOG_LEVEL env var
|
||||
if logLevelStr, exists := os.LookupEnv("LOG_LEVEL"); exists {
|
||||
switch strings.ToLower(logLevelStr) {
|
||||
case "debug":
|
||||
a.debug = true
|
||||
slog.SetLogLoggerLevel(slog.LevelDebug)
|
||||
case "warn":
|
||||
slog.SetLogLoggerLevel(slog.LevelWarn)
|
||||
case "error":
|
||||
slog.SetLogLoggerLevel(slog.LevelError)
|
||||
}
|
||||
}
|
||||
|
||||
// disk i/o
|
||||
if ioCounters, err := disk.IOCounters(a.fsNames...); err == nil {
|
||||
for _, d := range ioCounters {
|
||||
stats := a.fsStats[d.Name]
|
||||
if stats == nil {
|
||||
continue
|
||||
}
|
||||
secondsElapsed := time.Since(stats.Time).Seconds()
|
||||
readPerSecond := float64(d.ReadBytes-stats.TotalRead) / secondsElapsed
|
||||
writePerSecond := float64(d.WriteBytes-stats.TotalWrite) / secondsElapsed
|
||||
stats.Time = time.Now()
|
||||
stats.DiskReadPs = bytesToMegabytes(readPerSecond)
|
||||
stats.DiskWritePs = bytesToMegabytes(writePerSecond)
|
||||
stats.TotalRead = d.ReadBytes
|
||||
stats.TotalWrite = d.WriteBytes
|
||||
// if root filesystem, update system stats
|
||||
if stats.Root {
|
||||
systemStats.DiskReadPs = stats.DiskReadPs
|
||||
systemStats.DiskWritePs = stats.DiskWritePs
|
||||
}
|
||||
// Set sensors context (allows overriding sys location for sensors)
|
||||
if sysSensors, exists := os.LookupEnv("SYS_SENSORS"); exists {
|
||||
slog.Info("SYS_SENSORS", "path", sysSensors)
|
||||
a.sensorsContext = context.WithValue(a.sensorsContext,
|
||||
common.EnvKey, common.EnvMap{common.HostSysEnvKey: sysSensors},
|
||||
)
|
||||
}
|
||||
|
||||
// Set sensors whitelist
|
||||
if sensors, exists := os.LookupEnv("SENSORS"); exists {
|
||||
a.sensorsWhitelist = make(map[string]struct{})
|
||||
for _, sensor := range strings.Split(sensors, ",") {
|
||||
a.sensorsWhitelist[sensor] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// network stats
|
||||
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
||||
secondsElapsed := time.Since(a.netIoStats.Time).Seconds()
|
||||
a.netIoStats.Time = time.Now()
|
||||
bytesSent := uint64(0)
|
||||
bytesRecv := uint64(0)
|
||||
// sum all bytes sent and received
|
||||
for _, v := range netIO {
|
||||
// skip if not in valid network interfaces list
|
||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||
continue
|
||||
}
|
||||
// log.Printf("%+v: %+v recv, %+v sent\n", v.Name, v.BytesRecv, v.BytesSent)
|
||||
bytesSent += v.BytesSent
|
||||
bytesRecv += v.BytesRecv
|
||||
}
|
||||
// add to systemStats
|
||||
sentPerSecond := float64(bytesSent-a.netIoStats.BytesSent) / secondsElapsed
|
||||
recvPerSecond := float64(bytesRecv-a.netIoStats.BytesRecv) / secondsElapsed
|
||||
networkSentPs := bytesToMegabytes(sentPerSecond)
|
||||
networkRecvPs := bytesToMegabytes(recvPerSecond)
|
||||
// add check for issue (#150) where sent is a massive number
|
||||
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
|
||||
log.Printf("Warning: network sent/recv is %.2f/%.2f MB/s. Resetting stats.\n", networkSentPs, networkRecvPs)
|
||||
for _, v := range netIO {
|
||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||
continue
|
||||
}
|
||||
log.Printf("%+s: %v recv, %v sent\n", v.Name, v.BytesRecv, v.BytesSent)
|
||||
}
|
||||
// reset network I/O stats
|
||||
a.initializeNetIoStats()
|
||||
} else {
|
||||
systemStats.NetworkSent = networkSentPs
|
||||
systemStats.NetworkRecv = networkRecvPs
|
||||
// update netIoStats
|
||||
a.netIoStats.BytesSent = bytesSent
|
||||
a.netIoStats.BytesRecv = bytesRecv
|
||||
}
|
||||
// initialize system info / docker manager
|
||||
a.initializeSystemInfo()
|
||||
a.initializeDiskInfo()
|
||||
a.initializeNetIoStats()
|
||||
a.dockerManager = newDockerManager()
|
||||
|
||||
// if debugging, print stats
|
||||
if a.debug {
|
||||
slog.Debug("Stats", "data", a.gatherStats())
|
||||
}
|
||||
|
||||
// temperatures
|
||||
if temps, err := sensors.TemperaturesWithContext(a.sensorsContext); err == nil {
|
||||
systemStats.Temperatures = make(map[string]float64)
|
||||
// log.Printf("Temperatures: %+v\n", temps)
|
||||
for i, temp := range temps {
|
||||
if _, ok := systemStats.Temperatures[temp.SensorKey]; ok {
|
||||
// if key already exists, append int to key
|
||||
systemStats.Temperatures[temp.SensorKey+"_"+strconv.Itoa(i)] = twoDecimals(temp.Temperature)
|
||||
} else {
|
||||
systemStats.Temperatures[temp.SensorKey] = twoDecimals(temp.Temperature)
|
||||
}
|
||||
}
|
||||
// log.Printf("Temperature map: %+v\n", systemStats.Temperatures)
|
||||
}
|
||||
|
||||
systemInfo := system.Info{
|
||||
Cpu: systemStats.Cpu,
|
||||
MemPct: systemStats.MemPct,
|
||||
DiskPct: systemStats.DiskPct,
|
||||
AgentVersion: beszel.Version,
|
||||
}
|
||||
|
||||
// add host info
|
||||
if info, err := host.Info(); err == nil {
|
||||
systemInfo.Uptime = info.Uptime
|
||||
systemInfo.Hostname = info.Hostname
|
||||
}
|
||||
// add cpu stats
|
||||
if info, err := cpu.Info(); err == nil && len(info) > 0 {
|
||||
systemInfo.CpuModel = info[0].ModelName
|
||||
}
|
||||
if cores, err := cpu.Counts(false); err == nil {
|
||||
systemInfo.Cores = cores
|
||||
}
|
||||
if threads, err := cpu.Counts(true); err == nil {
|
||||
systemInfo.Threads = threads
|
||||
}
|
||||
|
||||
return systemInfo, systemStats
|
||||
}
|
||||
|
||||
func (a *Agent) getDockerStats() ([]container.Stats, error) {
|
||||
resp, err := a.dockerClient.Get("http://localhost/containers/json")
|
||||
if err != nil {
|
||||
a.closeIdleConnections(err)
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var containers []container.ApiInfo
|
||||
if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
|
||||
log.Printf("Error decoding containers: %+v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
containerStats := make([]container.Stats, 0, len(containers))
|
||||
containerStatsMutex := sync.Mutex{}
|
||||
|
||||
// store valid ids to clean up old container ids from map
|
||||
validIds := make(map[string]struct{}, len(containers))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, ctr := range containers {
|
||||
ctr.IdShort = ctr.Id[:12]
|
||||
validIds[ctr.IdShort] = struct{}{}
|
||||
// check if container is less than 1 minute old (possible restart)
|
||||
// note: can't use Created field because it's not updated on restart
|
||||
if strings.Contains(ctr.Status, "second") {
|
||||
// if so, remove old container data
|
||||
a.deleteContainerStatsSync(ctr.IdShort)
|
||||
}
|
||||
wg.Add(1)
|
||||
a.acquireSemaphore()
|
||||
go func() {
|
||||
defer a.releaseSemaphore()
|
||||
defer wg.Done()
|
||||
cstats, err := a.getContainerStats(ctr)
|
||||
if err != nil {
|
||||
// close idle connections if error is a network timeout
|
||||
isTimeout := a.closeIdleConnections(err)
|
||||
// delete container from map if not a timeout
|
||||
if !isTimeout {
|
||||
a.deleteContainerStatsSync(ctr.IdShort)
|
||||
}
|
||||
// retry once
|
||||
cstats, err = a.getContainerStats(ctr)
|
||||
if err != nil {
|
||||
log.Printf("Error getting container stats: %+v\n", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
containerStatsMutex.Lock()
|
||||
defer containerStatsMutex.Unlock()
|
||||
containerStats = append(containerStats, cstats)
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
for id := range a.containerStatsMap {
|
||||
if _, exists := validIds[id]; !exists {
|
||||
// log.Printf("Removing container cpu map entry: %+v\n", id)
|
||||
delete(a.containerStatsMap, id)
|
||||
}
|
||||
}
|
||||
|
||||
return containerStats, nil
|
||||
}
|
||||
|
||||
func (a *Agent) getContainerStats(ctr container.ApiInfo) (container.Stats, error) {
|
||||
cStats := container.Stats{}
|
||||
|
||||
resp, err := a.dockerClient.Get("http://localhost/containers/" + ctr.IdShort + "/stats?stream=0&one-shot=1")
|
||||
if err != nil {
|
||||
return cStats, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// use a pooled buffer to store the response body
|
||||
buf := a.bufferPool.Get().(*bytes.Buffer)
|
||||
defer a.bufferPool.Put(buf)
|
||||
buf.Reset()
|
||||
_, err = io.Copy(buf, resp.Body)
|
||||
if err != nil {
|
||||
return cStats, err
|
||||
}
|
||||
|
||||
// unmarshal the json data from the buffer
|
||||
var statsJson container.ApiStats
|
||||
if err := json.Unmarshal(buf.Bytes(), &statsJson); err != nil {
|
||||
return cStats, err
|
||||
}
|
||||
|
||||
name := ctr.Names[0][1:]
|
||||
|
||||
// check if container has valid data, otherwise may be in restart loop (#103)
|
||||
if statsJson.MemoryStats.Usage == 0 {
|
||||
return cStats, fmt.Errorf("%s - invalid data", name)
|
||||
}
|
||||
|
||||
// memory (https://docs.docker.com/reference/cli/docker/container/stats/)
|
||||
memCache := statsJson.MemoryStats.Stats["inactive_file"]
|
||||
if memCache == 0 {
|
||||
memCache = statsJson.MemoryStats.Stats["cache"]
|
||||
}
|
||||
usedMemory := statsJson.MemoryStats.Usage - memCache
|
||||
|
||||
a.containerStatsMutex.Lock()
|
||||
defer a.containerStatsMutex.Unlock()
|
||||
|
||||
// add empty values if they doesn't exist in map
|
||||
stats, initialized := a.containerStatsMap[ctr.IdShort]
|
||||
if !initialized {
|
||||
stats = &container.PrevContainerStats{}
|
||||
a.containerStatsMap[ctr.IdShort] = stats
|
||||
}
|
||||
|
||||
// cpu
|
||||
cpuDelta := statsJson.CPUStats.CPUUsage.TotalUsage - stats.Cpu[0]
|
||||
systemDelta := statsJson.CPUStats.SystemUsage - stats.Cpu[1]
|
||||
cpuPct := float64(cpuDelta) / float64(systemDelta) * 100
|
||||
if cpuPct > 100 {
|
||||
return cStats, fmt.Errorf("%s cpu pct greater than 100: %+v", name, cpuPct)
|
||||
}
|
||||
stats.Cpu = [2]uint64{statsJson.CPUStats.CPUUsage.TotalUsage, statsJson.CPUStats.SystemUsage}
|
||||
|
||||
// network
|
||||
var total_sent, total_recv uint64
|
||||
for _, v := range statsJson.Networks {
|
||||
total_sent += v.TxBytes
|
||||
total_recv += v.RxBytes
|
||||
}
|
||||
var sent_delta, recv_delta float64
|
||||
// prevent first run from sending all prev sent/recv bytes
|
||||
if initialized {
|
||||
secondsElapsed := time.Since(stats.Net.Time).Seconds()
|
||||
sent_delta = float64(total_sent-stats.Net.Sent) / secondsElapsed
|
||||
recv_delta = float64(total_recv-stats.Net.Recv) / secondsElapsed
|
||||
// log.Printf("sent delta: %+v, recv delta: %+v\n", sent_delta, recv_delta)
|
||||
}
|
||||
stats.Net.Sent = total_sent
|
||||
stats.Net.Recv = total_recv
|
||||
stats.Net.Time = time.Now()
|
||||
|
||||
// cStats := a.containerStatsPool.Get().(*container.Stats)
|
||||
cStats.Name = name
|
||||
cStats.Cpu = twoDecimals(cpuPct)
|
||||
cStats.Mem = bytesToMegabytes(float64(usedMemory))
|
||||
cStats.NetworkSent = bytesToMegabytes(sent_delta)
|
||||
cStats.NetworkRecv = bytesToMegabytes(recv_delta)
|
||||
|
||||
return cStats, nil
|
||||
}
|
||||
|
||||
// delete container stats from map using mutex
|
||||
func (a *Agent) deleteContainerStatsSync(id string) {
|
||||
a.containerStatsMutex.Lock()
|
||||
defer a.containerStatsMutex.Unlock()
|
||||
delete(a.containerStatsMap, id)
|
||||
a.startServer(pubKey, addr)
|
||||
}
|
||||
|
||||
func (a *Agent) gatherStats() system.CombinedData {
|
||||
systemInfo, systemStats := a.getSystemStats()
|
||||
systemData := system.CombinedData{
|
||||
Stats: systemStats,
|
||||
Info: systemInfo,
|
||||
Stats: a.getSystemStats(),
|
||||
Info: a.systemInfo,
|
||||
}
|
||||
// add docker stats
|
||||
if containerStats, err := a.getDockerStats(); err == nil {
|
||||
if containerStats, err := a.dockerManager.getDockerStats(); err == nil {
|
||||
systemData.Containers = containerStats
|
||||
} else {
|
||||
slog.Debug("Error getting docker stats", "err", err)
|
||||
}
|
||||
// add extra filesystems
|
||||
systemData.Stats.ExtraFs = make(map[string]*system.FsStats)
|
||||
@@ -399,296 +91,5 @@ func (a *Agent) gatherStats() system.CombinedData {
|
||||
systemData.Stats.ExtraFs[name] = stats
|
||||
}
|
||||
}
|
||||
// log.Printf("%+v\n", systemData)
|
||||
return systemData
|
||||
}
|
||||
|
||||
func (a *Agent) startServer() {
|
||||
sshServer.Handle(a.handleSession)
|
||||
|
||||
log.Printf("Starting SSH server on %s", a.addr)
|
||||
if err := sshServer.ListenAndServe(a.addr, nil, sshServer.NoPty(),
|
||||
sshServer.PublicKeyAuth(func(ctx sshServer.Context, key sshServer.PublicKey) bool {
|
||||
allowed, _, _, _, _ := sshServer.ParseAuthorizedKey(a.pubKey)
|
||||
return sshServer.KeysEqual(key, allowed)
|
||||
}),
|
||||
); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Agent) handleSession(s sshServer.Session) {
|
||||
stats := a.gatherStats()
|
||||
encoder := json.NewEncoder(s)
|
||||
if err := encoder.Encode(stats); err != nil {
|
||||
log.Println("Error encoding stats:", err.Error())
|
||||
s.Exit(1)
|
||||
return
|
||||
}
|
||||
s.Exit(0)
|
||||
}
|
||||
|
||||
func (a *Agent) Run() {
|
||||
a.fsStats = make(map[string]*system.FsStats)
|
||||
|
||||
// set sensors context (allows overriding sys location for sensors)
|
||||
if sysSensors, exists := os.LookupEnv("SYS_SENSORS"); exists {
|
||||
// log.Println("Using sys location for sensors:", sysSensors)
|
||||
a.sensorsContext = context.WithValue(a.sensorsContext,
|
||||
common.EnvKey, common.EnvMap{common.HostSysEnvKey: sysSensors},
|
||||
)
|
||||
}
|
||||
|
||||
a.initializeDiskInfo()
|
||||
a.initializeDiskIoStats()
|
||||
a.initializeNetIoStats()
|
||||
|
||||
// log.Printf("Filesystems: %+v\n", a.fsStats)
|
||||
a.startServer()
|
||||
}
|
||||
|
||||
// Sets up the filesystems to monitor for disk usage and I/O.
|
||||
func (a *Agent) initializeDiskInfo() error {
|
||||
filesystem := os.Getenv("FILESYSTEM")
|
||||
hasRoot := false
|
||||
|
||||
// add values from EXTRA_FILESYSTEMS env var to fsStats
|
||||
if extraFilesystems, exists := os.LookupEnv("EXTRA_FILESYSTEMS"); exists {
|
||||
for _, filesystem := range strings.Split(extraFilesystems, ",") {
|
||||
a.fsStats[filepath.Base(filesystem)] = &system.FsStats{}
|
||||
}
|
||||
}
|
||||
|
||||
partitions, err := disk.Partitions(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if FILESYSTEM env var is set, use it to find root filesystem
|
||||
if filesystem != "" {
|
||||
for _, v := range partitions {
|
||||
// use filesystem env var if matching partition is found
|
||||
if strings.HasSuffix(v.Device, filesystem) || v.Mountpoint == filesystem {
|
||||
a.fsStats[filepath.Base(v.Device)] = &system.FsStats{Root: true, Mountpoint: v.Mountpoint}
|
||||
hasRoot = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasRoot {
|
||||
// if no match, log available partition details
|
||||
log.Printf("Partition details not found for %s:\n", filesystem)
|
||||
for _, v := range partitions {
|
||||
fmt.Printf("%+v\n", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range partitions {
|
||||
// binary root fallback - use root mountpoint
|
||||
if !hasRoot && v.Mountpoint == "/" {
|
||||
a.fsStats[filepath.Base(v.Device)] = &system.FsStats{Root: true, Mountpoint: "/"}
|
||||
hasRoot = true
|
||||
}
|
||||
// docker root fallback - use /etc/hosts device if not mapped
|
||||
if !hasRoot && v.Mountpoint == "/etc/hosts" && strings.HasPrefix(v.Device, "/dev") && !strings.Contains(v.Device, "mapper") {
|
||||
a.fsStats[filepath.Base(v.Device)] = &system.FsStats{Root: true, Mountpoint: "/"}
|
||||
hasRoot = true
|
||||
}
|
||||
// check if device is in /extra-filesystem
|
||||
if strings.HasPrefix(v.Mountpoint, "/extra-filesystem") {
|
||||
// add to fsStats if not already there
|
||||
if _, exists := a.fsStats[filepath.Base(v.Device)]; !exists {
|
||||
a.fsStats[filepath.Base(v.Device)] = &system.FsStats{Mountpoint: v.Mountpoint}
|
||||
}
|
||||
continue
|
||||
}
|
||||
// set mountpoints for extra filesystems if passed in via env var
|
||||
for name, stats := range a.fsStats {
|
||||
if strings.HasSuffix(v.Device, name) {
|
||||
stats.Mountpoint = v.Mountpoint
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// remove extra filesystems that don't have a mountpoint
|
||||
for name, stats := range a.fsStats {
|
||||
if stats.Root {
|
||||
log.Println("Detected root fs:", name)
|
||||
}
|
||||
if stats.Mountpoint == "" {
|
||||
log.Printf("Ignoring %s. No mountpoint found.\n", name)
|
||||
delete(a.fsStats, name)
|
||||
}
|
||||
}
|
||||
|
||||
// if no root filesystem set, use most read device in /proc/diskstats
|
||||
if !hasRoot {
|
||||
rootDevice := findFallbackIoDevice(filepath.Base(filesystem))
|
||||
log.Printf("Using / as mountpoint and %s for I/O\n", rootDevice)
|
||||
a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: "/"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sets start values for disk I/O stats.
|
||||
func (a *Agent) initializeDiskIoStats() {
|
||||
// create slice of fs names to pass to disk.IOCounters
|
||||
a.fsNames = make([]string, 0, len(a.fsStats))
|
||||
for name := range a.fsStats {
|
||||
a.fsNames = append(a.fsNames, name)
|
||||
}
|
||||
|
||||
if ioCounters, err := disk.IOCounters(a.fsNames...); err == nil {
|
||||
for _, d := range ioCounters {
|
||||
if a.fsStats[d.Name] == nil {
|
||||
continue
|
||||
}
|
||||
a.fsStats[d.Name].Time = time.Now()
|
||||
a.fsStats[d.Name].TotalRead = d.ReadBytes
|
||||
a.fsStats[d.Name].TotalWrite = d.WriteBytes
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Agent) initializeNetIoStats() {
|
||||
// reset valid network interfaces
|
||||
a.netInterfaces = make(map[string]struct{}, 0)
|
||||
|
||||
// map of network interface names passed in via NICS env var
|
||||
var nicsMap map[string]struct{}
|
||||
nics, nicsEnvExists := os.LookupEnv("NICS")
|
||||
if nicsEnvExists {
|
||||
nicsMap = make(map[string]struct{}, 0)
|
||||
for _, nic := range strings.Split(nics, ",") {
|
||||
nicsMap[nic] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// reset network I/O stats
|
||||
a.netIoStats.BytesSent = 0
|
||||
a.netIoStats.BytesRecv = 0
|
||||
|
||||
// get intial network I/O stats
|
||||
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
||||
a.netIoStats.Time = time.Now()
|
||||
for _, v := range netIO {
|
||||
switch {
|
||||
// skip if nics exists and the interface is not in the list
|
||||
case nicsEnvExists:
|
||||
if _, nameInNics := nicsMap[v.Name]; !nameInNics {
|
||||
continue
|
||||
}
|
||||
// otherwise run the interface name through the skipNetworkInterface function
|
||||
default:
|
||||
if a.skipNetworkInterface(v) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
log.Printf("Detected network interface: %+v (%+v recv, %+v sent)\n", v.Name, v.BytesRecv, v.BytesSent)
|
||||
a.netIoStats.BytesSent += v.BytesSent
|
||||
a.netIoStats.BytesRecv += v.BytesRecv
|
||||
// store as a valid network interface
|
||||
a.netInterfaces[v.Name] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func bytesToMegabytes(b float64) float64 {
|
||||
return twoDecimals(b / 1048576)
|
||||
}
|
||||
|
||||
func bytesToGigabytes(b uint64) float64 {
|
||||
return twoDecimals(float64(b) / 1073741824)
|
||||
}
|
||||
|
||||
func twoDecimals(value float64) float64 {
|
||||
return math.Round(value*100) / 100
|
||||
}
|
||||
|
||||
func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
|
||||
switch {
|
||||
case strings.HasPrefix(v.Name, "lo"),
|
||||
strings.HasPrefix(v.Name, "docker"),
|
||||
strings.HasPrefix(v.Name, "br-"),
|
||||
strings.HasPrefix(v.Name, "veth"),
|
||||
v.BytesRecv == 0,
|
||||
v.BytesSent == 0:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func newDockerClient() *http.Client {
|
||||
dockerHost := "unix:///var/run/docker.sock"
|
||||
if dockerHostEnv, exists := os.LookupEnv("DOCKER_HOST"); exists {
|
||||
dockerHost = dockerHostEnv
|
||||
}
|
||||
|
||||
parsedURL, err := url.Parse(dockerHost)
|
||||
if err != nil {
|
||||
log.Fatal("Error parsing DOCKER_HOST: " + err.Error())
|
||||
}
|
||||
|
||||
transport := &http.Transport{
|
||||
ForceAttemptHTTP2: false,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
DisableCompression: true,
|
||||
MaxConnsPerHost: 20,
|
||||
MaxIdleConnsPerHost: 20,
|
||||
DisableKeepAlives: false,
|
||||
}
|
||||
|
||||
switch parsedURL.Scheme {
|
||||
case "unix":
|
||||
transport.DialContext = func(ctx context.Context, proto, addr string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, "unix", parsedURL.Path)
|
||||
}
|
||||
case "tcp", "http", "https":
|
||||
log.Println("Using DOCKER_HOST: " + dockerHost)
|
||||
transport.DialContext = func(ctx context.Context, proto, addr string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, "tcp", parsedURL.Host)
|
||||
}
|
||||
default:
|
||||
log.Fatal("Unsupported DOCKER_HOST: " + parsedURL.Scheme)
|
||||
}
|
||||
|
||||
return &http.Client{
|
||||
Timeout: time.Second,
|
||||
Transport: transport,
|
||||
}
|
||||
}
|
||||
|
||||
// closes idle connections on timeouts to prevent reuse of stale connections
|
||||
func (a *Agent) closeIdleConnections(err error) (isTimeout bool) {
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
||||
log.Printf("Closing idle connections. Error: %+v\n", err)
|
||||
a.dockerClient.Transport.(*http.Transport).CloseIdleConnections()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns the device with the most reads in /proc/diskstats,
|
||||
// or the device specified by the filesystem argument if it exists
|
||||
// (fallback in case the root device is not supplied or detected)
|
||||
func findFallbackIoDevice(filesystem string) string {
|
||||
var maxReadBytes uint64
|
||||
maxReadDevice := "/"
|
||||
counters, err := disk.IOCounters()
|
||||
if err != nil {
|
||||
return maxReadDevice
|
||||
}
|
||||
for _, d := range counters {
|
||||
if d.Name == filesystem {
|
||||
return d.Name
|
||||
}
|
||||
if d.ReadBytes > maxReadBytes {
|
||||
maxReadBytes = d.ReadBytes
|
||||
maxReadDevice = d.Name
|
||||
}
|
||||
}
|
||||
return maxReadDevice
|
||||
}
|
||||
|
||||
169
beszel/internal/agent/disk.go
Normal file
169
beszel/internal/agent/disk.go
Normal file
@@ -0,0 +1,169 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"beszel/internal/entities/system"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/shirou/gopsutil/v4/disk"
|
||||
)
|
||||
|
||||
// Sets up the filesystems to monitor for disk usage and I/O.
|
||||
func (a *Agent) initializeDiskInfo() {
|
||||
filesystem := os.Getenv("FILESYSTEM")
|
||||
efPath := "/extra-filesystems"
|
||||
hasRoot := false
|
||||
|
||||
// Create map for disk stats
|
||||
a.fsStats = make(map[string]*system.FsStats)
|
||||
|
||||
partitions, err := disk.Partitions(false)
|
||||
if err != nil {
|
||||
slog.Error("Error getting disk partitions", "err", err)
|
||||
}
|
||||
slog.Debug("Disk", "partitions", partitions)
|
||||
|
||||
// ioContext := context.WithValue(a.sensorsContext,
|
||||
// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
|
||||
// )
|
||||
// diskIoCounters, err := disk.IOCountersWithContext(ioContext)
|
||||
|
||||
diskIoCounters, err := disk.IOCounters()
|
||||
if err != nil {
|
||||
slog.Error("Error getting diskstats", "err", err)
|
||||
}
|
||||
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
||||
|
||||
// Helper function to add a filesystem to fsStats if it doesn't exist
|
||||
addFsStat := func(device, mountpoint string, root bool) {
|
||||
key := filepath.Base(device)
|
||||
if _, exists := a.fsStats[key]; !exists {
|
||||
if root {
|
||||
slog.Info("Detected root device", "name", key)
|
||||
// check if root device is in /proc/diskstats, use fallback if not
if _, exists := diskIoCounters[key]; !exists {
slog.Warn("Device not found in diskstats", "name", key)
key = findFallbackIoDevice(filesystem, diskIoCounters)
slog.Info("Using I/O fallback", "name", key)
}
}
a.fsStats[key] = &system.FsStats{Root: root, Mountpoint: mountpoint}
}
}

// Use FILESYSTEM env var to find root filesystem
if filesystem != "" {
for _, p := range partitions {
if strings.HasSuffix(p.Device, filesystem) || p.Mountpoint == filesystem {
addFsStat(p.Device, p.Mountpoint, true)
hasRoot = true
break
}
}
if !hasRoot {
slog.Warn("Partition details not found", "filesystem", filesystem)
}
}

// Add EXTRA_FILESYSTEMS env var values to fsStats
if extraFilesystems, exists := os.LookupEnv("EXTRA_FILESYSTEMS"); exists {
for _, fs := range strings.Split(extraFilesystems, ",") {
found := false
for _, p := range partitions {
if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
addFsStat(p.Device, p.Mountpoint, false)
found = true
break
}
}
// if not in partitions, test if we can get disk usage
if !found {
if _, err := disk.Usage(fs); err == nil {
addFsStat(filepath.Base(fs), fs, false)
} else {
slog.Error("Invalid filesystem", "name", fs, "err", err)
}
}
}
}

// Process partitions for various mount points
for _, p := range partitions {
// fmt.Println(p.Device, p.Mountpoint)
// Binary root fallback or docker root fallback
if !hasRoot && (p.Mountpoint == "/" || (p.Mountpoint == "/etc/hosts" && strings.HasPrefix(p.Device, "/dev") && !strings.Contains(p.Device, "mapper"))) {
addFsStat(p.Device, "/", true)
hasRoot = true
}

// Check if device is in /extra-filesystems
if strings.HasPrefix(p.Mountpoint, efPath) {
addFsStat(p.Device, p.Mountpoint, false)
}
}

// Check all folders in /extra-filesystems and add them if not already present
if folders, err := os.ReadDir(efPath); err == nil {
existingMountpoints := make(map[string]bool)
for _, stats := range a.fsStats {
existingMountpoints[stats.Mountpoint] = true
}
for _, folder := range folders {
if folder.IsDir() {
mountpoint := filepath.Join(efPath, folder.Name())
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
if !existingMountpoints[mountpoint] {
a.fsStats[folder.Name()] = &system.FsStats{Mountpoint: mountpoint}
}
}
}
}

// If no root filesystem set, use fallback
if !hasRoot {
rootDevice := findFallbackIoDevice(filepath.Base(filesystem), diskIoCounters)
slog.Info("Root disk", "mountpoint", "/", "io", rootDevice)
a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: "/"}
}

a.initializeDiskIoStats(diskIoCounters)
}

// Returns the device with the most reads in /proc/diskstats,
// or the device specified by the filesystem argument if it exists
func findFallbackIoDevice(filesystem string, diskIoCounters map[string]disk.IOCountersStat) string {
var maxReadBytes uint64
maxReadDevice := "/"
for _, d := range diskIoCounters {
if d.Name == filesystem {
return d.Name
}
if d.ReadBytes > maxReadBytes {
maxReadBytes = d.ReadBytes
maxReadDevice = d.Name
}
}
return maxReadDevice
}

// Sets start values for disk I/O stats.
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
for device, stats := range a.fsStats {
// skip if not in diskIoCounters
d, exists := diskIoCounters[device]
if !exists {
slog.Warn("Device not found in diskstats", "name", device)
continue
}
// populate initial values
stats.Time = time.Now()
stats.TotalRead = d.ReadBytes
stats.TotalWrite = d.WriteBytes
// add to list of valid io device names
a.fsNames = append(a.fsNames, device)
}
}
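Note: the EXTRA_FILESYSTEMS handling above matches each comma-separated entry against either a device-name suffix or an exact mountpoint. A minimal, self-contained sketch of that comparison (the partition values below are hypothetical, not taken from the repo):

package main

import (
	"fmt"
	"strings"
)

// matchesEntry mirrors the check used above: an entry matches a partition
// when it is a suffix of the device path or equal to the mountpoint.
func matchesEntry(device, mountpoint, entry string) bool {
	return strings.HasSuffix(device, entry) || mountpoint == entry
}

func main() {
	device, mountpoint := "/dev/sdb1", "/mnt/data" // hypothetical partition
	for _, entry := range strings.Split("sdb1,/mnt/data,nvme0n1", ",") {
		fmt.Println(entry, "->", matchesEntry(device, mountpoint, entry))
	}
}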
253
beszel/internal/agent/docker.go
Normal file
@@ -0,0 +1,253 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"beszel/internal/entities/container"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
)
|
||||
|
||||
type dockerManager struct {
|
||||
client *http.Client // Client to query Docker API
|
||||
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
||||
sem chan struct{} // Semaphore to limit concurrent container requests
|
||||
containerStatsMutex sync.RWMutex // Mutex to prevent concurrent access to containerStatsMap
|
||||
apiContainerList *[]container.ApiInfo // List of containers from Docker API
|
||||
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
||||
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
||||
}
|
||||
|
||||
// Add goroutine to the queue
|
||||
func (d *dockerManager) queue() {
|
||||
d.sem <- struct{}{}
|
||||
d.wg.Add(1)
|
||||
}
|
||||
|
||||
// Remove goroutine from the queue
|
||||
func (d *dockerManager) dequeue() {
|
||||
<-d.sem
|
||||
d.wg.Done()
|
||||
}
|
||||
|
||||
// Returns stats for all running containers
|
||||
func (dm *dockerManager) getDockerStats() ([]*container.Stats, error) {
|
||||
resp, err := dm.client.Get("http://localhost/containers/json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&dm.apiContainerList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
containersLength := len(*dm.apiContainerList)
|
||||
|
||||
// store valid ids to clean up old container ids from map
|
||||
if dm.validIds == nil {
|
||||
dm.validIds = make(map[string]struct{}, containersLength)
|
||||
} else {
|
||||
clear(dm.validIds)
|
||||
}
|
||||
|
||||
for _, ctr := range *dm.apiContainerList {
|
||||
ctr.IdShort = ctr.Id[:12]
|
||||
dm.validIds[ctr.IdShort] = struct{}{}
|
||||
// check if container is less than 1 minute old (possible restart)
|
||||
// note: can't use Created field because it's not updated on restart
|
||||
if strings.Contains(ctr.Status, "second") {
|
||||
// if so, remove old container data
|
||||
dm.deleteContainerStatsSync(ctr.IdShort)
|
||||
}
|
||||
dm.queue()
|
||||
go func() {
|
||||
defer dm.dequeue()
|
||||
err := dm.updateContainerStats(ctr)
|
||||
if err != nil {
|
||||
dm.deleteContainerStatsSync(ctr.IdShort)
|
||||
// retry once
|
||||
err = dm.updateContainerStats(ctr)
|
||||
if err != nil {
|
||||
slog.Error("Error getting container stats", "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
dm.wg.Wait()
|
||||
|
||||
// populate final stats and remove old / invalid container stats
|
||||
stats := make([]*container.Stats, 0, containersLength)
|
||||
for id, v := range dm.containerStatsMap {
|
||||
if _, exists := dm.validIds[id]; !exists {
|
||||
delete(dm.containerStatsMap, id)
|
||||
} else {
|
||||
stats = append(stats, v)
|
||||
}
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// Updates stats for individual container
|
||||
func (dm *dockerManager) updateContainerStats(ctr container.ApiInfo) error {
|
||||
name := ctr.Names[0][1:]
|
||||
|
||||
resp, err := dm.client.Get("http://localhost/containers/" + ctr.IdShort + "/stats?stream=0&one-shot=1")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
dm.containerStatsMutex.Lock()
|
||||
defer dm.containerStatsMutex.Unlock()
|
||||
|
||||
// add empty values if they don't exist in the map
|
||||
stats, initialized := dm.containerStatsMap[ctr.IdShort]
|
||||
if !initialized {
|
||||
stats = &container.Stats{Name: name}
|
||||
dm.containerStatsMap[ctr.IdShort] = stats
|
||||
}
|
||||
|
||||
// reset current stats
|
||||
stats.Cpu = 0
|
||||
stats.Mem = 0
|
||||
stats.NetworkSent = 0
|
||||
stats.NetworkRecv = 0
|
||||
|
||||
// docker host container stats response
|
||||
var res container.ApiStats
|
||||
if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check if container has valid data, otherwise may be in restart loop (#103)
|
||||
if res.MemoryStats.Usage == 0 {
|
||||
return fmt.Errorf("%s - no memory stats - see https://github.com/henrygd/beszel/issues/144", name)
|
||||
}
|
||||
|
||||
// memory (https://docs.docker.com/reference/cli/docker/container/stats/)
|
||||
memCache := res.MemoryStats.Stats.InactiveFile
|
||||
if memCache == 0 {
|
||||
memCache = res.MemoryStats.Stats.Cache
|
||||
}
|
||||
usedMemory := res.MemoryStats.Usage - memCache
|
||||
|
||||
// cpu
|
||||
cpuDelta := res.CPUStats.CPUUsage.TotalUsage - stats.PrevCpu[0]
|
||||
systemDelta := res.CPUStats.SystemUsage - stats.PrevCpu[1]
|
||||
cpuPct := float64(cpuDelta) / float64(systemDelta) * 100
|
||||
if cpuPct > 100 {
|
||||
return fmt.Errorf("%s cpu pct greater than 100: %+v", name, cpuPct)
|
||||
}
|
||||
stats.PrevCpu = [2]uint64{res.CPUStats.CPUUsage.TotalUsage, res.CPUStats.SystemUsage}
|
||||
|
||||
// network
|
||||
var total_sent, total_recv uint64
|
||||
for _, v := range res.Networks {
|
||||
total_sent += v.TxBytes
|
||||
total_recv += v.RxBytes
|
||||
}
|
||||
var sent_delta, recv_delta float64
|
||||
// prevent first run from sending all prev sent/recv bytes
|
||||
if initialized {
|
||||
secondsElapsed := time.Since(stats.PrevNet.Time).Seconds()
|
||||
sent_delta = float64(total_sent-stats.PrevNet.Sent) / secondsElapsed
|
||||
recv_delta = float64(total_recv-stats.PrevNet.Recv) / secondsElapsed
|
||||
}
|
||||
stats.PrevNet.Sent = total_sent
|
||||
stats.PrevNet.Recv = total_recv
|
||||
stats.PrevNet.Time = time.Now()
|
||||
|
||||
stats.Cpu = twoDecimals(cpuPct)
|
||||
stats.Mem = bytesToMegabytes(float64(usedMemory))
|
||||
stats.NetworkSent = bytesToMegabytes(sent_delta)
|
||||
stats.NetworkRecv = bytesToMegabytes(recv_delta)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete container stats from map using mutex
|
||||
func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
||||
dm.containerStatsMutex.Lock()
|
||||
defer dm.containerStatsMutex.Unlock()
|
||||
delete(dm.containerStatsMap, id)
|
||||
}
|
||||
|
||||
// Creates a new http client for Docker API
|
||||
func newDockerManager() *dockerManager {
|
||||
dockerHost := "unix:///var/run/docker.sock"
|
||||
if dockerHostEnv, exists := os.LookupEnv("DOCKER_HOST"); exists {
|
||||
slog.Info("DOCKER_HOST", "host", dockerHostEnv)
|
||||
dockerHost = dockerHostEnv
|
||||
}
|
||||
|
||||
parsedURL, err := url.Parse(dockerHost)
|
||||
if err != nil {
|
||||
slog.Error("Error parsing DOCKER_HOST", "err", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
transport := &http.Transport{
|
||||
DisableCompression: true,
|
||||
MaxConnsPerHost: 0,
|
||||
}
|
||||
|
||||
switch parsedURL.Scheme {
|
||||
case "unix":
|
||||
transport.DialContext = func(ctx context.Context, proto, addr string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, "unix", parsedURL.Path)
|
||||
}
|
||||
case "tcp", "http", "https":
|
||||
transport.DialContext = func(ctx context.Context, proto, addr string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, "tcp", parsedURL.Host)
|
||||
}
|
||||
default:
|
||||
slog.Error("Invalid DOCKER_HOST", "scheme", parsedURL.Scheme)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
dockerClient := &dockerManager{
|
||||
client: &http.Client{
|
||||
Timeout: time.Millisecond * 1100,
|
||||
Transport: transport,
|
||||
},
|
||||
containerStatsMap: make(map[string]*container.Stats),
|
||||
}
|
||||
|
||||
// Make sure sem is initialized
|
||||
concurrency := 200
|
||||
defer func() { dockerClient.sem = make(chan struct{}, concurrency) }()
|
||||
|
||||
// Check docker version
|
||||
// (versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch)
|
||||
var versionInfo struct {
|
||||
Version string `json:"Version"`
|
||||
}
|
||||
resp, err := dockerClient.client.Get("http://localhost/version")
|
||||
if err != nil {
|
||||
return dockerClient
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&versionInfo); err != nil {
|
||||
return dockerClient
|
||||
}
|
||||
|
||||
// if version > 25, one-shot works correctly and we can limit concurrent connections / goroutines to 5
|
||||
if dockerVersion, err := semver.Parse(versionInfo.Version); err == nil && dockerVersion.Major > 24 {
|
||||
concurrency = 5
|
||||
}
|
||||
slog.Debug("Docker", "version", versionInfo.Version, "concurrency", concurrency)
|
||||
|
||||
return dockerClient
|
||||
}
|
||||
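As a quick sanity check of the container CPU math in updateContainerStats above (delta of the container's total CPU time divided by the delta of the host's system CPU time), here is a self-contained sketch with made-up counter values:

package main

import "fmt"

func main() {
	// hypothetical cumulative counters from two successive stats responses
	prevTotal, prevSystem := uint64(1_000_000), uint64(100_000_000)
	curTotal, curSystem := uint64(1_500_000), uint64(110_000_000)

	cpuDelta := curTotal - prevTotal      // CPU time used by the container
	systemDelta := curSystem - prevSystem // CPU time elapsed on the host
	cpuPct := float64(cpuDelta) / float64(systemDelta) * 100

	fmt.Printf("%.2f%%\n", cpuPct) // prints 5.00%
}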
67
beszel/internal/agent/network.go
Normal file
@@ -0,0 +1,67 @@
package agent

import (
"log/slog"
"os"
"strings"
"time"

psutilNet "github.com/shirou/gopsutil/v4/net"
)

func (a *Agent) initializeNetIoStats() {
// reset valid network interfaces
a.netInterfaces = make(map[string]struct{}, 0)

// map of network interface names passed in via NICS env var
var nicsMap map[string]struct{}
nics, nicsEnvExists := os.LookupEnv("NICS")
if nicsEnvExists {
nicsMap = make(map[string]struct{}, 0)
for _, nic := range strings.Split(nics, ",") {
nicsMap[nic] = struct{}{}
}
}

// reset network I/O stats
a.netIoStats.BytesSent = 0
a.netIoStats.BytesRecv = 0

// get initial network I/O stats
if netIO, err := psutilNet.IOCounters(true); err == nil {
a.netIoStats.Time = time.Now()
for _, v := range netIO {
switch {
// skip if nics exists and the interface is not in the list
case nicsEnvExists:
if _, nameInNics := nicsMap[v.Name]; !nameInNics {
continue
}
// otherwise run the interface name through the skipNetworkInterface function
default:
if a.skipNetworkInterface(v) {
continue
}
}
slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
a.netIoStats.BytesSent += v.BytesSent
a.netIoStats.BytesRecv += v.BytesRecv
// store as a valid network interface
a.netInterfaces[v.Name] = struct{}{}
}
}
}

func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
switch {
case strings.HasPrefix(v.Name, "lo"),
strings.HasPrefix(v.Name, "docker"),
strings.HasPrefix(v.Name, "br-"),
strings.HasPrefix(v.Name, "veth"),
v.BytesRecv == 0,
v.BytesSent == 0:
return true
default:
return false
}
}
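For reference, NICS above is treated as a comma-separated whitelist of interface names; when it is set, any interface not in the list is ignored. A small sketch of that filtering with hypothetical interface names:

package main

import (
	"fmt"
	"strings"
)

func main() {
	nics := "eth0,wlan0" // example value of the NICS env var
	whitelist := make(map[string]struct{})
	for _, nic := range strings.Split(nics, ",") {
		whitelist[nic] = struct{}{}
	}
	for _, name := range []string{"eth0", "docker0", "wlan0", "veth1a2b"} {
		if _, ok := whitelist[name]; ok {
			fmt.Println("counting", name)
		} else {
			fmt.Println("skipping", name)
		}
	}
}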
35
beszel/internal/agent/server.go
Normal file
@@ -0,0 +1,35 @@
package agent

import (
"encoding/json"
"log/slog"
"os"

sshServer "github.com/gliderlabs/ssh"
)

func (a *Agent) startServer(pubKey []byte, addr string) {
sshServer.Handle(a.handleSession)

slog.Info("Starting SSH server", "address", addr)
if err := sshServer.ListenAndServe(addr, nil, sshServer.NoPty(),
sshServer.PublicKeyAuth(func(ctx sshServer.Context, key sshServer.PublicKey) bool {
allowed, _, _, _, _ := sshServer.ParseAuthorizedKey(pubKey)
return sshServer.KeysEqual(key, allowed)
}),
); err != nil {
slog.Error("Error starting SSH server", "err", err)
os.Exit(1)
}
}

func (a *Agent) handleSession(s sshServer.Session) {
stats := a.gatherStats()
slog.Debug("Sending stats", "data", stats)
if err := json.NewEncoder(s).Encode(stats); err != nil {
slog.Error("Error encoding stats", "err", err)
s.Exit(1)
return
}
s.Exit(0)
}
185
beszel/internal/agent/system.go
Normal file
@@ -0,0 +1,185 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"beszel"
|
||||
"beszel/internal/entities/system"
|
||||
"log/slog"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/shirou/gopsutil/v4/cpu"
|
||||
"github.com/shirou/gopsutil/v4/disk"
|
||||
"github.com/shirou/gopsutil/v4/host"
|
||||
"github.com/shirou/gopsutil/v4/mem"
|
||||
psutilNet "github.com/shirou/gopsutil/v4/net"
|
||||
"github.com/shirou/gopsutil/v4/sensors"
|
||||
)
|
||||
|
||||
// Sets initial / non-changing values about the host system
|
||||
func (a *Agent) initializeSystemInfo() {
|
||||
a.systemInfo.AgentVersion = beszel.Version
|
||||
a.systemInfo.Hostname, _ = os.Hostname()
|
||||
a.systemInfo.KernelVersion, _ = host.KernelVersion()
|
||||
|
||||
// cpu model
|
||||
if info, err := cpu.Info(); err == nil && len(info) > 0 {
|
||||
a.systemInfo.CpuModel = info[0].ModelName
|
||||
}
|
||||
// cores / threads
|
||||
a.systemInfo.Cores, _ = cpu.Counts(false)
|
||||
if threads, err := cpu.Counts(true); err == nil {
|
||||
if threads > 0 && threads < a.systemInfo.Cores {
|
||||
// in lxc logical cores reflects container limits, so use that as cores if lower
|
||||
a.systemInfo.Cores = threads
|
||||
} else {
|
||||
a.systemInfo.Threads = threads
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Returns current info, stats about the host system
|
||||
func (a *Agent) getSystemStats() system.Stats {
|
||||
systemStats := system.Stats{}
|
||||
|
||||
// cpu percent
|
||||
cpuPct, err := cpu.Percent(0, false)
|
||||
if err != nil {
|
||||
slog.Error("Error getting cpu percent", "err", err)
|
||||
} else if len(cpuPct) > 0 {
|
||||
systemStats.Cpu = twoDecimals(cpuPct[0])
|
||||
}
|
||||
|
||||
// memory
|
||||
if v, err := mem.VirtualMemory(); err == nil {
|
||||
systemStats.Mem = bytesToGigabytes(v.Total)
|
||||
systemStats.MemUsed = bytesToGigabytes(v.Used)
|
||||
systemStats.MemBuffCache = bytesToGigabytes(v.Total - v.Free - v.Used)
|
||||
systemStats.MemPct = twoDecimals(v.UsedPercent)
|
||||
systemStats.Swap = bytesToGigabytes(v.SwapTotal)
|
||||
systemStats.SwapUsed = bytesToGigabytes(v.SwapTotal - v.SwapFree)
|
||||
}
|
||||
|
||||
// disk usage
|
||||
for _, stats := range a.fsStats {
|
||||
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
||||
stats.DiskTotal = bytesToGigabytes(d.Total)
|
||||
stats.DiskUsed = bytesToGigabytes(d.Used)
|
||||
if stats.Root {
|
||||
systemStats.DiskTotal = bytesToGigabytes(d.Total)
|
||||
systemStats.DiskUsed = bytesToGigabytes(d.Used)
|
||||
systemStats.DiskPct = twoDecimals(d.UsedPercent)
|
||||
}
|
||||
} else {
|
||||
// reset stats if error (likely unmounted)
|
||||
slog.Error("Error getting disk stats", "name", stats.Mountpoint, "err", err)
|
||||
stats.DiskTotal = 0
|
||||
stats.DiskUsed = 0
|
||||
stats.TotalRead = 0
|
||||
stats.TotalWrite = 0
|
||||
}
|
||||
}
|
||||
|
||||
// disk i/o
|
||||
if ioCounters, err := disk.IOCounters(a.fsNames...); err == nil {
|
||||
for _, d := range ioCounters {
|
||||
stats := a.fsStats[d.Name]
|
||||
if stats == nil {
|
||||
continue
|
||||
}
|
||||
secondsElapsed := time.Since(stats.Time).Seconds()
|
||||
readPerSecond := float64(d.ReadBytes-stats.TotalRead) / secondsElapsed
|
||||
writePerSecond := float64(d.WriteBytes-stats.TotalWrite) / secondsElapsed
|
||||
stats.Time = time.Now()
|
||||
stats.DiskReadPs = bytesToMegabytes(readPerSecond)
|
||||
stats.DiskWritePs = bytesToMegabytes(writePerSecond)
|
||||
stats.TotalRead = d.ReadBytes
|
||||
stats.TotalWrite = d.WriteBytes
|
||||
// if root filesystem, update system stats
|
||||
if stats.Root {
|
||||
systemStats.DiskReadPs = stats.DiskReadPs
|
||||
systemStats.DiskWritePs = stats.DiskWritePs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// network stats
|
||||
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
||||
secondsElapsed := time.Since(a.netIoStats.Time).Seconds()
|
||||
a.netIoStats.Time = time.Now()
|
||||
bytesSent := uint64(0)
|
||||
bytesRecv := uint64(0)
|
||||
// sum all bytes sent and received
|
||||
for _, v := range netIO {
|
||||
// skip if not in valid network interfaces list
|
||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||
continue
|
||||
}
|
||||
bytesSent += v.BytesSent
|
||||
bytesRecv += v.BytesRecv
|
||||
}
|
||||
// add to systemStats
|
||||
sentPerSecond := float64(bytesSent-a.netIoStats.BytesSent) / secondsElapsed
|
||||
recvPerSecond := float64(bytesRecv-a.netIoStats.BytesRecv) / secondsElapsed
|
||||
networkSentPs := bytesToMegabytes(sentPerSecond)
|
||||
networkRecvPs := bytesToMegabytes(recvPerSecond)
|
||||
// add check for issue (#150) where sent is a massive number
|
||||
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
|
||||
slog.Warn("Invalid network stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
|
||||
for _, v := range netIO {
|
||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||
continue
|
||||
}
|
||||
slog.Info(v.Name, "recv", v.BytesRecv, "sent", v.BytesSent)
|
||||
}
|
||||
// reset network I/O stats
|
||||
a.initializeNetIoStats()
|
||||
} else {
|
||||
systemStats.NetworkSent = networkSentPs
|
||||
systemStats.NetworkRecv = networkRecvPs
|
||||
// update netIoStats
|
||||
a.netIoStats.BytesSent = bytesSent
|
||||
a.netIoStats.BytesRecv = bytesRecv
|
||||
}
|
||||
}
|
||||
|
||||
// temperatures
|
||||
temps, err := sensors.TemperaturesWithContext(a.sensorsContext)
|
||||
if err != nil && a.debug {
|
||||
err.(*sensors.Warnings).Verbose = true
|
||||
slog.Debug("Sensor error", "errs", err)
|
||||
}
|
||||
if len(temps) > 0 {
|
||||
slog.Debug("Temperatures", "data", temps)
|
||||
systemStats.Temperatures = make(map[string]float64, len(temps))
|
||||
for i, sensor := range temps {
|
||||
// skip if temperature is 0
|
||||
if sensor.Temperature == 0 {
|
||||
continue
|
||||
}
|
||||
if _, ok := systemStats.Temperatures[sensor.SensorKey]; ok {
|
||||
// if key already exists, append int to key
|
||||
systemStats.Temperatures[sensor.SensorKey+"_"+strconv.Itoa(i)] = twoDecimals(sensor.Temperature)
|
||||
} else {
|
||||
systemStats.Temperatures[sensor.SensorKey] = twoDecimals(sensor.Temperature)
|
||||
}
|
||||
}
|
||||
// remove sensors from systemStats if whitelist exists and sensor is not in whitelist
|
||||
// (do this here instead of in initial loop so we have correct keys if int was appended)
|
||||
if a.sensorsWhitelist != nil {
|
||||
for key := range systemStats.Temperatures {
|
||||
if _, nameInWhitelist := a.sensorsWhitelist[key]; !nameInWhitelist {
|
||||
delete(systemStats.Temperatures, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update base system info
|
||||
a.systemInfo.Cpu = systemStats.Cpu
|
||||
a.systemInfo.MemPct = systemStats.MemPct
|
||||
a.systemInfo.DiskPct = systemStats.DiskPct
|
||||
a.systemInfo.Uptime, _ = host.Uptime()
|
||||
|
||||
return systemStats
|
||||
}
|
||||
15
beszel/internal/agent/utils.go
Normal file
@@ -0,0 +1,15 @@
package agent

import "math"

func bytesToMegabytes(b float64) float64 {
return twoDecimals(b / 1048576)
}

func bytesToGigabytes(b uint64) float64 {
return twoDecimals(float64(b) / 1073741824)
}

func twoDecimals(value float64) float64 {
return math.Round(value*100) / 100
}
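A quick usage check of these helpers (input values chosen arbitrarily; the bodies are repeated so the snippet runs on its own):

package main

import (
	"fmt"
	"math"
)

func twoDecimals(value float64) float64 { return math.Round(value*100) / 100 }

func bytesToGigabytes(b uint64) float64 { return twoDecimals(float64(b) / 1073741824) }

func main() {
	fmt.Println(bytesToGigabytes(16106127360)) // 15
	fmt.Println(twoDecimals(3.14159))          // 3.14
}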
@@ -39,46 +39,31 @@ func NewAlertManager(app *pocketbase.PocketBase) *AlertManager {
|
||||
}
|
||||
}
|
||||
|
||||
func (am *AlertManager) HandleSystemAlerts(newStatus string, newRecord *models.Record, oldRecord *models.Record) {
|
||||
func (am *AlertManager) HandleSystemInfoAlerts(systemRecord *models.Record, systemInfo system.Info) {
|
||||
alertRecords, err := am.app.Dao().FindRecordsByExpr("alerts",
|
||||
dbx.NewExp("system = {:system}", dbx.Params{"system": oldRecord.GetId()}),
|
||||
dbx.NewExp("system={:system}", dbx.Params{"system": systemRecord.GetId()}),
|
||||
)
|
||||
if err != nil || len(alertRecords) == 0 {
|
||||
// log.Println("no alerts found for system")
|
||||
return
|
||||
}
|
||||
// log.Println("found alerts", len(alertRecords))
|
||||
var systemInfo *system.Info
|
||||
for _, alertRecord := range alertRecords {
|
||||
name := alertRecord.GetString("name")
|
||||
switch name {
|
||||
case "Status":
|
||||
am.handleStatusAlerts(newStatus, oldRecord, alertRecord)
|
||||
case "CPU", "Memory", "Disk":
|
||||
if newStatus != "up" {
|
||||
continue
|
||||
}
|
||||
if systemInfo == nil {
|
||||
systemInfo = getSystemInfo(newRecord)
|
||||
}
|
||||
if name == "CPU" {
|
||||
am.handleSlidingValueAlert(newRecord, alertRecord, name, systemInfo.Cpu)
|
||||
am.handleSlidingValueAlert(systemRecord, alertRecord, name, systemInfo.Cpu)
|
||||
} else if name == "Memory" {
|
||||
am.handleSlidingValueAlert(newRecord, alertRecord, name, systemInfo.MemPct)
|
||||
am.handleSlidingValueAlert(systemRecord, alertRecord, name, systemInfo.MemPct)
|
||||
} else if name == "Disk" {
|
||||
am.handleSlidingValueAlert(newRecord, alertRecord, name, systemInfo.DiskPct)
|
||||
am.handleSlidingValueAlert(systemRecord, alertRecord, name, systemInfo.DiskPct)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getSystemInfo(record *models.Record) *system.Info {
|
||||
var SystemInfo system.Info
|
||||
record.UnmarshalJSONField("info", &SystemInfo)
|
||||
return &SystemInfo
|
||||
}
|
||||
|
||||
func (am *AlertManager) handleSlidingValueAlert(newRecord *models.Record, alertRecord *models.Record, name string, curValue float64) {
|
||||
func (am *AlertManager) handleSlidingValueAlert(systemRecord *models.Record, alertRecord *models.Record, name string, curValue float64) {
|
||||
triggered := alertRecord.GetBool("triggered")
|
||||
threshold := alertRecord.GetFloat("value")
|
||||
// fmt.Println(name, curValue, "threshold", threshold, "triggered", triggered)
|
||||
@@ -87,12 +72,12 @@ func (am *AlertManager) handleSlidingValueAlert(newRecord *models.Record, alertR
|
||||
var systemName string
|
||||
if !triggered && curValue > threshold {
|
||||
alertRecord.Set("triggered", true)
|
||||
systemName = newRecord.GetString("name")
|
||||
systemName = systemRecord.GetString("name")
|
||||
subject = fmt.Sprintf("%s usage above threshold on %s", name, systemName)
|
||||
body = fmt.Sprintf("%s usage on %s is %.1f%%.", name, systemName, curValue)
|
||||
} else if triggered && curValue <= threshold {
|
||||
alertRecord.Set("triggered", false)
|
||||
systemName = newRecord.GetString("name")
|
||||
systemName = systemRecord.GetString("name")
|
||||
subject = fmt.Sprintf("%s usage below threshold on %s", name, systemName)
|
||||
body = fmt.Sprintf("%s usage on %s is below threshold at %.1f%%.", name, systemName, curValue)
|
||||
} else {
|
||||
@@ -119,42 +104,55 @@ func (am *AlertManager) handleSlidingValueAlert(newRecord *models.Record, alertR
|
||||
}
|
||||
}
|
||||
|
||||
func (am *AlertManager) handleStatusAlerts(newStatus string, oldRecord *models.Record, alertRecord *models.Record) error {
|
||||
func (am *AlertManager) HandleStatusAlerts(newStatus string, oldSystemRecord *models.Record) error {
|
||||
var alertStatus string
|
||||
switch newStatus {
|
||||
case "up":
|
||||
if oldRecord.GetString("status") == "down" {
|
||||
if oldSystemRecord.GetString("status") == "down" {
|
||||
alertStatus = "up"
|
||||
}
|
||||
case "down":
|
||||
if oldRecord.GetString("status") == "up" {
|
||||
if oldSystemRecord.GetString("status") == "up" {
|
||||
alertStatus = "down"
|
||||
}
|
||||
}
|
||||
if alertStatus == "" {
|
||||
return nil
|
||||
}
|
||||
// expand the user relation
|
||||
if errs := am.app.Dao().ExpandRecord(alertRecord, []string{"user"}, nil); len(errs) > 0 {
|
||||
return fmt.Errorf("failed to expand: %v", errs)
|
||||
}
|
||||
user := alertRecord.ExpandedOne("user")
|
||||
if user == nil {
|
||||
// check if use
|
||||
alertRecords, err := am.app.Dao().FindRecordsByExpr("alerts",
|
||||
dbx.HashExp{
|
||||
"system": oldSystemRecord.GetId(),
|
||||
"name": "Status",
|
||||
},
|
||||
)
|
||||
if err != nil || len(alertRecords) == 0 {
|
||||
// log.Println("no alerts found for system")
|
||||
return nil
|
||||
}
|
||||
emoji := "\U0001F534"
|
||||
if alertStatus == "up" {
|
||||
emoji = "\u2705"
|
||||
for _, alertRecord := range alertRecords {
|
||||
// expand the user relation
|
||||
if errs := am.app.Dao().ExpandRecord(alertRecord, []string{"user"}, nil); len(errs) > 0 {
|
||||
return fmt.Errorf("failed to expand: %v", errs)
|
||||
}
|
||||
user := alertRecord.ExpandedOne("user")
|
||||
if user == nil {
|
||||
return nil
|
||||
}
|
||||
emoji := "\U0001F534"
|
||||
if alertStatus == "up" {
|
||||
emoji = "\u2705"
|
||||
}
|
||||
// send alert
|
||||
systemName := oldSystemRecord.GetString("name")
|
||||
am.sendAlert(AlertData{
|
||||
UserID: user.GetId(),
|
||||
Title: fmt.Sprintf("Connection to %s is %s %v", systemName, alertStatus, emoji),
|
||||
Message: fmt.Sprintf("Connection to %s is %s", systemName, alertStatus),
|
||||
Link: am.app.Settings().Meta.AppUrl + "/system/" + url.QueryEscape(systemName),
|
||||
LinkText: "View " + systemName,
|
||||
})
|
||||
}
|
||||
// send alert
|
||||
systemName := oldRecord.GetString("name")
|
||||
am.sendAlert(AlertData{
|
||||
UserID: user.GetId(),
|
||||
Title: fmt.Sprintf("Connection to %s is %s %v", systemName, alertStatus, emoji),
|
||||
Message: fmt.Sprintf("Connection to %s is %s", systemName, alertStatus),
|
||||
Link: am.app.Settings().Meta.AppUrl + "/system/" + url.QueryEscape(systemName),
|
||||
LinkText: "View " + systemName,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -85,15 +85,13 @@ type CPUUsage struct {
|
||||
}
|
||||
|
||||
type MemoryStats struct {
|
||||
|
||||
// current res_counter usage for memory
|
||||
Usage uint64 `json:"usage,omitempty"`
|
||||
Cache uint64 `json:"cache,omitempty"`
|
||||
// all the stats exported via memory.stat.
|
||||
Stats MemoryStatsStats `json:"stats,omitempty"`
|
||||
// maximum usage ever recorded.
|
||||
// MaxUsage uint64 `json:"max_usage,omitempty"`
|
||||
// TODO(vishh): Export these as stronger types.
|
||||
// all the stats exported via memory.stat.
|
||||
Stats map[string]uint64 `json:"stats,omitempty"`
|
||||
// number of times memory usage hits limits.
|
||||
// Failcnt uint64 `json:"failcnt,omitempty"`
|
||||
// Limit uint64 `json:"limit,omitempty"`
|
||||
@@ -106,6 +104,11 @@ type MemoryStats struct {
|
||||
// PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
|
||||
}
|
||||
|
||||
type MemoryStatsStats struct {
|
||||
Cache uint64 `json:"cache,omitempty"`
|
||||
InactiveFile uint64 `json:"inactive_file,omitempty"`
|
||||
}
|
||||
|
||||
type NetworkStats struct {
|
||||
// Bytes received. Windows and Linux.
|
||||
RxBytes uint64 `json:"rx_bytes"`
|
||||
@@ -113,21 +116,19 @@ type NetworkStats struct {
|
||||
TxBytes uint64 `json:"tx_bytes"`
|
||||
}
|
||||
|
||||
// Container stats to return to the hub
|
||||
type Stats struct {
|
||||
Name string `json:"n"`
|
||||
Cpu float64 `json:"c"`
|
||||
Mem float64 `json:"m"`
|
||||
NetworkSent float64 `json:"ns"`
|
||||
NetworkRecv float64 `json:"nr"`
|
||||
type prevNetStats struct {
|
||||
Sent uint64
|
||||
Recv uint64
|
||||
Time time.Time
|
||||
}
|
||||
|
||||
// Keeps track of container stats from previous run
|
||||
type PrevContainerStats struct {
|
||||
Cpu [2]uint64
|
||||
Net struct {
|
||||
Sent uint64
|
||||
Recv uint64
|
||||
Time time.Time
|
||||
}
|
||||
// Docker container stats
|
||||
type Stats struct {
|
||||
Name string `json:"n"`
|
||||
Cpu float64 `json:"c"`
|
||||
Mem float64 `json:"m"`
|
||||
NetworkSent float64 `json:"ns"`
|
||||
NetworkRecv float64 `json:"nr"`
|
||||
PrevCpu [2]uint64 `json:"-"`
|
||||
PrevNet prevNetStats `json:"-"`
|
||||
}
|
||||
|
||||
@@ -26,7 +26,6 @@ type Stats struct {
|
||||
|
||||
type FsStats struct {
|
||||
Time time.Time `json:"-"`
|
||||
Device string `json:"-"`
|
||||
Root bool `json:"-"`
|
||||
Mountpoint string `json:"-"`
|
||||
DiskTotal float64 `json:"d"`
|
||||
@@ -45,22 +44,21 @@ type NetIoStats struct {
|
||||
}
|
||||
|
||||
type Info struct {
|
||||
Hostname string `json:"h"`
|
||||
|
||||
Cores int `json:"c"`
|
||||
Threads int `json:"t"`
|
||||
CpuModel string `json:"m"`
|
||||
// Os string `json:"o"`
|
||||
Uptime uint64 `json:"u"`
|
||||
Cpu float64 `json:"cpu"`
|
||||
MemPct float64 `json:"mp"`
|
||||
DiskPct float64 `json:"dp"`
|
||||
AgentVersion string `json:"v"`
|
||||
Hostname string `json:"h"`
|
||||
KernelVersion string `json:"k,omitempty"`
|
||||
Cores int `json:"c"`
|
||||
Threads int `json:"t,omitempty"`
|
||||
CpuModel string `json:"m"`
|
||||
Uptime uint64 `json:"u"`
|
||||
Cpu float64 `json:"cpu"`
|
||||
MemPct float64 `json:"mp"`
|
||||
DiskPct float64 `json:"dp"`
|
||||
AgentVersion string `json:"v"`
|
||||
}
|
||||
|
||||
// Final data structure to return to the hub
|
||||
type CombinedData struct {
|
||||
Stats Stats `json:"stats"`
|
||||
Info Info `json:"info"`
|
||||
Containers []container.Stats `json:"container"`
|
||||
Stats Stats `json:"stats"`
|
||||
Info Info `json:"info"`
|
||||
Containers []*container.Stats `json:"container"`
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"beszel/internal/records"
|
||||
"beszel/internal/users"
|
||||
"beszel/site"
|
||||
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"encoding/pem"
|
||||
@@ -22,7 +23,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
|
||||
"github.com/labstack/echo/v5"
|
||||
"github.com/pocketbase/pocketbase"
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
@@ -39,6 +39,9 @@ type Hub struct {
|
||||
systemConnections map[string]*ssh.Client
|
||||
sshClientConfig *ssh.ClientConfig
|
||||
pubKey string
|
||||
am *alerts.AlertManager
|
||||
um *users.UserManager
|
||||
rm *records.RecordManager
|
||||
}
|
||||
|
||||
func NewHub(app *pocketbase.PocketBase) *Hub {
|
||||
@@ -46,13 +49,16 @@ func NewHub(app *pocketbase.PocketBase) *Hub {
|
||||
app: app,
|
||||
connectionLock: &sync.Mutex{},
|
||||
systemConnections: make(map[string]*ssh.Client),
|
||||
am: alerts.NewAlertManager(app),
|
||||
um: users.NewUserManager(app),
|
||||
rm: records.NewRecordManager(app),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hub) Run() {
|
||||
rm := records.NewRecordManager(h.app)
|
||||
am := alerts.NewAlertManager(h.app)
|
||||
um := users.NewUserManager(h.app)
|
||||
// rm := records.NewRecordManager(h.app)
|
||||
// am := alerts.NewAlertManager(h.app)
|
||||
// um := users.NewUserManager(h.app)
|
||||
|
||||
// loosely check if it was executed using "go run"
|
||||
isGoRun := strings.HasPrefix(os.Args[0], os.TempDir())
|
||||
@@ -90,7 +96,7 @@ func (h *Hub) Run() {
|
||||
return nil
|
||||
})
|
||||
|
||||
// serve site
|
||||
// serve web ui
|
||||
h.app.OnBeforeServe().Add(func(e *core.ServeEvent) error {
|
||||
switch isGoRun {
|
||||
case true:
|
||||
@@ -98,12 +104,17 @@ func (h *Hub) Run() {
|
||||
Scheme: "http",
|
||||
Host: "localhost:5173",
|
||||
})
|
||||
e.Router.GET("/static/*", apis.StaticDirectoryHandler(os.DirFS("../../site/public/static"), false))
|
||||
e.Router.Any("/*", echo.WrapHandler(proxy))
|
||||
// e.Router.Any("/", echo.WrapHandler(proxy))
|
||||
default:
|
||||
e.Router.GET("/static/*", apis.StaticDirectoryHandler(site.Static, false))
|
||||
e.Router.Any("/*", apis.StaticDirectoryHandler(site.Dist, true))
|
||||
csp, cspExists := os.LookupEnv("CSP")
|
||||
e.Router.Any("/*", func(c echo.Context) error {
|
||||
if cspExists {
|
||||
c.Response().Header().Del("X-Frame-Options")
|
||||
c.Response().Header().Set("Content-Security-Policy", csp)
|
||||
}
|
||||
indexFallback := !strings.HasPrefix(c.Request().URL.Path, "/static/")
|
||||
return apis.StaticDirectoryHandler(site.Dist, indexFallback)(c)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -115,9 +126,9 @@ func (h *Hub) Run() {
|
||||
// set up cron jobs
|
||||
scheduler := cron.New()
|
||||
// delete old records once every hour
|
||||
scheduler.MustAdd("delete old records", "8 * * * *", rm.DeleteOldRecords)
|
||||
scheduler.MustAdd("delete old records", "8 * * * *", h.rm.DeleteOldRecords)
|
||||
// create longer records every 10 minutes
|
||||
scheduler.MustAdd("create longer records", "*/10 * * * *", rm.CreateLongerRecords)
|
||||
scheduler.MustAdd("create longer records", "*/10 * * * *", h.rm.CreateLongerRecords)
|
||||
scheduler.Start()
|
||||
return nil
|
||||
})
|
||||
@@ -141,7 +152,7 @@ func (h *Hub) Run() {
|
||||
return c.JSON(http.StatusOK, map[string]bool{"firstRun": adminNum == 0})
|
||||
})
|
||||
// send test notification
|
||||
e.Router.GET("/api/beszel/send-test-notification", am.SendTestNotification)
|
||||
e.Router.GET("/api/beszel/send-test-notification", h.am.SendTestNotification)
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -160,8 +171,8 @@ func (h *Hub) Run() {
|
||||
})
|
||||
|
||||
// handle default values for user / user_settings creation
|
||||
h.app.OnModelBeforeCreate("users").Add(um.InitializeUserRole)
|
||||
h.app.OnModelBeforeCreate("user_settings").Add(um.InitializeUserSettings)
|
||||
h.app.OnModelBeforeCreate("users").Add(h.um.InitializeUserRole)
|
||||
h.app.OnModelBeforeCreate("user_settings").Add(h.um.InitializeUserSettings)
|
||||
|
||||
// do things after a systems record is updated
|
||||
h.app.OnModelAfterUpdate("systems").Add(func(e *core.ModelEvent) error {
|
||||
@@ -177,10 +188,11 @@ func (h *Hub) Run() {
|
||||
// if system is set to pending (unpause), try to connect immediately
|
||||
if newStatus == "pending" {
|
||||
go h.updateSystem(newRecord)
|
||||
} else {
|
||||
h.am.HandleStatusAlerts(newStatus, oldRecord)
|
||||
|
||||
}
|
||||
|
||||
// alerts
|
||||
am.HandleSystemAlerts(newStatus, newRecord, oldRecord)
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -256,7 +268,7 @@ func (h *Hub) updateSystem(record *models.Record) {
|
||||
}
|
||||
// get system stats from agent
|
||||
var systemData system.CombinedData
|
||||
if err := requestJsonFromAgent(client, &systemData); err != nil {
|
||||
if err := h.requestJsonFromAgent(client, &systemData); err != nil {
|
||||
if err.Error() == "bad client" {
|
||||
// if previous connection was closed, try again
|
||||
h.app.Logger().Error("Existing SSH connection closed. Retrying...", "host", record.GetString("host"), "port", record.GetString("port"))
|
||||
@@ -294,6 +306,8 @@ func (h *Hub) updateSystem(record *models.Record) {
|
||||
h.app.Logger().Error("Failed to save record: ", "err", err.Error())
|
||||
}
|
||||
}
|
||||
// system info alerts (todo: temp alerts, extra fs alerts)
|
||||
h.am.HandleSystemInfoAlerts(record, systemData.Info)
|
||||
}
|
||||
|
||||
// set system to specified status and save record
|
||||
@@ -349,7 +363,8 @@ func (h *Hub) createSSHClientConfig() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func requestJsonFromAgent(client *ssh.Client, systemData *system.CombinedData) error {
|
||||
// Fetches system stats from the agent and decodes the json data into the provided struct
|
||||
func (h *Hub) requestJsonFromAgent(client *ssh.Client, systemData *system.CombinedData) error {
|
||||
session, err := newSessionWithTimeout(client, 5*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad client")
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/pocketbase/pocketbase"
|
||||
"github.com/pocketbase/pocketbase/daos"
|
||||
"github.com/pocketbase/pocketbase/models"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
)
|
||||
|
||||
type RecordManager struct {
|
||||
@@ -143,9 +144,10 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
|
||||
// Calculate the average stats of a list of system_stats records without reflect
|
||||
func (rm *RecordManager) AverageSystemStats(records []*models.Record) system.Stats {
|
||||
var sum system.Stats
|
||||
sum.Temperatures = make(map[string]float64)
|
||||
sum.ExtraFs = make(map[string]*system.FsStats)
|
||||
sum := system.Stats{
|
||||
Temperatures: make(map[string]float64),
|
||||
ExtraFs: make(map[string]*system.FsStats),
|
||||
}
|
||||
|
||||
count := float64(len(records))
|
||||
// use different counter for temps in case some records don't have them
|
||||
@@ -232,7 +234,7 @@ func (rm *RecordManager) AverageSystemStats(records []*models.Record) system.Sta
|
||||
}
|
||||
|
||||
// Calculate the average stats of a list of container_stats records
|
||||
func (rm *RecordManager) AverageContainerStats(records []*models.Record) (stats []container.Stats) {
|
||||
func (rm *RecordManager) AverageContainerStats(records []*models.Record) []container.Stats {
|
||||
sums := make(map[string]*container.Stats)
|
||||
count := float64(len(records))
|
||||
|
||||
@@ -241,7 +243,7 @@ func (rm *RecordManager) AverageContainerStats(records []*models.Record) (stats
|
||||
record.UnmarshalJSONField("stats", &containerStats)
|
||||
for _, stat := range containerStats {
|
||||
if _, ok := sums[stat.Name]; !ok {
|
||||
sums[stat.Name] = &container.Stats{Name: stat.Name, Cpu: 0, Mem: 0}
|
||||
sums[stat.Name] = &container.Stats{Name: stat.Name}
|
||||
}
|
||||
sums[stat.Name].Cpu += stat.Cpu
|
||||
sums[stat.Name].Mem += stat.Mem
|
||||
@@ -250,8 +252,9 @@ func (rm *RecordManager) AverageContainerStats(records []*models.Record) (stats
|
||||
}
|
||||
}
|
||||
|
||||
result := make([]container.Stats, 0, len(sums))
|
||||
for _, value := range sums {
|
||||
stats = append(stats, container.Stats{
|
||||
result = append(result, container.Stats{
|
||||
Name: value.Name,
|
||||
Cpu: twoDecimals(value.Cpu / count),
|
||||
Mem: twoDecimals(value.Mem / count),
|
||||
@@ -259,11 +262,11 @@ func (rm *RecordManager) AverageContainerStats(records []*models.Record) (stats
|
||||
NetworkRecv: twoDecimals(value.NetworkRecv / count),
|
||||
})
|
||||
}
|
||||
return stats
|
||||
return result
|
||||
}
|
||||
|
||||
// Deletes records older than what is displayed in the UI
|
||||
func (rm *RecordManager) DeleteOldRecords() {
|
||||
// start := time.Now()
|
||||
collections := []string{"system_stats", "container_stats"}
|
||||
recordData := []RecordDeletionData{
|
||||
{
|
||||
@@ -287,29 +290,17 @@ func (rm *RecordManager) DeleteOldRecords() {
|
||||
retention: 30 * 24 * time.Hour,
|
||||
},
|
||||
}
|
||||
rm.app.Dao().RunInTransaction(func(txDao *daos.Dao) error {
|
||||
for _, recordData := range recordData {
|
||||
exp := dbx.NewExp(
|
||||
"type = {:type} AND created < {:created}",
|
||||
dbx.Params{"type": recordData.recordType, "created": time.Now().UTC().Add(-recordData.retention)},
|
||||
)
|
||||
for _, collectionSlug := range collections {
|
||||
collectionRecords, err := txDao.FindRecordsByExpr(collectionSlug, exp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, record := range collectionRecords {
|
||||
err := txDao.DeleteRecord(record)
|
||||
if err != nil {
|
||||
rm.app.Logger().Error("Failed to delete records", "err", err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
db := rm.app.Dao().NonconcurrentDB()
|
||||
for _, recordData := range recordData {
|
||||
for _, collectionSlug := range collections {
|
||||
formattedDate := time.Now().UTC().Add(-recordData.retention).Format(types.DefaultDateLayout)
|
||||
expr := dbx.NewExp("[[created]] < {:date} AND [[type]] = {:type}", dbx.Params{"date": formattedDate, "type": recordData.recordType})
|
||||
_, err := db.Delete(collectionSlug, expr).Execute()
|
||||
if err != nil {
|
||||
rm.app.Logger().Error("Failed to delete records", "err", err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
// log.Println("finished deleting old records", "time (ms)", time.Since(start).Milliseconds())
|
||||
}
|
||||
}
|
||||
|
||||
/* Round float to two decimals */
|
||||
|
||||
Binary file not shown.
@@ -11,5 +11,3 @@ import (
|
||||
var assets embed.FS
|
||||
|
||||
var Dist = echo.MustSubFS(assets, "dist")
|
||||
|
||||
var Static = echo.MustSubFS(assets, "dist/static")
|
||||
|
||||
4216
beszel/site/package-lock.json
generated
Normal file
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"name": "site",
|
||||
"name": "beszel",
|
||||
"private": true,
|
||||
"version": "0.0.0",
|
||||
"type": "module",
|
||||
@@ -23,7 +23,7 @@
|
||||
"@radix-ui/react-toast": "^1.2.1",
|
||||
"@radix-ui/react-tooltip": "^1.1.2",
|
||||
"@tanstack/react-table": "^8.20.5",
|
||||
"@vitejs/plugin-react": "^4.3.1",
|
||||
"@vitejs/plugin-react": "^4.3.2",
|
||||
"class-variance-authority": "^0.7.0",
|
||||
"clsx": "^2.1.1",
|
||||
"cmdk": "^1.0.0",
|
||||
@@ -34,20 +34,19 @@
|
||||
"pocketbase": "^0.21.5",
|
||||
"react": "^18.3.1",
|
||||
"react-dom": "^18.3.1",
|
||||
"recharts": "^2.13.0-alpha.4",
|
||||
"recharts": "^2.13.0-alpha.5",
|
||||
"tailwind-merge": "^2.5.2",
|
||||
"tailwindcss-animate": "^1.0.7",
|
||||
"use-is-in-viewport": "^1.0.9",
|
||||
"valibot": "^0.36.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bun": "^1.1.8",
|
||||
"@types/react": "^18.3.5",
|
||||
"@types/bun": "^1.1.10",
|
||||
"@types/react": "^18.3.10",
|
||||
"@types/react-dom": "^18.3.0",
|
||||
"autoprefixer": "^10.4.20",
|
||||
"postcss": "^8.4.44",
|
||||
"tailwindcss": "^3.4.10",
|
||||
"typescript": "^5.5.4",
|
||||
"vite": "^5.4.2"
|
||||
"postcss": "^8.4.47",
|
||||
"tailwindcss": "^3.4.13",
|
||||
"typescript": "^5.6.2",
|
||||
"vite": "^5.4.8"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,17 @@
|
||||
import { Area, AreaChart, CartesianGrid, XAxis, YAxis } from 'recharts'
|
||||
|
||||
import { ChartContainer, ChartTooltip, ChartTooltipContent } from '@/components/ui/chart'
|
||||
import { useYAxisWidth, chartTimeData, cn, formatShortDate, twoDecimalString } from '@/lib/utils'
|
||||
import { useMemo } from 'react'
|
||||
import {
|
||||
useYAxisWidth,
|
||||
chartTimeData,
|
||||
cn,
|
||||
formatShortDate,
|
||||
twoDecimalString,
|
||||
toFixedFloat,
|
||||
getSizeVal,
|
||||
getSizeUnit,
|
||||
} from '@/lib/utils'
|
||||
// import { useMemo } from 'react'
|
||||
// import Spinner from '../spinner'
|
||||
import { useStore } from '@nanostores/react'
|
||||
import { $chartTime } from '@/lib/stores'
|
||||
@@ -11,17 +20,17 @@ import { SystemStatsRecord } from '@/types'
|
||||
export default function DiskChart({
|
||||
ticks,
|
||||
systemData,
|
||||
dataKey,
|
||||
diskSize,
|
||||
}: {
|
||||
ticks: number[]
|
||||
systemData: SystemStatsRecord[]
|
||||
dataKey: string
|
||||
diskSize: number
|
||||
}) {
|
||||
const chartTime = useStore($chartTime)
|
||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||
|
||||
const diskSize = useMemo(() => {
|
||||
return Math.round(systemData.at(-1)?.stats.d ?? NaN)
|
||||
}, [systemData])
|
||||
|
||||
return (
|
||||
<div>
|
||||
{/* {!yAxisSet && <Spinner />} */}
|
||||
@@ -50,7 +59,9 @@ export default function DiskChart({
|
||||
minTickGap={6}
|
||||
tickLine={false}
|
||||
axisLine={false}
|
||||
tickFormatter={(value) => updateYAxisWidth(value + ' GB')}
|
||||
tickFormatter={(value) =>
|
||||
updateYAxisWidth(toFixedFloat(getSizeVal(value), 2) + getSizeUnit(value))
|
||||
}
|
||||
/>
|
||||
<XAxis
|
||||
dataKey="created"
|
||||
@@ -69,13 +80,15 @@ export default function DiskChart({
|
||||
content={
|
||||
<ChartTooltipContent
|
||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||
contentFormatter={(item) => twoDecimalString(item.value) + ' GB'}
|
||||
contentFormatter={({ value }) =>
|
||||
twoDecimalString(getSizeVal(value)) + getSizeUnit(value)
|
||||
}
|
||||
indicator="line"
|
||||
/>
|
||||
}
|
||||
/>
|
||||
<Area
|
||||
dataKey="stats.du"
|
||||
dataKey={dataKey}
|
||||
name="Disk Usage"
|
||||
type="monotoneX"
|
||||
fill="hsl(var(--chart-4))"
|
||||
|
||||
@@ -17,9 +17,11 @@ import { SystemStatsRecord } from '@/types'
|
||||
export default function DiskIoChart({
|
||||
ticks,
|
||||
systemData,
|
||||
dataKeys,
|
||||
}: {
|
||||
ticks: number[]
|
||||
systemData: SystemStatsRecord[]
|
||||
dataKeys: string[]
|
||||
}) {
|
||||
const chartTime = useStore($chartTime)
|
||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||
@@ -77,26 +79,22 @@ export default function DiskIoChart({
|
||||
/>
|
||||
}
|
||||
/>
|
||||
<Area
|
||||
dataKey="stats.dw"
|
||||
name="Write"
|
||||
type="monotoneX"
|
||||
fill="hsl(var(--chart-3))"
|
||||
fillOpacity={0.3}
|
||||
stroke="hsl(var(--chart-3))"
|
||||
// animationDuration={1200}
|
||||
isAnimationActive={false}
|
||||
/>
|
||||
<Area
|
||||
dataKey="stats.dr"
|
||||
name="Read"
|
||||
type="monotoneX"
|
||||
fill="hsl(var(--chart-1))"
|
||||
fillOpacity={0.3}
|
||||
stroke="hsl(var(--chart-1))"
|
||||
// animationDuration={1200}
|
||||
isAnimationActive={false}
|
||||
/>
|
||||
{dataKeys.map((dataKey, i) => {
|
||||
const action = i ? 'Read' : 'Write'
|
||||
const color = i ? 'hsl(var(--chart-1))' : 'hsl(var(--chart-3))'
|
||||
return (
|
||||
<Area
|
||||
key={i}
|
||||
dataKey={dataKey}
|
||||
name={action}
|
||||
type="monotoneX"
|
||||
fill={color}
|
||||
fillOpacity={0.3}
|
||||
stroke={color}
|
||||
isAnimationActive={false}
|
||||
/>
|
||||
)
|
||||
})}
|
||||
</AreaChart>
|
||||
</ChartContainer>
|
||||
</div>
|
||||
|
||||
@@ -1,89 +0,0 @@
|
||||
import { Area, AreaChart, CartesianGrid, XAxis, YAxis } from 'recharts'
|
||||
|
||||
import { ChartContainer, ChartTooltip, ChartTooltipContent } from '@/components/ui/chart'
|
||||
import { useYAxisWidth, chartTimeData, cn, formatShortDate, twoDecimalString } from '@/lib/utils'
|
||||
import { useMemo } from 'react'
|
||||
// import Spinner from '../spinner'
|
||||
import { useStore } from '@nanostores/react'
|
||||
import { $chartTime } from '@/lib/stores'
|
||||
import { SystemStatsRecord } from '@/types'
|
||||
|
||||
export default function ExFsDiskChart({
|
||||
ticks,
|
||||
systemData,
|
||||
fs,
|
||||
}: {
|
||||
ticks: number[]
|
||||
systemData: SystemStatsRecord[]
|
||||
fs: string
|
||||
}) {
|
||||
const chartTime = useStore($chartTime)
|
||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||
|
||||
const diskSize = useMemo(() => {
|
||||
const size = systemData.at(-1)?.stats.efs?.[fs].d ?? 0
|
||||
return size > 10 ? Math.round(size) : size
|
||||
}, [systemData])
|
||||
|
||||
return (
|
||||
<div>
|
||||
<ChartContainer
|
||||
config={{}}
|
||||
className={cn('h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity', {
|
||||
'opacity-100': yAxisWidth,
|
||||
})}
|
||||
>
|
||||
<AreaChart
|
||||
accessibilityLayer
|
||||
data={systemData}
|
||||
margin={{
|
||||
top: 10,
|
||||
}}
|
||||
>
|
||||
<CartesianGrid vertical={false} />
|
||||
<YAxis
|
||||
className="tracking-tighter"
|
||||
width={yAxisWidth}
|
||||
domain={[0, diskSize]}
|
||||
tickCount={9}
|
||||
minTickGap={6}
|
||||
tickLine={false}
|
||||
axisLine={false}
|
||||
tickFormatter={(value) => updateYAxisWidth(value + ' GB')}
|
||||
/>
|
||||
<XAxis
|
||||
dataKey="created"
|
||||
domain={[ticks[0], ticks.at(-1)!]}
|
||||
ticks={ticks}
|
||||
type="number"
|
||||
scale={'time'}
|
||||
minTickGap={35}
|
||||
tickMargin={8}
|
||||
axisLine={false}
|
||||
tickFormatter={chartTimeData[chartTime].format}
|
||||
/>
|
||||
<ChartTooltip
|
||||
animationEasing="ease-out"
|
||||
animationDuration={150}
|
||||
content={
|
||||
<ChartTooltipContent
|
||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||
contentFormatter={(item) => twoDecimalString(item.value) + ' GB'}
|
||||
indicator="line"
|
||||
/>
|
||||
}
|
||||
/>
|
||||
<Area
|
||||
dataKey={`stats.efs.${fs}.du`}
|
||||
name="Disk Usage"
|
||||
type="monotoneX"
|
||||
fill="hsl(var(--chart-4))"
|
||||
fillOpacity={0.4}
|
||||
stroke="hsl(var(--chart-4))"
|
||||
isAnimationActive={false}
|
||||
/>
|
||||
</AreaChart>
|
||||
</ChartContainer>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
import { Area, AreaChart, CartesianGrid, XAxis, YAxis } from 'recharts'
|
||||
|
||||
import { ChartContainer, ChartTooltip, ChartTooltipContent } from '@/components/ui/chart'
|
||||
import {
|
||||
useYAxisWidth,
|
||||
chartTimeData,
|
||||
cn,
|
||||
formatShortDate,
|
||||
toFixedWithoutTrailingZeros,
|
||||
twoDecimalString,
|
||||
} from '@/lib/utils'
|
||||
// import Spinner from '../spinner'
|
||||
import { useStore } from '@nanostores/react'
|
||||
import { $chartTime } from '@/lib/stores'
|
||||
import { SystemStatsRecord } from '@/types'
|
||||
|
||||
export default function ExFsDiskIoChart({
|
||||
ticks,
|
||||
systemData,
|
||||
fs,
|
||||
}: {
|
||||
ticks: number[]
|
||||
systemData: SystemStatsRecord[]
|
||||
fs: string
|
||||
}) {
|
||||
const chartTime = useStore($chartTime)
|
||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||
|
||||
return (
|
||||
<div>
|
||||
{/* {!yAxisSet && <Spinner />} */}
|
||||
<ChartContainer
|
||||
config={{}}
|
||||
className={cn('h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity', {
|
||||
'opacity-100': yAxisWidth,
|
||||
})}
|
||||
>
|
||||
<AreaChart
|
||||
accessibilityLayer
|
||||
data={systemData}
|
||||
margin={{
|
||||
left: 0,
|
||||
right: 0,
|
||||
top: 10,
|
||||
bottom: 0,
|
||||
}}
|
||||
>
|
||||
<CartesianGrid vertical={false} />
|
||||
<YAxis
|
||||
className="tracking-tighter"
|
||||
width={yAxisWidth}
|
||||
// domain={[0, (max: number) => (max <= 0.4 ? 0.4 : Math.ceil(max))]}
|
||||
tickFormatter={(value) => {
|
||||
const val = toFixedWithoutTrailingZeros(value, 2)
|
||||
return updateYAxisWidth(val + ' MB/s')
|
||||
}}
|
||||
tickLine={false}
|
||||
axisLine={false}
|
||||
/>
|
||||
<XAxis
|
||||
dataKey="created"
|
||||
domain={[ticks[0], ticks.at(-1)!]}
|
||||
ticks={ticks}
|
||||
type="number"
|
||||
scale={'time'}
|
||||
minTickGap={35}
|
||||
tickMargin={8}
|
||||
axisLine={false}
|
||||
tickFormatter={chartTimeData[chartTime].format}
|
||||
/>
|
||||
<ChartTooltip
|
||||
animationEasing="ease-out"
|
||||
animationDuration={150}
|
||||
content={
|
||||
<ChartTooltipContent
|
||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||
contentFormatter={(item) => twoDecimalString(item.value) + ' MB/s'}
|
||||
indicator="line"
|
||||
/>
|
||||
}
|
||||
/>
|
||||
<Area
|
||||
dataKey={`stats.efs.${fs}.w`}
|
||||
name="Write"
|
||||
type="monotoneX"
|
||||
fill="hsl(var(--chart-3))"
|
||||
fillOpacity={0.3}
|
||||
stroke="hsl(var(--chart-3))"
|
||||
// animationDuration={1200}
|
||||
isAnimationActive={false}
|
||||
/>
|
||||
<Area
|
||||
dataKey={`stats.efs.${fs}.r`}
|
||||
name="Read"
|
||||
type="monotoneX"
|
||||
fill="hsl(var(--chart-1))"
|
||||
fillOpacity={0.3}
|
||||
stroke="hsl(var(--chart-1))"
|
||||
// animationDuration={1200}
|
||||
isAnimationActive={false}
|
||||
/>
|
||||
</AreaChart>
|
||||
</ChartContainer>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -100,10 +100,10 @@ export default function MemChart({
|
||||
dataKey="stats.mb"
|
||||
name="Cache / Buffers"
|
||||
type="monotoneX"
|
||||
fill="hsl(var(--chart-2))"
|
||||
fillOpacity={0.2}
|
||||
strokeOpacity={0.3}
|
||||
stroke="hsl(var(--chart-2))"
|
||||
fill="hsla(160 60% 45% / 0.5)"
|
||||
fillOpacity={0.4}
|
||||
// strokeOpacity={1}
|
||||
stroke="hsla(160 60% 45% / 0.5)"
|
||||
stackId="1"
|
||||
isAnimationActive={false}
|
||||
/>
|
||||
|
||||
@@ -54,6 +54,8 @@ export default function TemperatureChart({
|
||||
return chartData
|
||||
}, [systemData])
|
||||
|
||||
const colors = Object.keys(newChartData.colors)
|
||||
|
||||
return (
|
||||
<div>
|
||||
{/* {!yAxisSet && <Spinner />} */}
|
||||
@@ -76,6 +78,7 @@ export default function TemperatureChart({
|
||||
<CartesianGrid vertical={false} />
|
||||
<YAxis
|
||||
className="tracking-tighter"
|
||||
domain={[0, 'auto']}
|
||||
width={yAxisWidth}
|
||||
tickFormatter={(value) => {
|
||||
const val = toFixedWithoutTrailingZeros(value, 2)
|
||||
@@ -108,7 +111,7 @@ export default function TemperatureChart({
|
||||
/>
|
||||
}
|
||||
/>
|
||||
{Object.keys(newChartData.colors).map((key) => (
|
||||
{colors.map((key) => (
|
||||
<Line
|
||||
key={key}
|
||||
dataKey={key}
|
||||
@@ -120,7 +123,7 @@ export default function TemperatureChart({
|
||||
isAnimationActive={false}
|
||||
/>
|
||||
))}
|
||||
<ChartLegend content={<ChartLegendContent />} />
|
||||
{colors.length < 12 && <ChartLegend content={<ChartLegendContent />} />}
|
||||
</LineChart>
|
||||
</ChartContainer>
|
||||
</div>
|
||||
|
||||
@@ -71,7 +71,11 @@ export default function SettingsProfilePage({ userSettings }: { userSettings: Us
|
||||
<Label className="block" htmlFor="chartTime">
|
||||
Default time period
|
||||
</Label>
|
||||
<Select name="chartTime" defaultValue={userSettings.chartTime}>
|
||||
<Select
|
||||
name="chartTime"
|
||||
key={userSettings.chartTime}
|
||||
defaultValue={userSettings.chartTime}
|
||||
>
|
||||
<SelectTrigger id="chartTime">
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
|
||||
@@ -5,7 +5,7 @@ import { pb } from '@/lib/stores'
|
||||
import { Separator } from '@/components/ui/separator'
|
||||
import { Card } from '@/components/ui/card'
|
||||
import { BellIcon, LoaderCircleIcon, PlusIcon, SaveIcon, Trash2Icon } from 'lucide-react'
|
||||
import { ChangeEventHandler, useState } from 'react'
|
||||
import { ChangeEventHandler, useEffect, useState } from 'react'
|
||||
import { toast } from '@/components/ui/use-toast'
|
||||
import { InputTags } from '@/components/ui/input-tags'
|
||||
import { UserSettings } from '@/types'
|
||||
@@ -29,7 +29,13 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
||||
const [emails, setEmails] = useState<string[]>(userSettings.emails ?? [])
|
||||
const [isLoading, setIsLoading] = useState(false)
|
||||
|
||||
const addWebhook = () => {
|
||||
// update values when userSettings changes
|
||||
useEffect(() => {
|
||||
setWebhooks(userSettings.webhooks ?? [])
|
||||
setEmails(userSettings.emails ?? [])
|
||||
}, [userSettings])
|
||||
|
||||
function addWebhook() {
|
||||
setWebhooks([...webhooks, ''])
|
||||
// focus on the new input
|
||||
queueMicrotask(() => {
|
||||
@@ -39,7 +45,7 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
||||
}
|
||||
const removeWebhook = (index: number) => setWebhooks(webhooks.filter((_, i) => i !== index))
|
||||
|
||||
const updateWebhook = (index: number, value: string) => {
|
||||
function updateWebhook(index: number, value: string) {
|
||||
const newWebhooks = [...webhooks]
|
||||
newWebhooks[index] = value
|
||||
setWebhooks(newWebhooks)
|
||||
|
||||
@@ -4,28 +4,16 @@ import { Suspense, lazy, useCallback, useEffect, useMemo, useRef, useState } fro
import { Card, CardHeader, CardTitle, CardDescription, CardContent } from '../ui/card'
import { useStore } from '@nanostores/react'
import Spinner from '../spinner'
import {
ClockArrowUp,
CpuIcon,
GlobeIcon,
LayoutGridIcon,
MonitorIcon,
StretchHorizontalIcon,
XIcon,
} from 'lucide-react'
import { ClockArrowUp, CpuIcon, GlobeIcon, LayoutGridIcon, MonitorIcon, XIcon } from 'lucide-react'
import ChartTimeSelect from '../charts/chart-time-select'
import {
chartTimeData,
cn,
getPbTimestamp,
useClampedIsInViewport,
useLocalStorage,
} from '@/lib/utils'
import { chartTimeData, cn, getPbTimestamp, useLocalStorage } from '@/lib/utils'
import { Separator } from '../ui/separator'
import { scaleTime } from 'd3-scale'
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '../ui/tooltip'
import { Button, buttonVariants } from '../ui/button'
import { Input } from '../ui/input'
import { Rows, TuxIcon } from '../ui/icons'
import { useIntersectionObserver } from '@/lib/use-intersection-observer'

const CpuChart = lazy(() => import('../charts/cpu-chart'))
const ContainerCpuChart = lazy(() => import('../charts/container-cpu-chart'))
@@ -37,8 +25,6 @@ const BandwidthChart = lazy(() => import('../charts/bandwidth-chart'))
const ContainerNetChart = lazy(() => import('../charts/container-net-chart'))
const SwapChart = lazy(() => import('../charts/swap-chart'))
const TemperatureChart = lazy(() => import('../charts/temperature-chart'))
const ExFsDiskChart = lazy(() => import('../charts/extra-fs-disk-chart'))
const ExFsDiskIoChart = lazy(() => import('../charts/extra-fs-disk-io-chart'))

export default function SystemDetail({ name }: { name: string }) {
const systems = useStore($systems)
@@ -47,7 +33,6 @@ export default function SystemDetail({ name }: { name: string }) {
const [ticks, setTicks] = useState([] as number[])
const [system, setSystem] = useState({} as SystemRecord)
const [systemStats, setSystemStats] = useState([] as SystemStatsRecord[])
const [hasDockerStats, setHasDocker] = useState(false)
const netCardRef = useRef<HTMLDivElement>(null)
const [dockerCpuChartData, setDockerCpuChartData] = useState<Record<string, number | string>[]>(
[]
@@ -58,6 +43,7 @@ export default function SystemDetail({ name }: { name: string }) {
const [dockerNetChartData, setDockerNetChartData] = useState<Record<string, number | number[]>[]>(
[]
)
const hasDockerStats = dockerCpuChartData.length > 0

useEffect(() => {
document.title = `${name} / Beszel`
@@ -65,7 +51,7 @@ export default function SystemDetail({ name }: { name: string }) {
resetCharts()
$chartTime.set($userSettings.get().chartTime)
$containerFilter.set('')
setHasDocker(false)
// setHasDocker(false)
}
}, [name])

@@ -149,7 +135,6 @@ export default function SystemDetail({ name }: { name: string }) {
const expectedInterval = chartTimeData[chartTime].expectedInterval
if (containerStats.status === 'fulfilled' && containerStats.value.length) {
makeContainerData(addEmptyValues(containerStats.value, expectedInterval))
setHasDocker(true)
}
if (systemStats.status === 'fulfilled') {
setSystemStats(addEmptyValues(systemStats.value, expectedInterval))
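Taken together, these hunks drop the `hasDockerStats` state and its `setHasDocker` calls in favor of a value derived directly from the container chart data, which cannot drift out of sync. A minimal sketch of the derive-instead-of-store pattern (names are illustrative):

```tsx
import { useState } from 'react'

function DockerSection() {
	const [containerData] = useState<Record<string, number>[]>([])
	// Derived on every render instead of mirrored in a second useState.
	const hasDockerStats = containerData.length > 0
	return hasDockerStats ? <p>{containerData.length} samples</p> : null
}
```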
@@ -199,13 +184,41 @@ export default function SystemDetail({ name }: { name: string }) {
setDockerNetChartData(dockerNetData)
}, [])

const uptime = useMemo(() => {
let uptime = system.info?.u || 0
if (uptime < 172800) {
return `${Math.trunc(uptime / 3600)} hours`
// values for system info bar
const systemInfo = useMemo(() => {
if (!system.info) {
return []
}
return `${Math.trunc(system.info?.u / 86400)} days`
}, [system.info?.u])
let uptime: number | string = system.info.u
if (system.info.u < 172800) {
const hours = Math.trunc(uptime / 3600)
uptime = `${hours} hour${hours > 1 ? 's' : ''}`
} else {
uptime = `${Math.trunc(system.info?.u / 86400)} days`
}
return [
{ value: system.host, Icon: GlobeIcon },
{
value: system.info.h,
Icon: MonitorIcon,
label: 'Hostname',
// hide if hostname is same as host or name
hide: system.info.h === system.host || system.info.h === system.name,
},
{ value: uptime, Icon: ClockArrowUp, label: 'Uptime' },
{ value: system.info.k, Icon: TuxIcon, label: 'Kernel' },
{
value: `${system.info.m} (${system.info.c}c${system.info.t ? `/${system.info.t}t` : ''})`,
Icon: CpuIcon,
hide: !system.info.m,
},
] as {
value: string | number | undefined
label?: string
Icon: any
hide?: boolean
}[]
}, [system.info])

/** Space for tooltip if more than 12 containers */
const bottomSpacing = useMemo(() => {
@@ -252,46 +265,31 @@ export default function SystemDetail({ name }: { name: string }) {
</span>
{system.status}
</div>
<Separator orientation="vertical" className="h-4 bg-primary/30" />
<div className="flex gap-1.5 items-center">
<GlobeIcon className="h-4 w-4" /> {system.host}
</div>
{/* show hostname if it's different than host or name */}
{system.info?.h && system.info.h != system.host && system.info.h != system.name && (
<TooltipProvider>
<Tooltip delayDuration={150}>
<Separator orientation="vertical" className="h-4 bg-primary/30" />
<TooltipTrigger asChild>
<div className="flex gap-1.5 items-center">
<MonitorIcon className="h-4 w-4" /> {system.info.h}
</div>
</TooltipTrigger>
<TooltipContent>Hostname</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
{system.info?.u && (
<TooltipProvider>
<Tooltip delayDuration={150}>
<Separator orientation="vertical" className="h-4 bg-primary/30" />
<TooltipTrigger asChild>
<div className="flex gap-1.5 items-center">
<ClockArrowUp className="h-4 w-4" /> {uptime}
</div>
</TooltipTrigger>
<TooltipContent>Uptime</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
{system.info?.m && (
<>
<Separator orientation="vertical" className="h-4 bg-primary/30" />
{systemInfo.map(({ value, label, Icon, hide }, i) => {
if (hide || !value) {
return null
}
const content = (
<div className="flex gap-1.5 items-center">
<CpuIcon className="h-4 w-4" />
{system.info.m} ({system.info.c}c/{system.info.t}t)
<Icon className="h-4 w-4" /> {value}
</div>
</>
)}
)
return (
<div key={i} className="contents">
<Separator orientation="vertical" className="h-4 bg-primary/30" />
{label ? (
<TooltipProvider>
<Tooltip delayDuration={150}>
<TooltipTrigger asChild>{content}</TooltipTrigger>
<TooltipContent>{label}</TooltipContent>
</Tooltip>
</TooltipProvider>
) : (
content
)}
</div>
)
})}
</div>
</div>
<div className="lg:ml-auto flex items-center gap-2 max-sm:-mb-1">
@@ -310,7 +308,7 @@ export default function SystemDetail({ name }: { name: string }) {
{grid ? (
<LayoutGridIcon className="h-[1.2rem] w-[1.2rem] opacity-85" />
) : (
<StretchHorizontalIcon className="h-[1.2rem] w-[1.2rem] opacity-85" />
<Rows className="h-[1.3rem] w-[1.3rem] opacity-85" />
)}
</Button>
</TooltipTrigger>
@@ -361,18 +359,21 @@ export default function SystemDetail({ name }: { name: string }) {
</ChartCard>
)}

{(systemStats.at(-1)?.stats.s ?? 0) > 0 && (
<ChartCard grid={grid} title="Swap Usage" description="Swap space used by the system">
<SwapChart ticks={ticks} systemData={systemStats} />
</ChartCard>
)}

<ChartCard grid={grid} title="Disk Space" description="Usage of root partition">
<DiskChart ticks={ticks} systemData={systemStats} />
<DiskChart
ticks={ticks}
systemData={systemStats}
dataKey="stats.du"
diskSize={Math.round(systemStats.at(-1)?.stats.d ?? NaN)}
/>
</ChartCard>

<ChartCard grid={grid} title="Disk I/O" description="Throughput of root filesystem">
<DiskIoChart ticks={ticks} systemData={systemStats} />
<DiskIoChart
ticks={ticks}
systemData={systemStats}
dataKeys={['stats.dw', 'stats.dr']}
/>
</ChartCard>

<ChartCard
@@ -400,6 +401,12 @@ export default function SystemDetail({ name }: { name: string }) {
</div>
)}

{(systemStats.at(-1)?.stats.su ?? 0) > 0 && (
<ChartCard grid={grid} title="Swap Usage" description="Swap space used by the system">
<SwapChart ticks={ticks} systemData={systemStats} />
</ChartCard>
)}

{systemStats.at(-1)?.stats.t && (
<ChartCard grid={grid} title="Temperature" description="Temperatures of system sensors">
<TemperatureChart ticks={ticks} systemData={systemStats} />
@@ -418,14 +425,23 @@ export default function SystemDetail({ name }: { name: string }) {
title={`${extraFsName} Usage`}
description={`Disk usage of ${extraFsName}`}
>
<ExFsDiskChart ticks={ticks} systemData={systemStats} fs={extraFsName} />
<DiskChart
ticks={ticks}
systemData={systemStats}
dataKey={`stats.efs.${extraFsName}.du`}
diskSize={Math.round(systemStats.at(-1)?.stats.efs?.[extraFsName].d ?? NaN)}
/>
</ChartCard>
<ChartCard
grid={grid}
title={`${extraFsName} I/O`}
description={`Throughput of of ${extraFsName}`}
description={`Throughput of ${extraFsName}`}
>
<ExFsDiskIoChart ticks={ticks} systemData={systemStats} fs={extraFsName} />
<DiskIoChart
ticks={ticks}
systemData={systemStats}
dataKeys={[`stats.efs.${extraFsName}.w`, `stats.efs.${extraFsName}.r`]}
/>
</ChartCard>
</div>
)
@@ -484,13 +500,12 @@ function ChartCard({
grid?: boolean
isContainerChart?: boolean
}) {
const target = useRef<HTMLDivElement>(null)
const [isInViewport, wrappedTargetRef] = useClampedIsInViewport({ target: target })
const { isIntersecting, ref } = useIntersectionObserver()

return (
<Card
className={cn('pb-2 sm:pb-4 odd:last-of-type:col-span-full', { 'col-span-full': !grid })}
ref={wrappedTargetRef}
ref={ref}
>
<CardHeader className="pb-5 pt-4 relative space-y-1 max-sm:py-3 max-sm:px-4">
<CardTitle className="text-xl sm:text-2xl">{title}</CardTitle>
@@ -499,7 +514,7 @@ function ChartCard({
</CardHeader>
<CardContent className="pl-0 w-[calc(100%-1.6em)] h-52 relative">
{<Spinner />}
{isInViewport && <Suspense>{children}</Suspense>}
{isIntersecting && <Suspense>{children}</Suspense>}
</CardContent>
</Card>
)

25
beszel/site/src/components/ui/icons.tsx
Normal file
@@ -0,0 +1,25 @@
import { SVGProps } from 'react'

// linux-logo-bold from https://github.com/phosphor-icons/core (MIT license)
export function TuxIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg viewBox="0 0 256 256" {...props}>
<path
fill="currentColor"
d="M231 217a12 12 0 0 1-16-2c-2-1-35-44-35-127a52 52 0 1 0-104 0c0 83-33 126-35 127a12 12 0 0 1-18-14c0-1 29-39 29-113a76 76 0 1 1 152 0c0 74 29 112 29 113a12 12 0 0 1-2 16m-127-97a16 16 0 1 0-16-16 16 16 0 0 0 16 16m64-16a16 16 0 1 0-16 16 16 16 0 0 0 16-16m-73 51 28 12a12 12 0 0 0 10 0l28-12a12 12 0 0 0-10-22l-23 10-23-10a12 12 0 0 0-10 22m33 29a57 57 0 0 0-39 15 12 12 0 0 0 17 18 33 33 0 0 1 44 0 12 12 0 1 0 17-18 57 57 0 0 0-39-15"
/>
</svg>
)
}

// MingCute Apache License 2.0 https://github.com/Richard9394/MingCute
export function Rows(props: SVGProps<SVGSVGElement>) {
return (
<svg viewBox="0 0 24 24" {...props}>
<path
fill="currentColor"
d="M5 3a2 2 0 0 0-2 2v4a2 2 0 0 0 2 2h14a2 2 0 0 0 2-2V5a2 2 0 0 0-2-2zm0 2h14v4H5zm0 8a2 2 0 0 0-2 2v4a2 2 0 0 0 2 2h14a2 2 0 0 0 2-2v-4a2 2 0 0 0-2-2zm0 2h14v4H5z"
/>
</svg>
)
}
169
beszel/site/src/lib/use-intersection-observer.ts
Normal file
@@ -0,0 +1,169 @@
import { useEffect, useRef, useState } from 'react'

// adapted from usehooks-ts/use-intersection-observer

/** The hook internal state. */
type State = {
/** A boolean indicating if the element is intersecting. */
isIntersecting: boolean
/** The intersection observer entry. */
entry?: IntersectionObserverEntry
}

/** Represents the options for configuring the Intersection Observer. */
type UseIntersectionObserverOptions = {
/**
* The element that is used as the viewport for checking visibility of the target.
* @default null
*/
root?: Element | Document | null
/**
* A margin around the root.
* @default '0%'
*/
rootMargin?: string
/**
* A threshold indicating the percentage of the target's visibility needed to trigger the callback.
* @default 0
*/
threshold?: number | number[]
/**
* If true, freezes the intersection state once the element becomes visible.
* @default true
*/
freeze?: boolean
/**
* A callback function to be invoked when the intersection state changes.
* @param {boolean} isIntersecting - A boolean indicating if the element is intersecting.
* @param {IntersectionObserverEntry} entry - The intersection observer Entry.
* @default undefined
*/
onChange?: (isIntersecting: boolean, entry: IntersectionObserverEntry) => void
/**
* The initial state of the intersection.
* @default false
*/
initialIsIntersecting?: boolean
}

/**
* The return type of the useIntersectionObserver hook.
*
* Supports both tuple and object destructing.
* @param {(node: Element | null) => void} ref - The ref callback function.
* @param {boolean} isIntersecting - A boolean indicating if the element is intersecting.
* @param {IntersectionObserverEntry | undefined} entry - The intersection observer Entry.
*/
type IntersectionReturn = {
ref: (node?: Element | null) => void
isIntersecting: boolean
entry?: IntersectionObserverEntry
}

/**
* Custom hook that tracks the intersection of a DOM element with its containing element or the viewport using the [`Intersection Observer API`](https://developer.mozilla.org/en-US/docs/Web/API/Intersection_Observer_API).
* @param {UseIntersectionObserverOptions} options - The options for the Intersection Observer.
* @returns {IntersectionReturn} The ref callback, a boolean indicating if the element is intersecting, and the intersection observer entry.
* @example
* ```tsx
* const { ref, isIntersecting, entry } = useIntersectionObserver({ threshold: 0.5 });
* ```
*/
export function useIntersectionObserver({
threshold = 0,
root = null,
rootMargin = '0%',
freeze = true,
initialIsIntersecting = false,
onChange,
}: UseIntersectionObserverOptions = {}): IntersectionReturn {
const [ref, setRef] = useState<Element | null>(null)

const [state, setState] = useState<State>(() => ({
isIntersecting: initialIsIntersecting,
entry: undefined,
}))

const callbackRef = useRef<UseIntersectionObserverOptions['onChange']>()

callbackRef.current = onChange

const frozen = state.entry?.isIntersecting && freeze

useEffect(() => {
// Ensure we have a ref to observe
if (!ref) return

// Ensure the browser supports the Intersection Observer API
if (!('IntersectionObserver' in window)) return

// Skip if frozen
if (frozen) return

let unobserve: (() => void) | undefined

const observer = new IntersectionObserver(
(entries: IntersectionObserverEntry[]): void => {
const thresholds = Array.isArray(observer.thresholds)
? observer.thresholds
: [observer.thresholds]

entries.forEach((entry) => {
const isIntersecting =
entry.isIntersecting &&
thresholds.some((threshold) => entry.intersectionRatio >= threshold)

setState({ isIntersecting, entry })

if (callbackRef.current) {
callbackRef.current(isIntersecting, entry)
}

if (isIntersecting && freeze && unobserve) {
unobserve()
unobserve = undefined
}
})
},
{ threshold, root, rootMargin }
)

observer.observe(ref)

return () => {
observer.disconnect()
}

// eslint-disable-next-line react-hooks/exhaustive-deps
}, [
ref,
// eslint-disable-next-line react-hooks/exhaustive-deps
JSON.stringify(threshold),
root,
rootMargin,
frozen,
freeze,
])

// ensures that if the observed element changes, the intersection observer is reinitialized
const prevRef = useRef<Element | null>(null)

useEffect(() => {
if (
!ref &&
state.entry?.target &&
!freeze &&
!frozen &&
prevRef.current !== state.entry.target
) {
prevRef.current = state.entry.target
setState({ isIntersecting: initialIsIntersecting, entry: undefined })
}
}, [ref, state.entry, freeze, frozen, initialIsIntersecting])

return {
ref: setRef,
isIntersecting: !!state.isIntersecting,
entry: state.entry,
} as IntersectionReturn
}
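ChartCard consumes this hook to defer mounting each lazy chart until its card scrolls into view. A minimal sketch of that kind of usage, assuming the same hook path; the `LazyPanel` component and `HeavyChart` module are illustrative:

```tsx
import { Suspense, lazy } from 'react'
import { useIntersectionObserver } from '@/lib/use-intersection-observer'

const HeavyChart = lazy(() => import('./heavy-chart')) // hypothetical chart module

function LazyPanel() {
	// freeze defaults to true, so once the panel has been seen the chart stays mounted.
	const { isIntersecting, ref } = useIntersectionObserver({ rootMargin: '100px' })
	return (
		<div ref={ref}>
			{isIntersecting && (
				<Suspense fallback={null}>
					<HeavyChart />
				</Suspense>
			)}
		</div>
	)
}
```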
@@ -7,7 +7,6 @@ import { RecordModel, RecordSubscription } from 'pocketbase'
import { WritableAtom } from 'nanostores'
import { timeDay, timeHour } from 'd3-time'
import { useEffect, useState } from 'react'
import useIsInViewport, { CallbackRef, HookOptions } from 'use-is-in-viewport'

export function cn(...inputs: ClassValue[]) {
return twMerge(clsx(inputs))
@@ -40,18 +39,14 @@ const verifyAuth = () => {
}

export const updateSystemList = async () => {
// try {
const records = await pb.collection<SystemRecord>('systems').getFullList({ sort: '+name' })
const records = await pb
.collection<SystemRecord>('systems')
.getFullList({ sort: '+name', fields: 'id,name,host,info,status' })
if (records.length) {
$systems.set(records)
} else {
verifyAuth()
}
// }
// catch (e) {
// console.log('verifying auth error', e)
// verifyAuth()
// }
}

export const updateAlerts = () => {
@@ -182,7 +177,7 @@ export const chartTimeData: ChartTimeData = {
expectedInterval: 60_000 * 120,
label: '1 week',
ticks: 7,
format: (timestamp: string) => formatShortDate(timestamp),
format: (timestamp: string) => formatDay(timestamp),
getOffset: (endTime: Date) => timeDay.offset(endTime, -7),
},
'30d': {
@@ -218,24 +213,6 @@ export function useYAxisWidth() {
return { yAxisWidth, updateYAxisWidth }
}

export function useClampedIsInViewport(options: HookOptions): [boolean | null, CallbackRef] {
const [isInViewport, wrappedTargetRef] = useIsInViewport(options)
const [wasInViewportAtleastOnce, setWasInViewportAtleastOnce] = useState(isInViewport)

useEffect(() => {
setWasInViewportAtleastOnce((prev) => {
// this will clamp it to the first true
// received from useIsInViewport
if (!prev) {
return isInViewport
}
return prev
})
}, [isInViewport])

return [wasInViewportAtleastOnce, wrappedTargetRef]
}

export function toFixedWithoutTrailingZeros(num: number, digits: number) {
return parseFloat(num.toFixed(digits)).toString()
}
@@ -294,3 +271,17 @@ export async function updateUserSettings() {
console.log('create settings', e)
}
}

/**
* Get the unit of size (TB or GB) for a given size in gigabytes
* @param n size in gigabytes
* @returns unit of size (TB or GB)
*/
export const getSizeUnit = (n: number) => (n >= 1_000 ? ' TB' : ' GB')

/**
* Get the value of number in gigabytes if less than 1000, otherwise in terabytes
* @param n size in gigabytes
* @returns value in GB if less than 1000, otherwise value in TB
*/
export const getSizeVal = (n: number) => (n >= 1_000 ? n / 1_000 : n)

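The two helpers above split a gigabyte figure into a display value and a unit. A small sketch of how they might be combined into a label; the `formatSize` wrapper is illustrative, not part of the codebase:

```ts
const getSizeUnit = (n: number) => (n >= 1_000 ? ' TB' : ' GB')
const getSizeVal = (n: number) => (n >= 1_000 ? n / 1_000 : n)

/** Format a size given in gigabytes, e.g. 512 -> "512 GB", 2048 -> "2 TB". */
function formatSize(gigabytes: number): string {
	return `${parseFloat(getSizeVal(gigabytes).toFixed(1))}${getSizeUnit(gigabytes)}`
}

console.log(formatSize(512)) // "512 GB"
console.log(formatSize(2048)) // "2 TB"
```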
4
beszel/site/src/types.d.ts
vendored
@@ -12,10 +12,12 @@ export interface SystemRecord extends RecordModel {
export interface SystemInfo {
/** hostname */
h: string
/** kernel **/
k?: string
/** cpu percent */
cpu: number
/** cpu threads */
t: number
t?: number
/** cpu cores */
c: number
/** cpu model */

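For readers decoding the single-letter keys, a hypothetical record shaped like this interface might look as follows; the values are made up, and the `m` (CPU model) and `u` (uptime in seconds) keys are inferred from their use in system-detail.tsx rather than shown in this hunk:

```ts
const exampleInfo = {
	h: 'server-01', // hostname
	k: '6.8.0-45-generic', // kernel (optional)
	cpu: 12.5, // cpu percent
	c: 4, // cpu cores
	t: 8, // cpu threads (optional)
	m: 'AMD Ryzen 5 5600G', // cpu model (assumed key)
	u: 259_200, // uptime in seconds (assumed key)
}
```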
@@ -1,6 +1,6 @@
package beszel

const (
Version = "0.4.0"
Version = "0.5.1"
AppName = "beszel"
)

25
readme.md
@@ -98,20 +98,23 @@ Use `./beszel update` and `./beszel-agent update` to update to the latest versio

### Hub

| Name | Default | Description |
| ----------------------- | ------- | -------------------------------- |
| `DISABLE_PASSWORD_AUTH` | false | Disables password authentication |
| Name | Default | Description |
| ----------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
| `CSP` | unset | Adds a [Content-Security-Policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy) header with this value. |
| `DISABLE_PASSWORD_AUTH` | false | Disables password authentication. |

### Agent

| Name | Default | Description |
| ------------------- | ------- | ---------------------------------------------------------------------------------------- |
| `DOCKER_HOST` | unset | Overrides the docker host (docker.sock) if using a proxy.[^socket] |
| `EXTRA_FILESYSTEMS` | unset | See [Monitoring additional disks / partitions](#monitoring-additional-disks--partitions) |
| `EXTRA_FILESYSTEMS` | unset | See [Monitoring additional disks, partitions, or remote mounts](#monitoring-additional-disks-partitions-or-remote-mounts) |
| `FILESYSTEM` | unset | Device, partition, or mount point to use for root disk stats. |
| `KEY` | unset | Public SSH key to use for authentication. Provided in hub. |
| `LOG_LEVEL` | info | Logging level. Valid values: "debug", "info", "warn", "error". |
| `NICS` | unset | Whitelist of network interfaces to monitor for bandwidth chart. |
| `PORT` | 45876 | Port or address:port to listen on. |
| `SENSORS` | unset | Whitelist of temperature sensors to monitor. |

<!-- | `SYS_SENSORS` | unset | Overrides the sys location for sensors. | -->

@@ -151,15 +154,17 @@ Visit the "Auth providers" page to enable your provider. The redirect / callback

</details>

## Monitoring additional disks / partitions
## Monitoring additional disks, partitions, or remote mounts

You can configure the agent to monitor the usage and I/O of more than one disk or partition. The approach differs depending on the deployment method.
The method for adding additional disks differs depending on your deployment method.

Use `lsblk` to find the names and mount points of your partitions. If you have trouble, check the agent logs.

> Note: The charts will use the name of the device or partition if available, and fall back to the folder name. You will not get I/O stats for network mounted drives.

### Docker

Mount a folder from the partition's filesystem in the container's `/extra-filesystems` directory, like the example below. The charts will use the name of the device or partition, not the name of the folder.
Mount a folder from the target filesystem in the container's `/extra-filesystems` directory. For example:

```yaml
volumes:
@@ -169,10 +174,10 @@ volumes:

### Binary

Set the `EXTRA_FILESYSTEMS` environment variable to a comma-separated list of devices or partitions to monitor. For example:
Set the `EXTRA_FILESYSTEMS` environment variable to a comma-separated list of devices, partitions, or mount points to monitor. For example:

```bash
EXTRA_FILESYSTEMS="sdb,sdc1,mmcblk0"
EXTRA_FILESYSTEMS="sdb,sdc1,mmcblk0,/mnt/network-share"
```

## REST API
@@ -212,7 +217,7 @@ Assuming the agent is running, the connection is probably being blocked by a fir
1. Add an inbound rule to the agent system's firewall(s) to allow TCP connections to the port. Check any active firewalls, like iptables, and your cloud provider's firewall settings if applicable.
2. Alternatively, use software like [Cloudflare Tunnel](https://www.cloudflare.com/products/tunnel/), [WireGuard](https://www.wireguard.com/), or [Tailscale](https://tailscale.com/) to securely bypass your firewall.

You can test connectivity by running telnet `<agent-ip> <port>`.
You can test connectivity by running `telnet <agent-ip> <port>`.

### Connecting the hub and agent on the same system using Docker